From cb5feace5d154230570ca509f4ea8c88a73bcc63 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Ber=C3=A1nek?= Date: Mon, 30 Jun 2025 14:05:56 +0200 Subject: [PATCH] Add new `cargo` stable benchmark --- collector/compile-benchmarks/README.md | 3 + collector/compile-benchmarks/REUSE.toml | 15 + collector/compile-benchmarks/cargo/.gitignore | 14 + .../compile-benchmarks/cargo/.travis.yml | 60 + .../compile-benchmarks/cargo/0-println.patch | 13 + .../compile-benchmarks/cargo/ARCHITECTURE.md | 90 + .../compile-benchmarks/cargo/CONTRIBUTING.md | 173 + collector/compile-benchmarks/cargo/Cargo.lock | 1185 ++ collector/compile-benchmarks/cargo/Cargo.toml | 81 + .../compile-benchmarks/cargo/LICENSE-APACHE | 201 + .../compile-benchmarks/cargo/LICENSE-MIT | 23 + .../cargo/LICENSE-THIRD-PARTY | 1272 ++ collector/compile-benchmarks/cargo/README.md | 84 + .../compile-benchmarks/cargo/appveyor.yml | 41 + .../compile-benchmarks/cargo/perf-config.json | 8 + .../cargo/socket2-0.2.3/.appveyor.yml | 16 + .../cargo/socket2-0.2.3/.gitignore | 3 + .../cargo/socket2-0.2.3/.travis.yml | 27 + .../cargo/socket2-0.2.3/Cargo.toml | 44 + .../cargo/socket2-0.2.3/Cargo.toml.orig | 33 + .../cargo/socket2-0.2.3/LICENSE-APACHE | 201 + .../cargo/socket2-0.2.3/LICENSE-MIT | 25 + .../cargo/socket2-0.2.3/README.md | 14 + .../cargo/socket2-0.2.3/src/lib.rs | 130 + .../cargo/socket2-0.2.3/src/sockaddr.rs | 194 + .../cargo/socket2-0.2.3/src/socket.rs | 852 + .../cargo/socket2-0.2.3/src/sys/unix/mod.rs | 1028 ++ .../cargo/socket2-0.2.3/src/sys/unix/weak.rs | 60 + .../cargo/socket2-0.2.3/src/sys/windows.rs | 913 ++ .../cargo/socket2-0.2.3/src/utils.rs | 51 + .../compile-benchmarks/cargo/src/bin/bench.rs | 153 + .../compile-benchmarks/cargo/src/bin/build.rs | 131 + .../compile-benchmarks/cargo/src/bin/cargo.rs | 427 + .../compile-benchmarks/cargo/src/bin/check.rs | 132 + .../compile-benchmarks/cargo/src/bin/clean.rs | 67 + .../compile-benchmarks/cargo/src/bin/doc.rs | 126 + .../compile-benchmarks/cargo/src/bin/fetch.rs | 56 + .../cargo/src/bin/generate_lockfile.rs | 50 + .../cargo/src/bin/git_checkout.rs | 54 + .../compile-benchmarks/cargo/src/bin/help.rs | 22 + .../compile-benchmarks/cargo/src/bin/init.rs | 73 + .../cargo/src/bin/install.rs | 163 + .../cargo/src/bin/locate_project.rs | 38 + .../compile-benchmarks/cargo/src/bin/login.rs | 69 + .../cargo/src/bin/metadata.rs | 75 + .../compile-benchmarks/cargo/src/bin/new.rs | 73 + .../compile-benchmarks/cargo/src/bin/owner.rs | 68 + .../cargo/src/bin/package.rs | 66 + .../compile-benchmarks/cargo/src/bin/pkgid.rs | 80 + .../cargo/src/bin/publish.rs | 105 + .../cargo/src/bin/read_manifest.rs | 39 + .../compile-benchmarks/cargo/src/bin/run.rs | 136 + .../compile-benchmarks/cargo/src/bin/rustc.rs | 140 + .../cargo/src/bin/rustdoc.rs | 127 + .../cargo/src/bin/search.rs | 82 + .../compile-benchmarks/cargo/src/bin/test.rs | 186 + .../cargo/src/bin/uninstall.rs | 55 + .../cargo/src/bin/update.rs | 83 + .../cargo/src/bin/verify_project.rs | 77 + .../cargo/src/bin/version.rs | 27 + .../compile-benchmarks/cargo/src/bin/yank.rs | 63 + .../cargo/src/cargo/core/dependency.rs | 369 + .../cargo/src/cargo/core/features.rs | 279 + .../cargo/src/cargo/core/manifest.rs | 709 + .../cargo/src/cargo/core/mod.rs | 26 + .../cargo/src/cargo/core/package.rs | 219 + .../cargo/src/cargo/core/package_id.rs | 190 + .../cargo/src/cargo/core/package_id_spec.rs | 280 + .../cargo/src/cargo/core/registry.rs | 607 + .../cargo/src/cargo/core/resolver/encode.rs | 420 + .../cargo/src/cargo/core/resolver/mod.rs | 1265 ++ 
.../cargo/src/cargo/core/shell.rs | 251 + .../cargo/src/cargo/core/source.rs | 652 + .../cargo/src/cargo/core/summary.rs | 124 + .../cargo/src/cargo/core/workspace.rs | 702 + .../compile-benchmarks/cargo/src/cargo/lib.rs | 255 + .../cargo/src/cargo/ops/cargo_clean.rs | 109 + .../cargo/src/cargo/ops/cargo_compile.rs | 784 + .../cargo/src/cargo/ops/cargo_doc.rs | 149 + .../cargo/src/cargo/ops/cargo_fetch.rs | 12 + .../src/cargo/ops/cargo_generate_lockfile.rs | 184 + .../cargo/src/cargo/ops/cargo_install.rs | 633 + .../cargo/src/cargo/ops/cargo_new.rs | 607 + .../src/cargo/ops/cargo_output_metadata.rs | 106 + .../cargo/src/cargo/ops/cargo_package.rs | 340 + .../cargo/src/cargo/ops/cargo_pkgid.rs | 16 + .../src/cargo/ops/cargo_read_manifest.rs | 165 + .../cargo/src/cargo/ops/cargo_run.rs | 74 + .../src/cargo/ops/cargo_rustc/compilation.rs | 187 + .../src/cargo/ops/cargo_rustc/context.rs | 1313 ++ .../src/cargo/ops/cargo_rustc/custom_build.rs | 538 + .../src/cargo/ops/cargo_rustc/fingerprint.rs | 722 + .../cargo/src/cargo/ops/cargo_rustc/job.rs | 67 + .../src/cargo/ops/cargo_rustc/job_queue.rs | 426 + .../cargo/src/cargo/ops/cargo_rustc/layout.rs | 191 + .../cargo/src/cargo/ops/cargo_rustc/links.rs | 64 + .../cargo/src/cargo/ops/cargo_rustc/mod.rs | 941 ++ .../cargo/ops/cargo_rustc/output_depinfo.rs | 95 + .../cargo/src/cargo/ops/cargo_test.rs | 214 + .../cargo/src/cargo/ops/lockfile.rs | 149 + .../cargo/src/cargo/ops/mod.rs | 43 + .../cargo/src/cargo/ops/registry.rs | 445 + .../cargo/src/cargo/ops/resolve.rs | 311 + .../cargo/src/cargo/sources/config.rs | 226 + .../cargo/src/cargo/sources/directory.rs | 204 + .../cargo/src/cargo/sources/git/mod.rs | 4 + .../cargo/src/cargo/sources/git/source.rs | 268 + .../cargo/src/cargo/sources/git/utils.rs | 733 + .../cargo/src/cargo/sources/mod.rs | 13 + .../cargo/src/cargo/sources/path.rs | 544 + .../cargo/src/cargo/sources/registry/index.rs | 192 + .../cargo/src/cargo/sources/registry/local.rs | 105 + .../cargo/src/cargo/sources/registry/mod.rs | 516 + .../src/cargo/sources/registry/remote.rs | 260 + .../cargo/src/cargo/sources/replaced.rs | 75 + .../cargo/src/cargo/util/cfg.rs | 261 + .../cargo/src/cargo/util/config.rs | 882 ++ .../cargo/src/cargo/util/dependency_queue.rs | 144 + .../cargo/src/cargo/util/errors.rs | 288 + .../cargo/src/cargo/util/flock.rs | 344 + .../cargo/src/cargo/util/graph.rs | 105 + .../cargo/src/cargo/util/hex.rs | 27 + .../cargo/src/cargo/util/important_paths.rs | 65 + .../cargo/src/cargo/util/job.rs | 260 + .../cargo/src/cargo/util/lazy_cell.rs | 73 + .../cargo/src/cargo/util/lev_distance.rs | 53 + .../cargo/src/cargo/util/machine_message.rs | 58 + .../cargo/src/cargo/util/mod.rs | 44 + .../cargo/src/cargo/util/network.rs | 106 + .../cargo/src/cargo/util/paths.rs | 183 + .../cargo/src/cargo/util/process_builder.rs | 312 + .../cargo/src/cargo/util/profile.rs | 71 + .../cargo/src/cargo/util/read2.rs | 177 + .../cargo/src/cargo/util/rustc.rs | 62 + .../cargo/src/cargo/util/sha256.rs | 23 + .../cargo/src/cargo/util/to_semver.rs | 30 + .../cargo/src/cargo/util/to_url.rs | 27 + .../cargo/src/cargo/util/toml/mod.rs | 1085 ++ .../cargo/src/cargo/util/toml/targets.rs | 493 + .../cargo/src/cargo/util/vcs.rs | 66 + .../compile-benchmarks/cargo/src/ci/dox.sh | 33 + .../cargo/src/crates-io/Cargo.toml | 21 + .../cargo/src/crates-io/lib.rs | 343 + .../compile-benchmarks/cargo/src/doc/CNAME | 1 + .../cargo/src/doc/MIGRATION_MAP | 12 + .../cargo/src/doc/README.md | 6 + .../cargo/src/doc/book/.gitignore | 1 + .../cargo/src/doc/book/README.md | 47 
+ .../cargo/src/doc/book/book.toml | 2 + .../cargo/src/doc/book/src/SUMMARY.md | 30 + .../cargo/src/doc/book/src/faq.md | 193 + .../book/src/getting-started/first-steps.md | 70 + .../src/doc/book/src/getting-started/index.md | 6 + .../book/src/getting-started/installation.md | 38 + .../src/guide/cargo-toml-vs-cargo-lock.md | 103 + .../book/src/guide/continuous-integration.md | 21 + .../book/src/guide/creating-a-new-project.md | 89 + .../src/doc/book/src/guide/dependencies.md | 90 + .../cargo/src/doc/book/src/guide/index.md | 13 + .../src/doc/book/src/guide/project-layout.md | 35 + .../cargo/src/doc/book/src/guide/tests.md | 39 + .../doc/book/src/guide/why-cargo-exists.md | 12 + .../guide/working-on-an-existing-project.md | 22 + .../doc/book/src/images/Cargo-Logo-Small.png | Bin 0 -> 58168 bytes .../doc/book/src/images/auth-level-acl.png | Bin 0 -> 90300 bytes .../src/doc/book/src/images/org-level-acl.png | Bin 0 -> 76572 bytes .../cargo/src/doc/book/src/index.md | 28 + .../doc/book/src/reference/build-scripts.md | 556 + .../src/doc/book/src/reference/config.md | 139 + .../src/reference/environment-variables.md | 130 + .../doc/book/src/reference/external-tools.md | 103 + .../cargo/src/doc/book/src/reference/index.md | 16 + .../src/doc/book/src/reference/manifest.md | 762 + .../src/doc/book/src/reference/pkgid-spec.md | 44 + .../src/doc/book/src/reference/publishing.md | 222 + .../book/src/reference/source-replacement.md | 128 + .../src/reference/specifying-dependencies.md | 524 + .../cargo/src/doc/book/theme/favicon.png | Bin 0 -> 5430 bytes .../cargo/src/doc/build-script.md | 556 + .../cargo/src/doc/config.md | 138 + .../cargo/src/doc/crates-io.md | 222 + .../cargo/src/doc/environment-variables.md | 131 + .../cargo/src/doc/external-tools.md | 103 + .../compile-benchmarks/cargo/src/doc/faq.md | 193 + .../cargo/src/doc/favicon.ico | Bin 0 -> 5430 bytes .../cargo/src/doc/footer.html | 11 + .../compile-benchmarks/cargo/src/doc/guide.md | 446 + .../cargo/src/doc/header.html | 52 + .../cargo/src/doc/html-headers.html | 2 + .../cargo/src/doc/images/Cargo-Logo-Small.png | Bin 0 -> 58168 bytes .../cargo/src/doc/images/auth-level-acl.png | Bin 0 -> 90300 bytes .../cargo/src/doc/images/circle-with-i.png | Bin 0 -> 496 bytes .../cargo/src/doc/images/forkme.png | Bin 0 -> 4725 bytes .../cargo/src/doc/images/noise.png | Bin 0 -> 3190 bytes .../cargo/src/doc/images/org-level-acl.png | Bin 0 -> 76572 bytes .../cargo/src/doc/images/search.png | Bin 0 -> 312 bytes .../compile-benchmarks/cargo/src/doc/index.md | 111 + .../cargo/src/doc/javascripts/all.js | 40 + .../cargo/src/doc/javascripts/prism.js | 6 + .../cargo/src/doc/manifest.md | 769 + .../cargo/src/doc/pkgid-spec.md | 44 + .../cargo/src/doc/policies.html | 10 + .../cargo/src/doc/source-replacement.md | 134 + .../cargo/src/doc/specifying-dependencies.md | 525 + .../cargo/src/doc/stylesheets/all.css | 291 + .../cargo/src/doc/stylesheets/normalize.css | 375 + .../cargo/src/doc/stylesheets/prism.css | 197 + .../compile-benchmarks/cargo/src/etc/_cargo | 544 + .../cargo/src/etc/cargo.bashcomp.sh | 211 + .../cargo/src/etc/man/cargo-bench.1 | 143 + .../cargo/src/etc/man/cargo-build.1 | 132 + .../cargo/src/etc/man/cargo-check.1 | 132 + .../cargo/src/etc/man/cargo-clean.1 | 82 + .../cargo/src/etc/man/cargo-doc.1 | 109 + .../cargo/src/etc/man/cargo-fetch.1 | 52 + .../src/etc/man/cargo-generate-lockfile.1 | 41 + .../cargo/src/etc/man/cargo-init.1 | 68 + .../cargo/src/etc/man/cargo-install.1 | 157 + .../cargo/src/etc/man/cargo-login.1 | 41 + 
.../cargo/src/etc/man/cargo-metadata.1 | 71 + .../cargo/src/etc/man/cargo-new.1 | 68 + .../cargo/src/etc/man/cargo-owner.1 | 88 + .../cargo/src/etc/man/cargo-package.1 | 59 + .../cargo/src/etc/man/cargo-pkgid.1 | 75 + .../cargo/src/etc/man/cargo-publish.1 | 59 + .../cargo/src/etc/man/cargo-run.1 | 103 + .../cargo/src/etc/man/cargo-rustc.1 | 126 + .../cargo/src/etc/man/cargo-rustdoc.1 | 124 + .../cargo/src/etc/man/cargo-search.1 | 49 + .../cargo/src/etc/man/cargo-test.1 | 172 + .../cargo/src/etc/man/cargo-uninstall.1 | 56 + .../cargo/src/etc/man/cargo-update.1 | 80 + .../cargo/src/etc/man/cargo-version.1 | 31 + .../cargo/src/etc/man/cargo-yank.1 | 68 + .../cargo/src/etc/man/cargo.1 | 206 + .../cargo/tests/bad-config.rs | 1097 ++ .../cargo/tests/bad-manifest-path.rs | 375 + .../compile-benchmarks/cargo/tests/bench.rs | 1335 ++ .../cargo/tests/build-auth.rs | 213 + .../cargo/tests/build-lib.rs | 88 + .../cargo/tests/build-script-env.rs | 106 + .../cargo/tests/build-script.rs | 2734 ++++ .../compile-benchmarks/cargo/tests/build.rs | 3911 +++++ .../cargo/tests/cargo-features.rs | 273 + .../compile-benchmarks/cargo/tests/cargo.rs | 218 + .../cargo/tests/cargo_alias_config.rs | 123 + .../cargo/tests/cargotest/Cargo.toml | 19 + .../cargo/tests/cargotest/install.rs | 30 + .../cargo/tests/cargotest/lib.rs | 90 + .../tests/cargotest/support/cross_compile.rs | 131 + .../cargo/tests/cargotest/support/git.rs | 142 + .../cargo/tests/cargotest/support/mod.rs | 873 ++ .../cargo/tests/cargotest/support/paths.rs | 161 + .../cargo/tests/cargotest/support/publish.rs | 30 + .../cargo/tests/cargotest/support/registry.rs | 278 + .../compile-benchmarks/cargo/tests/cfg.rs | 374 + .../cargo/tests/check-style.sh | 3 + .../compile-benchmarks/cargo/tests/check.rs | 461 + .../compile-benchmarks/cargo/tests/clean.rs | 239 + .../cargo/tests/concurrent.rs | 500 + .../compile-benchmarks/cargo/tests/config.rs | 28 + .../cargo/tests/cross-compile.rs | 1007 ++ .../cargo/tests/cross-publish.rs | 100 + .../compile-benchmarks/cargo/tests/death.rs | 137 + .../cargo/tests/dep-info.rs | 84 + .../cargo/tests/directory.rs | 633 + .../compile-benchmarks/cargo/tests/doc.rs | 903 ++ .../cargo/tests/features.rs | 1238 ++ .../compile-benchmarks/cargo/tests/fetch.rs | 24 + .../cargo/tests/freshness.rs | 735 + .../cargo/tests/generate-lockfile.rs | 190 + .../compile-benchmarks/cargo/tests/git.rs | 2176 +++ .../compile-benchmarks/cargo/tests/init.rs | 433 + .../compile-benchmarks/cargo/tests/install.rs | 943 ++ .../cargo/tests/jobserver.rs | 183 + .../cargo/tests/local-registry.rs | 404 + .../cargo/tests/lockfile-compat.rs | 451 + .../compile-benchmarks/cargo/tests/login.rs | 128 + .../cargo/tests/metadata.rs | 692 + .../cargo/tests/net-config.rs | 58 + .../compile-benchmarks/cargo/tests/new.rs | 444 + .../cargo/tests/overrides.rs | 1277 ++ .../compile-benchmarks/cargo/tests/package.rs | 880 ++ .../compile-benchmarks/cargo/tests/patch.rs | 798 + .../compile-benchmarks/cargo/tests/path.rs | 988 ++ .../compile-benchmarks/cargo/tests/plugins.rs | 378 + .../cargo/tests/proc-macro.rs | 281 + .../cargo/tests/profiles.rs | 286 + .../compile-benchmarks/cargo/tests/publish.rs | 502 + .../cargo/tests/read-manifest.rs | 96 + .../cargo/tests/registry.rs | 1453 ++ .../cargo/tests/required-features.rs | 1000 ++ .../compile-benchmarks/cargo/tests/resolve.rs | 412 + .../compile-benchmarks/cargo/tests/run.rs | 883 ++ .../compile-benchmarks/cargo/tests/rustc.rs | 397 + .../compile-benchmarks/cargo/tests/rustdoc.rs | 170 + .../cargo/tests/rustdocflags.rs | 88 + 
.../cargo/tests/rustflags.rs | 1161 ++ .../compile-benchmarks/cargo/tests/search.rs | 280 + .../cargo/tests/small-fd-limits.rs | 109 + .../compile-benchmarks/cargo/tests/test.rs | 2937 ++++ .../cargo/tests/tool-paths.rs | 173 + .../cargo/tests/verify-project.rs | 50 + .../compile-benchmarks/cargo/tests/version.rs | 50 + .../cargo/tests/warn-on-failure.rs | 93 + .../cargo/tests/workspaces.rs | 1654 ++ .../cargo/url-1.5.1/.gitignore | 3 + .../cargo/url-1.5.1/.travis.yml | 9 + .../cargo/url-1.5.1/Cargo.toml | 49 + .../cargo/url-1.5.1/LICENSE-APACHE | 201 + .../cargo/url-1.5.1/LICENSE-MIT | 25 + .../cargo/url-1.5.1/Makefile | 6 + .../cargo/url-1.5.1/README.md | 10 + .../cargo/url-1.5.1/UPGRADING.md | 263 + .../cargo/url-1.5.1/appveyor.yml | 13 + .../cargo/url-1.5.1/docs/.nojekyll | 0 .../cargo/url-1.5.1/docs/404.html | 3 + .../cargo/url-1.5.1/docs/index.html | 3 + .../cargo/url-1.5.1/fuzz/.gitignore | 4 + .../cargo/url-1.5.1/fuzz/Cargo.toml | 21 + .../cargo/url-1.5.1/fuzz/fuzzers/parse.rs | 10 + .../cargo/url-1.5.1/github.png | Bin 0 -> 7786 bytes .../cargo/url-1.5.1/idna/Cargo.toml | 27 + .../url-1.5.1/idna/src/IdnaMappingTable.txt | 8350 ++++++++++ .../cargo/url-1.5.1/idna/src/lib.rs | 73 + .../idna/src/make_uts46_mapping_table.py | 139 + .../cargo/url-1.5.1/idna/src/punycode.rs | 212 + .../cargo/url-1.5.1/idna/src/uts46.rs | 415 + .../url-1.5.1/idna/src/uts46_mapping_table.rs | 12822 ++++++++++++++++ .../cargo/url-1.5.1/idna/tests/IdnaTest.txt | 7848 ++++++++++ .../cargo/url-1.5.1/idna/tests/punycode.rs | 65 + .../url-1.5.1/idna/tests/punycode_tests.json | 120 + .../cargo/url-1.5.1/idna/tests/tests.rs | 25 + .../cargo/url-1.5.1/idna/tests/unit.rs | 40 + .../cargo/url-1.5.1/idna/tests/uts46.rs | 124 + .../url-1.5.1/percent_encoding/Cargo.toml | 16 + .../cargo/url-1.5.1/percent_encoding/lib.rs | 442 + .../cargo/url-1.5.1/rust-url-todo | 14 + .../cargo/url-1.5.1/src/encoding.rs | 146 + .../cargo/url-1.5.1/src/form_urlencoded.rs | 369 + .../cargo/url-1.5.1/src/host.rs | 502 + .../cargo/url-1.5.1/src/lib.rs | 2403 +++ .../cargo/url-1.5.1/src/origin.rs | 130 + .../cargo/url-1.5.1/src/parser.rs | 1182 ++ .../cargo/url-1.5.1/src/path_segments.rs | 217 + .../cargo/url-1.5.1/src/quirks.rs | 217 + .../cargo/url-1.5.1/src/slicing.rs | 182 + .../cargo/url-1.5.1/tests/data.rs | 203 + .../cargo/url-1.5.1/tests/setters_tests.json | 1148 ++ .../cargo/url-1.5.1/tests/unit.rs | 480 + .../cargo/url-1.5.1/tests/urltestdata.json | 4445 ++++++ .../cargo/url-1.5.1/url_serde/Cargo.toml | 23 + .../cargo/url-1.5.1/url_serde/README.md | 11 + .../cargo/url-1.5.1/url_serde/src/lib.rs | 410 + 354 files changed, 129502 insertions(+) create mode 100644 collector/compile-benchmarks/cargo/.gitignore create mode 100644 collector/compile-benchmarks/cargo/.travis.yml create mode 100644 collector/compile-benchmarks/cargo/0-println.patch create mode 100644 collector/compile-benchmarks/cargo/ARCHITECTURE.md create mode 100644 collector/compile-benchmarks/cargo/CONTRIBUTING.md create mode 100644 collector/compile-benchmarks/cargo/Cargo.lock create mode 100644 collector/compile-benchmarks/cargo/Cargo.toml create mode 100644 collector/compile-benchmarks/cargo/LICENSE-APACHE create mode 100644 collector/compile-benchmarks/cargo/LICENSE-MIT create mode 100644 collector/compile-benchmarks/cargo/LICENSE-THIRD-PARTY create mode 100644 collector/compile-benchmarks/cargo/README.md create mode 100644 collector/compile-benchmarks/cargo/appveyor.yml create mode 100644 collector/compile-benchmarks/cargo/perf-config.json create mode 100644 
collector/compile-benchmarks/cargo/socket2-0.2.3/.appveyor.yml create mode 100644 collector/compile-benchmarks/cargo/socket2-0.2.3/.gitignore create mode 100644 collector/compile-benchmarks/cargo/socket2-0.2.3/.travis.yml create mode 100644 collector/compile-benchmarks/cargo/socket2-0.2.3/Cargo.toml create mode 100644 collector/compile-benchmarks/cargo/socket2-0.2.3/Cargo.toml.orig create mode 100644 collector/compile-benchmarks/cargo/socket2-0.2.3/LICENSE-APACHE create mode 100644 collector/compile-benchmarks/cargo/socket2-0.2.3/LICENSE-MIT create mode 100644 collector/compile-benchmarks/cargo/socket2-0.2.3/README.md create mode 100644 collector/compile-benchmarks/cargo/socket2-0.2.3/src/lib.rs create mode 100644 collector/compile-benchmarks/cargo/socket2-0.2.3/src/sockaddr.rs create mode 100644 collector/compile-benchmarks/cargo/socket2-0.2.3/src/socket.rs create mode 100644 collector/compile-benchmarks/cargo/socket2-0.2.3/src/sys/unix/mod.rs create mode 100644 collector/compile-benchmarks/cargo/socket2-0.2.3/src/sys/unix/weak.rs create mode 100644 collector/compile-benchmarks/cargo/socket2-0.2.3/src/sys/windows.rs create mode 100644 collector/compile-benchmarks/cargo/socket2-0.2.3/src/utils.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/bench.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/build.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/cargo.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/check.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/clean.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/doc.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/fetch.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/generate_lockfile.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/git_checkout.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/help.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/init.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/install.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/locate_project.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/login.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/metadata.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/new.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/owner.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/package.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/pkgid.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/publish.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/read_manifest.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/run.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/rustc.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/rustdoc.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/search.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/test.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/uninstall.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/update.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/verify_project.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/version.rs create mode 100644 collector/compile-benchmarks/cargo/src/bin/yank.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/core/dependency.rs 
create mode 100644 collector/compile-benchmarks/cargo/src/cargo/core/features.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/core/manifest.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/core/mod.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/core/package.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/core/package_id.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/core/package_id_spec.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/core/registry.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/core/resolver/encode.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/core/resolver/mod.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/core/shell.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/core/source.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/core/summary.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/core/workspace.rs create mode 100755 collector/compile-benchmarks/cargo/src/cargo/lib.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/ops/cargo_clean.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/ops/cargo_compile.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/ops/cargo_doc.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/ops/cargo_fetch.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/ops/cargo_generate_lockfile.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/ops/cargo_install.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/ops/cargo_new.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/ops/cargo_output_metadata.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/ops/cargo_package.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/ops/cargo_pkgid.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/ops/cargo_read_manifest.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/ops/cargo_run.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/compilation.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/context.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/custom_build.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/fingerprint.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/job.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/job_queue.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/layout.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/links.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/mod.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/output_depinfo.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/ops/cargo_test.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/ops/lockfile.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/ops/mod.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/ops/registry.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/ops/resolve.rs create mode 100644 
collector/compile-benchmarks/cargo/src/cargo/sources/config.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/sources/directory.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/sources/git/mod.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/sources/git/source.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/sources/git/utils.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/sources/mod.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/sources/path.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/sources/registry/index.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/sources/registry/local.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/sources/registry/mod.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/sources/registry/remote.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/sources/replaced.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/util/cfg.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/util/config.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/util/dependency_queue.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/util/errors.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/util/flock.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/util/graph.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/util/hex.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/util/important_paths.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/util/job.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/util/lazy_cell.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/util/lev_distance.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/util/machine_message.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/util/mod.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/util/network.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/util/paths.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/util/process_builder.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/util/profile.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/util/read2.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/util/rustc.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/util/sha256.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/util/to_semver.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/util/to_url.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/util/toml/mod.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/util/toml/targets.rs create mode 100644 collector/compile-benchmarks/cargo/src/cargo/util/vcs.rs create mode 100644 collector/compile-benchmarks/cargo/src/ci/dox.sh create mode 100644 collector/compile-benchmarks/cargo/src/crates-io/Cargo.toml create mode 100644 collector/compile-benchmarks/cargo/src/crates-io/lib.rs create mode 100644 collector/compile-benchmarks/cargo/src/doc/CNAME create mode 100644 collector/compile-benchmarks/cargo/src/doc/MIGRATION_MAP create mode 100644 collector/compile-benchmarks/cargo/src/doc/README.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/.gitignore create mode 100644 
collector/compile-benchmarks/cargo/src/doc/book/README.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/book.toml create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/SUMMARY.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/faq.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/getting-started/first-steps.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/getting-started/index.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/getting-started/installation.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/guide/cargo-toml-vs-cargo-lock.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/guide/continuous-integration.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/guide/creating-a-new-project.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/guide/dependencies.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/guide/index.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/guide/project-layout.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/guide/tests.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/guide/why-cargo-exists.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/guide/working-on-an-existing-project.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/images/Cargo-Logo-Small.png create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/images/auth-level-acl.png create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/images/org-level-acl.png create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/index.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/reference/build-scripts.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/reference/config.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/reference/environment-variables.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/reference/external-tools.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/reference/index.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/reference/manifest.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/reference/pkgid-spec.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/reference/publishing.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/reference/source-replacement.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/src/reference/specifying-dependencies.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/book/theme/favicon.png create mode 100644 collector/compile-benchmarks/cargo/src/doc/build-script.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/config.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/crates-io.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/environment-variables.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/external-tools.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/faq.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/favicon.ico create mode 100644 collector/compile-benchmarks/cargo/src/doc/footer.html create mode 100644 
collector/compile-benchmarks/cargo/src/doc/guide.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/header.html create mode 100644 collector/compile-benchmarks/cargo/src/doc/html-headers.html create mode 100644 collector/compile-benchmarks/cargo/src/doc/images/Cargo-Logo-Small.png create mode 100644 collector/compile-benchmarks/cargo/src/doc/images/auth-level-acl.png create mode 100644 collector/compile-benchmarks/cargo/src/doc/images/circle-with-i.png create mode 100644 collector/compile-benchmarks/cargo/src/doc/images/forkme.png create mode 100644 collector/compile-benchmarks/cargo/src/doc/images/noise.png create mode 100644 collector/compile-benchmarks/cargo/src/doc/images/org-level-acl.png create mode 100644 collector/compile-benchmarks/cargo/src/doc/images/search.png create mode 100644 collector/compile-benchmarks/cargo/src/doc/index.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/javascripts/all.js create mode 100644 collector/compile-benchmarks/cargo/src/doc/javascripts/prism.js create mode 100644 collector/compile-benchmarks/cargo/src/doc/manifest.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/pkgid-spec.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/policies.html create mode 100644 collector/compile-benchmarks/cargo/src/doc/source-replacement.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/specifying-dependencies.md create mode 100644 collector/compile-benchmarks/cargo/src/doc/stylesheets/all.css create mode 100644 collector/compile-benchmarks/cargo/src/doc/stylesheets/normalize.css create mode 100644 collector/compile-benchmarks/cargo/src/doc/stylesheets/prism.css create mode 100644 collector/compile-benchmarks/cargo/src/etc/_cargo create mode 100644 collector/compile-benchmarks/cargo/src/etc/cargo.bashcomp.sh create mode 100644 collector/compile-benchmarks/cargo/src/etc/man/cargo-bench.1 create mode 100644 collector/compile-benchmarks/cargo/src/etc/man/cargo-build.1 create mode 100644 collector/compile-benchmarks/cargo/src/etc/man/cargo-check.1 create mode 100644 collector/compile-benchmarks/cargo/src/etc/man/cargo-clean.1 create mode 100644 collector/compile-benchmarks/cargo/src/etc/man/cargo-doc.1 create mode 100644 collector/compile-benchmarks/cargo/src/etc/man/cargo-fetch.1 create mode 100644 collector/compile-benchmarks/cargo/src/etc/man/cargo-generate-lockfile.1 create mode 100644 collector/compile-benchmarks/cargo/src/etc/man/cargo-init.1 create mode 100644 collector/compile-benchmarks/cargo/src/etc/man/cargo-install.1 create mode 100644 collector/compile-benchmarks/cargo/src/etc/man/cargo-login.1 create mode 100644 collector/compile-benchmarks/cargo/src/etc/man/cargo-metadata.1 create mode 100644 collector/compile-benchmarks/cargo/src/etc/man/cargo-new.1 create mode 100644 collector/compile-benchmarks/cargo/src/etc/man/cargo-owner.1 create mode 100644 collector/compile-benchmarks/cargo/src/etc/man/cargo-package.1 create mode 100644 collector/compile-benchmarks/cargo/src/etc/man/cargo-pkgid.1 create mode 100644 collector/compile-benchmarks/cargo/src/etc/man/cargo-publish.1 create mode 100644 collector/compile-benchmarks/cargo/src/etc/man/cargo-run.1 create mode 100644 collector/compile-benchmarks/cargo/src/etc/man/cargo-rustc.1 create mode 100644 collector/compile-benchmarks/cargo/src/etc/man/cargo-rustdoc.1 create mode 100644 collector/compile-benchmarks/cargo/src/etc/man/cargo-search.1 create mode 100644 collector/compile-benchmarks/cargo/src/etc/man/cargo-test.1 create mode 100644 
collector/compile-benchmarks/cargo/src/etc/man/cargo-uninstall.1 create mode 100644 collector/compile-benchmarks/cargo/src/etc/man/cargo-update.1 create mode 100644 collector/compile-benchmarks/cargo/src/etc/man/cargo-version.1 create mode 100644 collector/compile-benchmarks/cargo/src/etc/man/cargo-yank.1 create mode 100644 collector/compile-benchmarks/cargo/src/etc/man/cargo.1 create mode 100644 collector/compile-benchmarks/cargo/tests/bad-config.rs create mode 100644 collector/compile-benchmarks/cargo/tests/bad-manifest-path.rs create mode 100644 collector/compile-benchmarks/cargo/tests/bench.rs create mode 100644 collector/compile-benchmarks/cargo/tests/build-auth.rs create mode 100644 collector/compile-benchmarks/cargo/tests/build-lib.rs create mode 100644 collector/compile-benchmarks/cargo/tests/build-script-env.rs create mode 100644 collector/compile-benchmarks/cargo/tests/build-script.rs create mode 100644 collector/compile-benchmarks/cargo/tests/build.rs create mode 100644 collector/compile-benchmarks/cargo/tests/cargo-features.rs create mode 100644 collector/compile-benchmarks/cargo/tests/cargo.rs create mode 100644 collector/compile-benchmarks/cargo/tests/cargo_alias_config.rs create mode 100644 collector/compile-benchmarks/cargo/tests/cargotest/Cargo.toml create mode 100644 collector/compile-benchmarks/cargo/tests/cargotest/install.rs create mode 100644 collector/compile-benchmarks/cargo/tests/cargotest/lib.rs create mode 100644 collector/compile-benchmarks/cargo/tests/cargotest/support/cross_compile.rs create mode 100644 collector/compile-benchmarks/cargo/tests/cargotest/support/git.rs create mode 100644 collector/compile-benchmarks/cargo/tests/cargotest/support/mod.rs create mode 100644 collector/compile-benchmarks/cargo/tests/cargotest/support/paths.rs create mode 100644 collector/compile-benchmarks/cargo/tests/cargotest/support/publish.rs create mode 100644 collector/compile-benchmarks/cargo/tests/cargotest/support/registry.rs create mode 100644 collector/compile-benchmarks/cargo/tests/cfg.rs create mode 100755 collector/compile-benchmarks/cargo/tests/check-style.sh create mode 100644 collector/compile-benchmarks/cargo/tests/check.rs create mode 100644 collector/compile-benchmarks/cargo/tests/clean.rs create mode 100644 collector/compile-benchmarks/cargo/tests/concurrent.rs create mode 100644 collector/compile-benchmarks/cargo/tests/config.rs create mode 100644 collector/compile-benchmarks/cargo/tests/cross-compile.rs create mode 100644 collector/compile-benchmarks/cargo/tests/cross-publish.rs create mode 100644 collector/compile-benchmarks/cargo/tests/death.rs create mode 100644 collector/compile-benchmarks/cargo/tests/dep-info.rs create mode 100644 collector/compile-benchmarks/cargo/tests/directory.rs create mode 100644 collector/compile-benchmarks/cargo/tests/doc.rs create mode 100644 collector/compile-benchmarks/cargo/tests/features.rs create mode 100644 collector/compile-benchmarks/cargo/tests/fetch.rs create mode 100644 collector/compile-benchmarks/cargo/tests/freshness.rs create mode 100644 collector/compile-benchmarks/cargo/tests/generate-lockfile.rs create mode 100644 collector/compile-benchmarks/cargo/tests/git.rs create mode 100644 collector/compile-benchmarks/cargo/tests/init.rs create mode 100644 collector/compile-benchmarks/cargo/tests/install.rs create mode 100644 collector/compile-benchmarks/cargo/tests/jobserver.rs create mode 100644 collector/compile-benchmarks/cargo/tests/local-registry.rs create mode 100644 
collector/compile-benchmarks/cargo/tests/lockfile-compat.rs create mode 100644 collector/compile-benchmarks/cargo/tests/login.rs create mode 100644 collector/compile-benchmarks/cargo/tests/metadata.rs create mode 100644 collector/compile-benchmarks/cargo/tests/net-config.rs create mode 100644 collector/compile-benchmarks/cargo/tests/new.rs create mode 100644 collector/compile-benchmarks/cargo/tests/overrides.rs create mode 100644 collector/compile-benchmarks/cargo/tests/package.rs create mode 100644 collector/compile-benchmarks/cargo/tests/patch.rs create mode 100644 collector/compile-benchmarks/cargo/tests/path.rs create mode 100644 collector/compile-benchmarks/cargo/tests/plugins.rs create mode 100644 collector/compile-benchmarks/cargo/tests/proc-macro.rs create mode 100644 collector/compile-benchmarks/cargo/tests/profiles.rs create mode 100644 collector/compile-benchmarks/cargo/tests/publish.rs create mode 100644 collector/compile-benchmarks/cargo/tests/read-manifest.rs create mode 100644 collector/compile-benchmarks/cargo/tests/registry.rs create mode 100644 collector/compile-benchmarks/cargo/tests/required-features.rs create mode 100644 collector/compile-benchmarks/cargo/tests/resolve.rs create mode 100644 collector/compile-benchmarks/cargo/tests/run.rs create mode 100644 collector/compile-benchmarks/cargo/tests/rustc.rs create mode 100644 collector/compile-benchmarks/cargo/tests/rustdoc.rs create mode 100644 collector/compile-benchmarks/cargo/tests/rustdocflags.rs create mode 100644 collector/compile-benchmarks/cargo/tests/rustflags.rs create mode 100644 collector/compile-benchmarks/cargo/tests/search.rs create mode 100644 collector/compile-benchmarks/cargo/tests/small-fd-limits.rs create mode 100644 collector/compile-benchmarks/cargo/tests/test.rs create mode 100644 collector/compile-benchmarks/cargo/tests/tool-paths.rs create mode 100644 collector/compile-benchmarks/cargo/tests/verify-project.rs create mode 100644 collector/compile-benchmarks/cargo/tests/version.rs create mode 100644 collector/compile-benchmarks/cargo/tests/warn-on-failure.rs create mode 100644 collector/compile-benchmarks/cargo/tests/workspaces.rs create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/.gitignore create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/.travis.yml create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/Cargo.toml create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/LICENSE-APACHE create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/LICENSE-MIT create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/Makefile create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/README.md create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/UPGRADING.md create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/appveyor.yml create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/docs/.nojekyll create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/docs/404.html create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/docs/index.html create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/fuzz/.gitignore create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/fuzz/Cargo.toml create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/fuzz/fuzzers/parse.rs create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/github.png create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/idna/Cargo.toml create mode 100644 
collector/compile-benchmarks/cargo/url-1.5.1/idna/src/IdnaMappingTable.txt create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/idna/src/lib.rs create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/idna/src/make_uts46_mapping_table.py create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/idna/src/punycode.rs create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/idna/src/uts46.rs create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/idna/src/uts46_mapping_table.rs create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/idna/tests/IdnaTest.txt create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/idna/tests/punycode.rs create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/idna/tests/punycode_tests.json create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/idna/tests/tests.rs create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/idna/tests/unit.rs create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/idna/tests/uts46.rs create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/percent_encoding/Cargo.toml create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/percent_encoding/lib.rs create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/rust-url-todo create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/src/encoding.rs create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/src/form_urlencoded.rs create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/src/host.rs create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/src/lib.rs create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/src/origin.rs create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/src/parser.rs create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/src/path_segments.rs create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/src/quirks.rs create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/src/slicing.rs create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/tests/data.rs create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/tests/setters_tests.json create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/tests/unit.rs create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/tests/urltestdata.json create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/url_serde/Cargo.toml create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/url_serde/README.md create mode 100644 collector/compile-benchmarks/cargo/url-1.5.1/url_serde/src/lib.rs diff --git a/collector/compile-benchmarks/README.md b/collector/compile-benchmarks/README.md index bfd2b7c86..b59648a7a 100644 --- a/collector/compile-benchmarks/README.md +++ b/collector/compile-benchmarks/README.md @@ -140,6 +140,9 @@ Rust code being written today. - **encoding**: An old crate providing character encoding support. Contains some large tables. +- **cargo**: An old version of Cargo, corresponding to the 1.24.0 Rust release. + Two of its dependencies (`socket2` and `url`) had to be vendored, to provide patches + so that it can compile with old rustc. - **futures**: v0.1.0 of the popular `futures` crate, which was used by many Rust programs. Newer versions of this crate (e.g. v0.3.21 from February 2021) contain very little code, instead relying on sub-crates. 
This makes them less
diff --git a/collector/compile-benchmarks/REUSE.toml b/collector/compile-benchmarks/REUSE.toml
index e16ba1dc9..e038422e1 100644
--- a/collector/compile-benchmarks/REUSE.toml
+++ b/collector/compile-benchmarks/REUSE.toml
@@ -27,6 +27,21 @@ path = "bitmaps-3.2.1-new-solver/**"
 SPDX-License-Identifier = "MPL-2.0"
 SPDX-FileCopyrightText = "Bodil Stokke"
 
+[[annotations]]
+path = "cargo/**"
+SPDX-FileCopyrightText = "The Rust Project Developers (see https://thanks.rust-lang.org)"
+SPDX-License-Identifier = "MIT OR Apache-2.0"
+
+[[annotations]]
+path = "cargo/socket2-0.2.3/**"
+SPDX-FileCopyrightText = "Alex Crichton"
+SPDX-License-Identifier = "MIT OR Apache-2.0"
+
+[[annotations]]
+path = "cargo/url-1.5.1/**"
+SPDX-FileCopyrightText = "The rust-url developers"
+SPDX-License-Identifier = "MIT OR Apache-2.0"
+
 [[annotations]]
 path = "cargo-0.87.1/**"
 SPDX-FileCopyrightText = "The Rust Project Developers (see https://thanks.rust-lang.org)"
 SPDX-License-Identifier = "MIT OR Apache-2.0"
diff --git a/collector/compile-benchmarks/cargo/.gitignore b/collector/compile-benchmarks/cargo/.gitignore
new file mode 100644
index 000000000..df490beb2
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/.gitignore
@@ -0,0 +1,14 @@
+/target
+.cargo
+/config.stamp
+/Makefile
+/config.mk
+src/doc/build
+src/etc/*.pyc
+src/registry/target
+src/registry/Cargo.lock
+rustc
+__pycache__
+.idea/
+*.iml
+*.swp
diff --git a/collector/compile-benchmarks/cargo/.travis.yml b/collector/compile-benchmarks/cargo/.travis.yml
new file mode 100644
index 000000000..c2f7ca36c
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/.travis.yml
@@ -0,0 +1,60 @@
+language: rust
+rust: stable
+sudo: required
+dist: trusty
+
+git:
+  depth: 1
+
+cache:
+  directories:
+    - $HOME/.cargo/bin/
+
+matrix:
+  include:
+    - env: TARGET=x86_64-unknown-linux-gnu
+           ALT=i686-unknown-linux-gnu
+    - env: TARGET=x86_64-apple-darwin
+           ALT=i686-apple-darwin
+      os: osx
+
+    - env: TARGET=x86_64-unknown-linux-gnu
+           ALT=i686-unknown-linux-gnu
+      rust: beta
+
+    - env: TARGET=x86_64-unknown-linux-gnu
+           ALT=i686-unknown-linux-gnu
+      rust: nightly
+      install:
+        - mdbook --help || cargo install mdbook --force
+      script:
+        - cargo test
+        - cargo doc --no-deps
+        - sh src/ci/dox.sh
+      after_success: |
+        [ $TRAVIS_BRANCH = master ] &&
+        [ $TRAVIS_PULL_REQUEST = false ] &&
+        [ $(uname -s) = Linux ] &&
+        pip install ghp-import --user &&
+        $HOME/.local/bin/ghp-import -n target/doc &&
+        git push -qf https://${GH_TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git gh-pages 2>&1 >/dev/null
+
+  exclude:
+    - rust: stable
+
+before_script:
+  - rustup target add $ALT
+script:
+  - cargo test
+
+env:
+  global:
+    - secure: "hWheSLilMM4DXChfSy2XsDlLw338X2o+fw8bE590xxU2TzngFW8GUfq7lGfZEp/l4SNNIS6ROU/igyttCZtxZMANZ4aMQZR5E8Fp4yPOyE1pZLDH/LdQVXnROsfburQJeq+GIYIbZ01Abzh5ClpgLg5KX0H627uj063zZ7Ljo/w="
+
+notifications:
+  email:
+    on_success: never
+addons:
+  apt:
+    packages:
+      - gcc-multilib
diff --git a/collector/compile-benchmarks/cargo/0-println.patch b/collector/compile-benchmarks/cargo/0-println.patch
new file mode 100644
index 000000000..d3cff140e
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/0-println.patch
@@ -0,0 +1,13 @@
+diff --git a/src/cargo/lib.rs b/src/cargo/lib.rs
+index f20118b8..350d8e47 100755
+--- a/src/cargo/lib.rs
++++ b/src/cargo/lib.rs
+@@ -98,6 +98,8 @@ impl fmt::Display for VersionInfo {
+             }
+         };
+ 
++        println!("testing");
++
+         if let Some(ref cfg) = self.cfg_info {
+             if let Some(ref ci) = cfg.commit_info {
+                 write!(f, " ({} {})",
diff --git a/collector/compile-benchmarks/cargo/ARCHITECTURE.md b/collector/compile-benchmarks/cargo/ARCHITECTURE.md
new file mode 100644
index 000000000..11e3e1218
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/ARCHITECTURE.md
@@ -0,0 +1,90 @@
+# Cargo Architecture
+
+This document gives a high-level overview of Cargo internals. You may
+find it useful if you want to contribute to Cargo or if you are
+interested in the inner workings of Cargo.
+
+
+## Subcommands
+
+Cargo is organized as a set of subcommands. All subcommands live in
+the `src/bin` directory. However, only the `src/bin/cargo.rs` file
+produces an executable; the other files inside the `bin` directory are
+submodules. See `src/bin/cargo.rs` for how these subcommands get wired
+up with the main executable.
+
+A typical subcommand, such as `src/bin/build.rs`, parses command line
+options, reads the configuration files, discovers the Cargo project in
+the current directory and delegates the actual implementation to one
+of the functions in `src/cargo/ops/mod.rs`. This short file is a good
+place to find out about most of the things that Cargo can do.
+
+
+## Important Data Structures
+
+There are some important data structures which are used throughout
+Cargo.
+
+`Config` is available almost everywhere and holds "global"
+information, such as `CARGO_HOME` or configuration from
+`.cargo/config` files. The `shell` method of `Config` is the entry
+point for printing status messages and other info to the console.
+
+`Workspace` is the description of the workspace for the current
+working directory. Each workspace contains at least one
+`Package`. Each package corresponds to a single `Cargo.toml`, and may
+define several `Target`s, such as the library, binaries, integration
+tests or examples. Targets are crates (each target defines a crate
+root, like `src/lib.rs` or `examples/foo.rs`) and are what is actually
+compiled by `rustc`.
+
+A typical package defines a single library target and several
+auxiliary ones. Packages are a unit of dependency in Cargo, and when
+package `foo` depends on package `bar`, that means that each target
+from `foo` needs the library target from `bar`.
+
+`PackageId` is the unique identifier of a (possibly remote)
+package. It consists of three components: name, version and source
+id. The source is the place where the source code for the package
+comes from. Typical sources are crates.io, a git repository or a
+folder on the local hard drive.
+
+`Resolve` is the representation of a directed acyclic graph of package
+dependencies, which uses `PackageId`s for nodes. This is the data
+structure that is saved to the lock file. If there is no lockfile,
+Cargo constructs a resolve by finding a graph of packages which
+matches the declared dependency specifications according to semver.
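+
+As a rough, illustrative model only (the real types live under
+`src/cargo/core/` and differ in detail), a `PackageId` with its three
+components can be pictured like this:
+
+```rust
+// Toy model of a package identifier: name, version, and source id.
+// Not Cargo's actual definition; just a sketch of the three components.
+#[allow(dead_code)]
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+enum SourceKind {
+    CratesIo,
+    Git(String),  // repository URL
+    Path(String), // folder on the local hard drive
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+struct PackageId {
+    name: String,
+    version: String, // stand-in for a parsed semver version
+    source: SourceKind,
+}
+
+fn main() {
+    // "url 1.5.1 from crates.io", as a toy PackageId:
+    let id = PackageId {
+        name: "url".to_string(),
+        version: "1.5.1".to_string(),
+        source: SourceKind::CratesIo,
+    };
+    println!("{:?}", id);
+}
+```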
+
+
+## Persistence
+
+Cargo is a non-daemon command line application, which means that all
+the information used by Cargo must be persisted on the hard drive. The
+main sources of information are `Cargo.toml` and `Cargo.lock` files,
+`.cargo/config` configuration files and the globally shared registry
+of packages downloaded from crates.io, usually located at
+`~/.cargo/registry`. See `src/sources/registry` for the specifics of
+the registry storage format.
+
+
+## Concurrency
+
+Cargo is mostly single threaded. The only concurrency inside a single
+instance of Cargo happens during compilation, when several instances
+of `rustc` are invoked in parallel to build independent targets.
+However, there can be several instances of the Cargo process running
+concurrently on the system. Cargo guarantees that this is always safe
+by using file locks when accessing potentially shared data like the
+registry or the target directory.
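+
+The locking idea can be sketched with the `fs2` crate, which appears
+among the dependencies in this version's `Cargo.lock`. This is a
+minimal illustration of advisory file locking, not the actual
+implementation in `src/cargo/util/flock.rs`:
+
+```rust
+use std::fs::OpenOptions;
+
+use fs2::FileExt;
+
+fn main() -> std::io::Result<()> {
+    let lock = OpenOptions::new()
+        .create(true)
+        .write(true)
+        .open("/tmp/example.lock")?; // illustrative lock-file path
+    // Blocks until no other process holds the lock, so two concurrent
+    // processes cannot mutate the shared data at the same time.
+    lock.lock_exclusive()?;
+    // ... read or modify the shared data here ...
+    lock.unlock()?;
+    Ok(())
+}
+```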
+
+
+## Tests
+
+Cargo has an impressive test suite located in the `tests` folder. Most
+of the tests are integration tests: a project structure with a
+`Cargo.toml` and Rust source code is created in a temporary directory,
+the `cargo` binary is invoked via `std::process::Command` and then
+stdout and stderr are verified against the expected output. To
+simplify testing, several macros of the form `[MACRO]` are used in the
+expected output. For example, `[..]` matches any string and `[/]`
+matches `/` on Unixes and `\` on Windows.
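+
+Stripped of those helpers, the pattern looks roughly like the sketch
+below. It uses only the standard library and is an illustration of the
+approach, not an actual test from the suite (the real tests go through
+the internal `cargotest` crate):
+
+```rust
+use std::process::Command;
+
+#[test]
+fn cargo_version_prints_version() {
+    // Invoke the binary under test and capture its output.
+    let output = Command::new("cargo")
+        .arg("--version")
+        .output()
+        .expect("failed to run cargo");
+    assert!(output.status.success());
+    let stdout = String::from_utf8_lossy(&output.stdout);
+    // A cargotest-style check would match a template like "cargo [..]"
+    // instead of a hard-coded prefix.
+    assert!(stdout.starts_with("cargo "));
+}
+```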
[**E-mentor**][E-mentor] issues also + have some instructions on how to get started. + +* Red, **I**-prefixed labels indicate the **importance** of the issue. The + [I-nominated][inom] label indicates that an issue has been nominated for + prioritizing at the next triage meeting. + +* Purple gray, **O**-prefixed labels are the **operating system** or platform + that this issue is specific to. + +* Orange, **P**-prefixed labels indicate a bug's **priority**. These labels + are only assigned during triage meetings and replace the [I-nominated][inom] + label. + +* The light orange **relnotes** label marks issues that should be documented in + the release notes of the next release. + + +[githelp]: https://dont-be-afraid-to-commit.readthedocs.io/en/latest/git/commandlinegit.html +[development-models]: https://help.github.com/articles/about-collaborative-development-models/ +[gist]: https://gist.github.com/ +[new-issues]: https://github.com/rust-lang/cargo/issues/new +[mergequeue]: https://buildbot2.rust-lang.org/homu/queue/cargo +[security policy]: https://www.rust-lang.org/security.html +[lru]: https://github.com/rust-lang/cargo/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-asc +[E-easy]: https://github.com/rust-lang/cargo/labels/E-easy +[E-mentor]: https://github.com/rust-lang/cargo/labels/E-mentor +[Code of Conduct]: https://www.rust-lang.org/conduct.html +[IRC]: https://kiwiirc.com/client/irc.mozilla.org/cargo diff --git a/collector/compile-benchmarks/cargo/Cargo.lock b/collector/compile-benchmarks/cargo/Cargo.lock new file mode 100644 index 000000000..714bd6e1d --- /dev/null +++ b/collector/compile-benchmarks/cargo/Cargo.lock @@ -0,0 +1,1185 @@ +[root] +name = "cargo" +version = "0.23.0" +dependencies = [ + "atty 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "bufstream 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "cargotest 0.1.0", + "core-foundation 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "crates-io 0.12.0", + "crossbeam 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crypto-hash 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "curl 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "docopt 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "filetime 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "flate2 0.2.20 (registry+https://github.com/rust-lang/crates.io-index)", + "fs2 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "git2 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)", + "git2-curl 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "hamcrest 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "hex 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "home 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ignore 0.2.2 
(registry+https://github.com/rust-lang/crates.io-index)", + "jobserver 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "libgit2-sys 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "psapi-sys 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "same-file 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "semver 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_ignored 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "shell-escape 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tar 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", + "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "termcolor 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "advapi32-sys" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "aho-corasick" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "aho-corasick" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "atty" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "backtrace" +version = "0.3.3" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "backtrace-sys 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "dbghelp-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-demangle 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "backtrace-sys" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "bitflags" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "bitflags" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "bufstream" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "cargotest" +version = "0.1.0" +dependencies = [ + "cargo 0.23.0", + "filetime 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "flate2 0.2.20 (registry+https://github.com/rust-lang/crates.io-index)", + "git2 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)", + "hamcrest 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "hex 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tar 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "cc" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "cfg-if" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "cmake" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "commoncrypto" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "commoncrypto-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "commoncrypto-sys" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "conv" +version = "0.3.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "custom_derive 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "core-foundation" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "core-foundation-sys 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "core-foundation-sys" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "crates-io" +version = "0.12.0" +dependencies = [ + "curl 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "crossbeam" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "crossbeam" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "crypto-hash" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "advapi32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "commoncrypto 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "hex 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl 0.9.19 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "curl" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "curl-sys 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-probe 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.19 (registry+https://github.com/rust-lang/crates.io-index)", + "socket2 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "curl-sys" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "libz-sys 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.19 
(registry+https://github.com/rust-lang/crates.io-index)", + "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "vcpkg 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "custom_derive" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "dbghelp-sys" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "docopt" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", + "strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "dtoa" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "env_logger" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "error-chain" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "backtrace 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "filetime" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "flate2" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "miniz-sys 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "fnv" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "foreign-types" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "fs2" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 
(registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "git2" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "libgit2-sys 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-probe 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.19 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "git2-curl" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "curl 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "git2 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "glob" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "globset" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "aho-corasick 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", + "fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "hamcrest" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "num 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.1.80 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "hex" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "home" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "advapi32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "scopeguard 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "userenv-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "idna" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-normalization 0.1.5 
(registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "ignore" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", + "globset 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "thread_local 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "walkdir 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "itoa" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "jobserver" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "kernel32-sys" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "lazy_static" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "libc" +version = "0.2.31" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "libgit2-sys" +version = "0.6.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "cmake 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)", + "curl-sys 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "libssh2-sys 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "libz-sys 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.19 (registry+https://github.com/rust-lang/crates.io-index)", + "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "libssh2-sys" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cmake 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "libz-sys 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.19 (registry+https://github.com/rust-lang/crates.io-index)", + "pkg-config 0.3.9 
(registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "libz-sys" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "vcpkg 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "log" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "magenta" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "conv 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "magenta-sys 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "magenta-sys" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "matches" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "memchr" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "memchr" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "miniz-sys" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "miow" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "net2" +version = "0.2.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "num" +version = "0.1.40" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "num-bigint 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", + "num-complex 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", + "num-integer 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", + "num-iter 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", + "num-rational 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "num-bigint" +version = "0.1.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "num-integer 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "num-complex" +version = "0.1.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "num-traits 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "num-integer" +version = "0.1.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "num-traits 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "num-iter" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "num-integer 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "num-rational" +version = "0.1.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "num-bigint 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", + "num-integer 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "num-traits" +version = "0.1.40" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "num_cpus" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "openssl" +version = "0.9.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)", + "foreign-types 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 0.2.9 
(registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.19 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "openssl-probe" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "openssl-sys" +version = "0.9.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "vcpkg 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "percent-encoding" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "pkg-config" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "psapi-sys" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "quote" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "rand" +version = "0.3.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "magenta 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "redox_syscall" +version = "0.1.31" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "redox_termios" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "redox_syscall 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "regex" +version = "0.1.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "aho-corasick 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "thread_local 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "utf8-ranges 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "regex" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "aho-corasick 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "thread_local 0.3.4 
(registry+https://github.com/rust-lang/crates.io-index)", + "utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "regex-syntax" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "regex-syntax" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "rustc-demangle" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "rustc-serialize" +version = "0.3.24" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "same-file" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "scoped-tls" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "scopeguard" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "semver" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "serde" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "serde_derive" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive_internals 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "serde_derive_internals" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)", + "synom 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "serde_ignored" +version = "0.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "serde_json" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "dtoa 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.15 
(registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "shell-escape" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "socket2" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "strsim" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "syn" +version = "0.11.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "synom 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "synom" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tar" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "filetime 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tempdir" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rand 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "termcolor" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "wincolor 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "termion" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "thread-id" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "thread_local" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "thread-id 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = 
"thread_local" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", + "unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "toml" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "unicode-bidi" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "unicode-normalization" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "unicode-xid" +version = "0.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "unreachable" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "url" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "idna 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "percent-encoding 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "userenv-sys" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "utf8-ranges" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "utf8-ranges" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "vcpkg" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "void" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "walkdir" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "same-file 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "winapi" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "winapi-build" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "wincolor" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ 
+ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "ws2_32-sys" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[metadata] +"checksum advapi32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e06588080cb19d0acb6739808aafa5f26bfb2ca015b2b6370028b44cf7cb8a9a" +"checksum aho-corasick 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ca972c2ea5f742bfce5687b9aef75506a764f61d37f8f649047846a9686ddb66" +"checksum aho-corasick 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "500909c4f87a9e52355b26626d890833e9e1d53ac566db76c36faa984b889699" +"checksum atty 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "21e50800ec991574876040fff8ee46b136a53e985286fbe6a3bdfe6421b78860" +"checksum backtrace 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "99f2ce94e22b8e664d95c57fff45b98a966c2252b60691d0b7aeeccd88d70983" +"checksum backtrace-sys 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "c63ea141ef8fdb10409d0f5daf30ac51f84ef43bff66f16627773d2a292cd189" +"checksum bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aad18937a628ec6abcd26d1489012cc0e18c21798210f491af69ded9b881106d" +"checksum bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5" +"checksum bufstream 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "f2f382711e76b9de6c744cc00d0497baba02fb00a787f088c879f01d09468e32" +"checksum cc 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7db2f146208d7e0fbee761b09cd65a7f51ccc38705d4e7262dad4d73b12a76b1" +"checksum cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d4c819a1287eb618df47cc647173c5c4c66ba19d888a6e50d605672aed3140de" +"checksum cmake 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)" = "357c07e7a1fc95732793c1edb5901e1a1f305cfcf63a90eb12dbd22bdb6b789d" +"checksum commoncrypto 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d056a8586ba25a1e4d61cb090900e495952c7886786fc55f909ab2f819b69007" +"checksum commoncrypto-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1fed34f46747aa73dfaa578069fd8279d2818ade2b55f38f22a9401c7f4083e2" +"checksum conv 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "78ff10625fd0ac447827aa30ea8b861fead473bb60aeb73af6c1c58caf0d1299" +"checksum core-foundation 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "5909502e547762013619f4c4e01cc7393c20fe2d52d7fa471c1210adb2320dc7" +"checksum core-foundation-sys 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "bc9fb3d6cb663e6fd7cf1c63f9b144ee2b1e4a78595a0451dd34bff85b9a3387" +"checksum crossbeam 0.2.10 
(registry+https://github.com/rust-lang/crates.io-index)" = "0c5ea215664ca264da8a9d9c3be80d2eaf30923c259d03e870388eb927508f97" +"checksum crossbeam 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8837ab96533202c5b610ed44bc7f4183e7957c1c8f56e8cc78bb098593c8ba0a" +"checksum crypto-hash 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "34903878eec1694faf53cae8473a088df333181de421d4d3d48061d6559fe602" +"checksum curl 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7034c534a1d7d22f7971d6088aa9d281d219ef724026c3428092500f41ae9c2c" +"checksum curl-sys 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "4bee31aa3a079d5f3ff9579ea4dcfb1b1a17a40886f5f467436d383e78134b55" +"checksum custom_derive 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "ef8ae57c4978a2acd8b869ce6b9ca1dfe817bff704c220209fdef2c0b75a01b9" +"checksum dbghelp-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "97590ba53bcb8ac28279161ca943a924d1fd4a8fb3fa63302591647c4fc5b850" +"checksum docopt 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3b5b93718f8b3e5544fcc914c43de828ca6c6ace23e0332c6080a2977b49787a" +"checksum dtoa 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "09c3753c3db574d215cba4ea76018483895d7bff25a31b49ba45db21c48e50ab" +"checksum env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3ddf21e73e016298f5cb37d6ef8e8da8e39f91f9ec8b0df44b7deb16a9f8cd5b" +"checksum error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ff511d5dc435d703f4971bc399647c9bc38e20cb41452e3b9feb4765419ed3f3" +"checksum filetime 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "6ab199bf38537c6f38792669e081e0bb278b9b7405bba2642e4e5d15bf732c0e" +"checksum flate2 0.2.20 (registry+https://github.com/rust-lang/crates.io-index)" = "e6234dd4468ae5d1e2dbb06fe2b058696fdc50a339c68a393aefbf00bc81e423" +"checksum fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6cc484842f1e2884faf56f529f960cc12ad8c71ce96cc7abba0a067c98fee344" +"checksum foreign-types 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3e4056b9bd47f8ac5ba12be771f77a0dae796d1bbaaf5fd0b9c2d38b69b8a29d" +"checksum fs2 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9ab76cfd2aaa59b7bf6688ad9ba15bbae64bff97f04ea02144cfd3443e5c2866" +"checksum git2 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)" = "0c1c0203d653f4140241da0c1375a404f0a397249ec818cd2076c6280c50f6fa" +"checksum git2-curl 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "68676bc784bf0bef83278898929bf64a251e87c0340723d0b93fa096c9c5bf8e" +"checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb" +"checksum globset 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "feeb1b6840809ef5efcf7a4a990bc4e1b7ee3df8cf9e2379a75aeb2ba42ac9c3" +"checksum hamcrest 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bf088f042a467089e9baa4972f57f9247e42a0cc549ba264c7a04fbb8ecb89d4" +"checksum hex 0.2.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "d6a22814455d41612f41161581c2883c0c6a1c41852729b17d5ed88f01e153aa" +"checksum home 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9f25ae61099d8f3fee8b483df0bd4ecccf4b2731897aad40d50eca1b641fe6db" +"checksum idna 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "014b298351066f1512874135335d62a789ffe78a9974f94b43ed5621951eaf7d" +"checksum ignore 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b3fcaf2365eb14b28ec7603c98c06cc531f19de9eb283d89a3dff8417c8c99f5" +"checksum itoa 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8324a32baf01e2ae060e9de58ed0bc2320c9a2833491ee36cd3b4c414de4db8c" +"checksum jobserver 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "443ae8bc0af6c106e6e8b77e04684faecc1a5ce94e058f4c2b0a037b0ea1b133" +"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" +"checksum lazy_static 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)" = "c9e5e58fa1a4c3b915a561a78a22ee0cac6ab97dca2504428bc1cb074375f8d5" +"checksum libc 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)" = "d1419b2939a0bc44b77feb34661583c7546b532b192feab36249ab584b86856c" +"checksum libgit2-sys 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)" = "6f74b4959cef96898f5123148724fc7dee043b9a6b99f219d948851bfbe53cb2" +"checksum libssh2-sys 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0db4ec23611747ef772db1c4d650f8bd762f07b461727ec998f953c614024b75" +"checksum libz-sys 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)" = "44ebbc760fd2d2f4d93de09a0e13d97e057612052e871da9985cedcb451e6bd5" +"checksum log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "880f77541efa6e5cc74e76910c9884d9859683118839d6a1dc3b11e63512565b" +"checksum magenta 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4bf0336886480e671965f794bc9b6fce88503563013d1bfb7a502c81fe3ac527" +"checksum magenta-sys 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "40d014c7011ac470ae28e2f76a02bfea4a8480f73e701353b49ad7a8d75f4699" +"checksum matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "100aabe6b8ff4e4a7e32c1c13523379802df0772b82466207ac25b013f193376" +"checksum memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d8b629fb514376c675b98c1421e80b151d3817ac42d7c667717d282761418d20" +"checksum memchr 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1dbccc0e46f1ea47b9f17e6d67c5a96bd27030519c519c9c91327e31275a47b4" +"checksum miniz-sys 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "609ce024854aeb19a0ef7567d348aaa5a746b32fb72e336df7fcc16869d7e2b4" +"checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" +"checksum net2 0.2.31 (registry+https://github.com/rust-lang/crates.io-index)" = "3a80f842784ef6c9a958b68b7516bc7e35883c614004dd94959a4dca1b716c09" +"checksum num 0.1.40 
(registry+https://github.com/rust-lang/crates.io-index)" = "a311b77ebdc5dd4cf6449d81e4135d9f0e3b153839ac90e648a8ef538f923525" +"checksum num-bigint 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "8fd0f8dbb4c0960998958a796281d88c16fbe68d87b1baa6f31e2979e81fd0bd" +"checksum num-complex 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "503e668405c5492d67cf662a81e05be40efe2e6bcf10f7794a07bd9865e704e6" +"checksum num-integer 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)" = "d1452e8b06e448a07f0e6ebb0bb1d92b8890eea63288c0b627331d53514d0fba" +"checksum num-iter 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)" = "7485fcc84f85b4ecd0ea527b14189281cf27d60e583ae65ebc9c088b13dffe01" +"checksum num-rational 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)" = "288629c76fac4b33556f4b7ab57ba21ae202da65ba8b77466e6d598e31990790" +"checksum num-traits 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "99843c856d68d8b4313b03a17e33c4bb42ae8f6610ea81b28abe076ac721b9b0" +"checksum num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "514f0d73e64be53ff320680ca671b64fe3fb91da01e1ae2ddc99eb51d453b20d" +"checksum openssl 0.9.19 (registry+https://github.com/rust-lang/crates.io-index)" = "816914b22eb15671d62c73442a51978f311e911d6a6f6cbdafa6abce1b5038fc" +"checksum openssl-probe 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d98df0270d404ccd3c050a41d579c52d1db15375168bb3471e04ec0f5f378daf" +"checksum openssl-sys 0.9.19 (registry+https://github.com/rust-lang/crates.io-index)" = "1e4c63a7d559c1e5afa6d6a9e6fa34bbc5f800ffc9ae08b72c605420b0c4f5e8" +"checksum percent-encoding 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de154f638187706bde41d9b4738748933d64e6b37bdbffc0b47a97d16a6ae356" +"checksum pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "3a8b4c6b8165cd1a1cd4b9b120978131389f64bdaf456435caa41e630edba903" +"checksum psapi-sys 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "abcd5d1a07d360e29727f757a9decb3ce8bc6e0efa8969cfaad669a8317a2478" +"checksum quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a" +"checksum rand 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)" = "eb250fd207a4729c976794d03db689c9be1d634ab5a1c9da9492a13d8fecbcdf" +"checksum redox_syscall 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)" = "8dde11f18c108289bef24469638a04dce49da56084f2d50618b226e47eb04509" +"checksum redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7e891cfe48e9100a70a3b6eb652fef28920c117d366339687bd5576160db0f76" +"checksum regex 0.1.80 (registry+https://github.com/rust-lang/crates.io-index)" = "4fd4ace6a8cf7860714a2c2280d6c1f7e6a413486c13298bbc86fd3da019402f" +"checksum regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1731164734096285ec2a5ec7fea5248ae2f5485b3feeb0115af4fda2183b2d1b" +"checksum regex-syntax 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = 
"f9ec002c35e86791825ed294b50008eea9ddfc8def4420124fbc6b08db834957" +"checksum regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad890a5eef7953f55427c50575c680c42841653abd2b028b68cd223d157f62db" +"checksum rustc-demangle 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "aee45432acc62f7b9a108cc054142dac51f979e69e71ddce7d6fc7adf29e817e" +"checksum rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)" = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda" +"checksum same-file 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d931a44fdaa43b8637009e7632a02adc4f2b2e0733c08caa4cf00e8da4a117a7" +"checksum scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f417c22df063e9450888a7561788e9bd46d3bb3c1466435b4eccb903807f147d" +"checksum scopeguard 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "59a076157c1e2dc561d8de585151ee6965d910dd4dcb5dabb7ae3e83981a6c57" +"checksum semver 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bee2bc909ab2d8d60dab26e8cad85b25d795b14603a0dcb627b78b9d30b6454b" +"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" +"checksum serde 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)" = "6a7046c9d4c6c522d10b2d098f9bebe2bef227e0e74044d8c1bfcf6b476af799" +"checksum serde_derive 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)" = "1afcaae083fd1c46952a315062326bc9957f182358eb7da03b57ef1c688f7aa9" +"checksum serde_derive_internals 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bd381f6d01a6616cdba8530492d453b7761b456ba974e98768a18cad2cd76f58" +"checksum serde_ignored 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "190e9765dcedb56be63b6e0993a006c7e3b071a016a304736e4a315dc01fb142" +"checksum serde_json 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d243424e06f9f9c39e3cd36147470fd340db785825e367625f79298a6ac6b7ac" +"checksum shell-escape 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "dd5cc96481d54583947bfe88bf30c23d53f883c6cd0145368b69989d97b84ef8" +"checksum socket2 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9e76b159741052c7deaa9fd0b5ca6b5f79cecf525ed665abfe5002086c6b2791" +"checksum strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b4d15c810519a91cf877e7e36e63fe068815c678181439f2f29e2562147c3694" +"checksum syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d3b891b9015c88c576343b9b3e41c2c11a51c219ef067b264bd9c8aa9b441dad" +"checksum synom 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a393066ed9010ebaed60b9eafa373d4b1baac186dd7e008555b0f702b51945b6" +"checksum tar 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)" = "281285b717926caa919ad905ef89c63d75805c7d89437fb873100925a53f2b1b" +"checksum tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "87974a6f5c1dfb344d733055601650059a3363de2a6104819293baff662132d6" +"checksum termcolor 0.3.3 
(registry+https://github.com/rust-lang/crates.io-index)" = "9065bced9c3e43453aa3d56f1e98590b8455b341d2fa191a1090c0dd0b242c75" +"checksum termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "689a3bdfaab439fd92bc87df5c4c78417d3cbe537487274e9b0b2dce76e92096" +"checksum thread-id 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a9539db560102d1cef46b8b78ce737ff0bb64e7e18d35b2a5688f7d097d0ff03" +"checksum thread_local 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "8576dbbfcaef9641452d5cf0df9b0e7eeab7694956dd33bb61515fb8f18cfdd5" +"checksum thread_local 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1697c4b57aeeb7a536b647165a2825faddffb1d3bad386d507709bd51a90bb14" +"checksum toml 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a7540f4ffc193e0d3c94121edb19b055670d369f77d5804db11ae053a45b6e7e" +"checksum unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" +"checksum unicode-normalization 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "51ccda9ef9efa3f7ef5d91e8f9b83bbe6955f9bf86aec89d5cce2c874625920f" +"checksum unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f860d7d29cf02cb2f3f359fd35991af3d30bac52c57d265a3c461074cb4dc" +"checksum unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56" +"checksum url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "eeb819346883532a271eb626deb43c4a1bb4c4dd47c519bd78137c3e72a4fe27" +"checksum userenv-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "71d28ea36bbd9192d75bd9fa9b39f96ddb986eaee824adae5d53b6e51919b2f3" +"checksum utf8-ranges 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a1ca13c08c41c9c3e04224ed9ff80461d97e121589ff27c753a16cb10830ae0f" +"checksum utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "662fab6525a98beff2921d7f61a39e7d59e0b425ebc7d0d9e66d316e55124122" +"checksum vcpkg 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9e0a7d8bed3178a8fb112199d466eeca9ed09a14ba8ad67718179b4fd5487d0b" +"checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" +"checksum walkdir 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "bb08f9e670fab86099470b97cd2b252d6527f0b3cc1401acdb595ffc9dd288ff" +"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" +"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" +"checksum wincolor 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "a39ee4464208f6430992ff20154216ab2357772ac871d994c51628d60e58b8b0" +"checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" diff 
--git a/collector/compile-benchmarks/cargo/Cargo.toml b/collector/compile-benchmarks/cargo/Cargo.toml new file mode 100644 index 000000000..013558386 --- /dev/null +++ b/collector/compile-benchmarks/cargo/Cargo.toml @@ -0,0 +1,81 @@ +[package] +name = "cargo" +version = "0.23.0" +authors = ["Yehuda Katz <wycats@gmail.com>", + "Carl Lerche <me@carllerche.com>", + "Alex Crichton <alex@alexcrichton.com>"] +license = "MIT/Apache-2.0" +homepage = "https://crates.io" +repository = "https://github.com/rust-lang/cargo" +documentation = "https://docs.rs/cargo" +description = """ +Cargo, a package manager for Rust. +""" + +[lib] +name = "cargo" +path = "src/cargo/lib.rs" + +[dependencies] +atty = "0.2" +crates-io = { path = "src/crates-io", version = "0.12" } +crossbeam = "0.3" +crypto-hash = "0.3" +curl = "0.4.6" +docopt = "0.8.1" +env_logger = "0.4" +error-chain = "0.11.0-rc.2" +filetime = "0.1" +flate2 = "0.2" +fs2 = "0.4" +git2 = "0.6" +git2-curl = "0.7" +glob = "0.2" +hex = "0.2" +home = "0.3" +ignore = "^0.2.2" +jobserver = "0.1.6" +libc = "=0.2.54" +libgit2-sys = "0.6" +log = "0.3" +num_cpus = "1.0" +same-file = "0.1" +scoped-tls = "0.1" +semver = { version = "0.8.0", features = ["serde"] } +serde = "1.0" +serde_derive = "1.0" +serde_ignored = "0.0.4" +serde_json = "1.0" +shell-escape = "0.1" +tar = { version = "0.4", default-features = false } +tempdir = "0.3" +termcolor = "0.3" +toml = "0.4" +#url = "1.1" +url = { path = "url-1.5.1" } + +[target.'cfg(target_os = "macos")'.dependencies] +core-foundation = { version = "0.4.4", features = ["mac_os_10_7_support"] } + +[target.'cfg(windows)'.dependencies] +kernel32-sys = "0.2" +miow = "0.2" +psapi-sys = "0.1" +winapi = "0.2" + +[dev-dependencies] +bufstream = "0.1" +cargotest = { path = "tests/cargotest" } +filetime = "0.1" +hamcrest = "=0.1.1" + +[[bin]] +name = "cargo" +test = false +doc = false + +[workspace] + +[patch.crates-io] +url = { path = "url-1.5.1" } +socket2 = { path = "socket2-0.2.3" } diff --git a/collector/compile-benchmarks/cargo/LICENSE-APACHE b/collector/compile-benchmarks/cargo/LICENSE-APACHE new file mode 100644 index 000000000..16fe87b06 --- /dev/null +++ b/collector/compile-benchmarks/cargo/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files.
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/collector/compile-benchmarks/cargo/LICENSE-MIT b/collector/compile-benchmarks/cargo/LICENSE-MIT new file mode 100644 index 000000000..31aa79387 --- /dev/null +++ b/collector/compile-benchmarks/cargo/LICENSE-MIT @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/collector/compile-benchmarks/cargo/LICENSE-THIRD-PARTY b/collector/compile-benchmarks/cargo/LICENSE-THIRD-PARTY new file mode 100644 index 000000000..c9897b96f --- /dev/null +++ b/collector/compile-benchmarks/cargo/LICENSE-THIRD-PARTY @@ -0,0 +1,1272 @@ +The Cargo source code itself does not bundle any third party libraries, but it +depends on a number of libraries which carry their own copyright notices and +license terms. These libraries are normally all linked static into the binary +distributions of Cargo: + +* OpenSSL - http://www.openssl.org/source/license.html + + Copyright (c) 1998-2011 The OpenSSL Project. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + 3. All advertising materials mentioning features or use of this + software must display the following acknowledgment: + "This product includes software developed by the OpenSSL Project + for use in the OpenSSL Toolkit. (http://www.openssl.org/)" + + 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + endorse or promote products derived from this software without + prior written permission. For written permission, please contact + openssl-core@openssl.org. + + 5. Products derived from this software may not be called "OpenSSL" + nor may "OpenSSL" appear in their names without prior written + permission of the OpenSSL Project. + + 6. Redistributions of any form whatsoever must retain the following + acknowledgment: + "This product includes software developed by the OpenSSL Project + for use in the OpenSSL Toolkit (http://www.openssl.org/)" + + THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR + ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + OF THE POSSIBILITY OF SUCH DAMAGE. + ==================================================================== + + This product includes cryptographic software written by Eric Young + (eay@cryptsoft.com). This product includes software written by Tim + Hudson (tjh@cryptsoft.com). + + --- + + Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + All rights reserved. + + This package is an SSL implementation written + by Eric Young (eay@cryptsoft.com). 
+ The implementation was written so as to conform with Netscapes SSL. + + This library is free for commercial and non-commercial use as long as + the following conditions are aheared to. The following conditions + apply to all code found in this distribution, be it the RC4, RSA, + lhash, DES, etc., code; not just the SSL code. The SSL documentation + included with this distribution is covered by the same copyright terms + except that the holder is Tim Hudson (tjh@cryptsoft.com). + + Copyright remains Eric Young's, and as such any Copyright notices in + the code are not to be removed. + If this package is used in a product, Eric Young should be given attribution + as the author of the parts of the library used. + This can be in the form of a textual message at program startup or + in documentation (online or textual) provided with the package. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + 1. Redistributions of source code must retain the copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. All advertising materials mentioning features or use of this software + must display the following acknowledgement: + "This product includes cryptographic software written by + Eric Young (eay@cryptsoft.com)" + The word 'cryptographic' can be left out if the rouines from the library + being used are not cryptographic related :-). + 4. If you include any Windows specific code (or a derivative thereof) from + the apps directory (application code) you must include an acknowledgement: + "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + + THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + SUCH DAMAGE. + + The licence and distribution terms for any publically available version or + derivative of this code cannot be changed. i.e. this code cannot simply be + copied and put under another distribution licence + [including the GNU Public Licence.] + +* libgit2 - https://github.com/libgit2/libgit2/blob/master/COPYING + + libgit2 is Copyright (C) the libgit2 contributors, + unless otherwise stated. See the AUTHORS file for details. + + Note that the only valid version of the GPL as far as this project + is concerned is _this_ particular version of the license (ie v2, not + v2.2 or v3.x or whatever), unless explicitly otherwise stated. 
+ + ---------------------------------------------------------------------- + + LINKING EXCEPTION + + In addition to the permissions in the GNU General Public License, + the authors give you unlimited permission to link the compiled + version of this library into combinations with other programs, + and to distribute those combinations without any restriction + coming from the use of this file. (The General Public License + restrictions do apply in other respects; for example, they cover + modification of the file, and distribution when not linked into + a combined executable.) + + ---------------------------------------------------------------------- + + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your + freedom to share and change it. By contrast, the GNU General Public + License is intended to guarantee your freedom to share and change free + software--to make sure the software is free for all its users. This + General Public License applies to most of the Free Software + Foundation's software and to any other program whose authors commit to + using it. (Some other Free Software Foundation software is covered by + the GNU Library General Public License instead.) You can apply it to + your programs, too. + + When we speak of free software, we are referring to freedom, not + price. Our General Public Licenses are designed to make sure that you + have the freedom to distribute copies of free software (and charge for + this service if you wish), that you receive source code or can get it + if you want it, that you can change the software or use pieces of it + in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid + anyone to deny you these rights or to ask you to surrender the rights. + These restrictions translate to certain responsibilities for you if you + distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether + gratis or for a fee, you must give the recipients all the rights that + you have. You must make sure that they, too, receive or can get the + source code. And you must show them these terms so they know their + rights. + + We protect your rights with two steps: (1) copyright the software, and + (2) offer you this license which gives you legal permission to copy, + distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain + that everyone understands that there is no warranty for this free + software. If the software is modified by someone else and passed on, we + want its recipients to know that what they have is not the original, so + that any problems introduced by others will not reflect on the original + authors' reputations. + + Finally, any free program is threatened constantly by software + patents. We wish to avoid the danger that redistributors of a free + program will individually obtain patent licenses, in effect making the + program proprietary. To prevent this, we have made it clear that any + patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and + modification follow. 
+ + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains + a notice placed by the copyright holder saying it may be distributed + under the terms of this General Public License. The "Program", below, + refers to any such program or work, and a "work based on the Program" + means either the Program or any derivative work under copyright law: + that is to say, a work containing the Program or a portion of it, + either verbatim or with modifications and/or translated into another + language. (Hereinafter, translation is included without limitation in + the term "modification".) Each licensee is addressed as "you". + + Activities other than copying, distribution and modification are not + covered by this License; they are outside its scope. The act of + running the Program is not restricted, and the output from the Program + is covered only if its contents constitute a work based on the + Program (independent of having been made by running the Program). + Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's + source code as you receive it, in any medium, provided that you + conspicuously and appropriately publish on each copy an appropriate + copyright notice and disclaimer of warranty; keep intact all the + notices that refer to this License and to the absence of any warranty; + and give any other recipients of the Program a copy of this License + along with the Program. + + You may charge a fee for the physical act of transferring a copy, and + you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion + of it, thus forming a work based on the Program, and copy and + distribute such modifications or work under the terms of Section 1 + above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + + These requirements apply to the modified work as a whole. If + identifiable sections of that work are not derived from the Program, + and can be reasonably considered independent and separate works in + themselves, then this License, and its terms, do not apply to those + sections when you distribute them as separate works. 
But when you + distribute the same sections as part of a whole which is a work based + on the Program, the distribution of the whole must be on the terms of + this License, whose permissions for other licensees extend to the + entire whole, and thus to each and every part regardless of who wrote it. + + Thus, it is not the intent of this section to claim rights or contest + your rights to work written entirely by you; rather, the intent is to + exercise the right to control the distribution of derivative or + collective works based on the Program. + + In addition, mere aggregation of another work not based on the Program + with the Program (or with a work based on the Program) on a volume of + a storage or distribution medium does not bring the other work under + the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, + under Section 2) in object code or executable form under the terms of + Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + + The source code for a work means the preferred form of the work for + making modifications to it. For an executable work, complete source + code means all the source code for all modules it contains, plus any + associated interface definition files, plus the scripts used to + control compilation and installation of the executable. However, as a + special exception, the source code distributed need not include + anything that is normally distributed (in either source or binary + form) with the major components (compiler, kernel, and so on) of the + operating system on which the executable runs, unless that component + itself accompanies the executable. + + If distribution of executable or object code is made by offering + access to copy from a designated place, then offering equivalent + access to copy the source code from the same place counts as + distribution of the source code, even though third parties are not + compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program + except as expressly provided under this License. Any attempt + otherwise to copy, modify, sublicense or distribute the Program is + void, and will automatically terminate your rights under this License. + However, parties who have received copies, or rights, from you under + this License will not have their licenses terminated so long as such + parties remain in full compliance. + + 5. You are not required to accept this License, since you have not + signed it. However, nothing else grants you permission to modify or + distribute the Program or its derivative works. 
These actions are + prohibited by law if you do not accept this License. Therefore, by + modifying or distributing the Program (or any work based on the + Program), you indicate your acceptance of this License to do so, and + all its terms and conditions for copying, distributing or modifying + the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the + Program), the recipient automatically receives a license from the + original licensor to copy, distribute or modify the Program subject to + these terms and conditions. You may not impose any further + restrictions on the recipients' exercise of the rights granted herein. + You are not responsible for enforcing compliance by third parties to + this License. + + 7. If, as a consequence of a court judgment or allegation of patent + infringement or for any other reason (not limited to patent issues), + conditions are imposed on you (whether by court order, agreement or + otherwise) that contradict the conditions of this License, they do not + excuse you from the conditions of this License. If you cannot + distribute so as to satisfy simultaneously your obligations under this + License and any other pertinent obligations, then as a consequence you + may not distribute the Program at all. For example, if a patent + license would not permit royalty-free redistribution of the Program by + all those who receive copies directly or indirectly through you, then + the only way you could satisfy both it and this License would be to + refrain entirely from distribution of the Program. + + If any portion of this section is held invalid or unenforceable under + any particular circumstance, the balance of the section is intended to + apply and the section as a whole is intended to apply in other + circumstances. + + It is not the purpose of this section to induce you to infringe any + patents or other property right claims or to contest validity of any + such claims; this section has the sole purpose of protecting the + integrity of the free software distribution system, which is + implemented by public license practices. Many people have made + generous contributions to the wide range of software distributed + through that system in reliance on consistent application of that + system; it is up to the author/donor to decide if he or she is willing + to distribute software through any other system and a licensee cannot + impose that choice. + + This section is intended to make thoroughly clear what is believed to + be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in + certain countries either by patents or by copyrighted interfaces, the + original copyright holder who places the Program under this License + may add an explicit geographical distribution limitation excluding + those countries, so that distribution is permitted only in or among + countries not thus excluded. In such case, this License incorporates + the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions + of the General Public License from time to time. Such new versions will + be similar in spirit to the present version, but may differ in detail to + address new problems or concerns. + + Each version is given a distinguishing version number. 
If the Program + specifies a version number of this License which applies to it and "any + later version", you have the option of following the terms and conditions + either of that version or of any later version published by the Free + Software Foundation. If the Program does not specify a version number of + this License, you may choose any version ever published by the Free Software + Foundation. + + 10. If you wish to incorporate parts of the Program into other free + programs whose distribution conditions are different, write to the author + to ask for permission. For software which is copyrighted by the Free + Software Foundation, write to the Free Software Foundation; we sometimes + make exceptions for this. Our decision will be guided by the two goals + of preserving the free status of all derivatives of our free software and + of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY + FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN + OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES + PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED + OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS + TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE + PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, + REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING + WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR + REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, + INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING + OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED + TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY + YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER + PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE + POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest + possible use to the public, the best way to achieve this is to make it + free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest + to attach them to the start of each source file to most effectively + convey the exclusion of warranty; and each file should have at least + the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details.
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + + Also add information on how to contact you by electronic and paper mail. + + If the program is interactive, make it output a short notice like this + when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + + The hypothetical commands `show w' and `show c' should show the appropriate + parts of the General Public License. Of course, the commands you use may + be called something other than `show w' and `show c'; they could even be + mouse-clicks or menu items--whatever suits your program. + + You should also get your employer (if you work as a programmer) or your + school, if any, to sign a "copyright disclaimer" for the program, if + necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + <signature of Ty Coon>, 1 April 1989 + Ty Coon, President of Vice + + This General Public License does not permit incorporating your program into + proprietary programs. If your program is a subroutine library, you may + consider it more useful to permit linking proprietary applications with the + library. If this is what you want to do, use the GNU Library General + Public License instead of this License. + + ---------------------------------------------------------------------- + + The bundled ZLib code is licensed under the ZLib license: + + Copyright (C) 1995-2010 Jean-loup Gailly and Mark Adler + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + Jean-loup Gailly Mark Adler + jloup@gzip.org madler@alumni.caltech.edu + + ---------------------------------------------------------------------- + + The Clar framework is licensed under the MIT license: + + Copyright (C) 2011 by Vicent Marti + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software.
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + + ---------------------------------------------------------------------- + + The regex library (deps/regex/) is licensed under the GNU LGPL + + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + [This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your + freedom to share and change it. By contrast, the GNU General Public + Licenses are intended to guarantee your freedom to share and change + free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some + specially designated software packages--typically libraries--of the + Free Software Foundation and other authors who decide to use it. You + can use it too, but we suggest you first think carefully about whether + this license or the ordinary General Public License is the better + strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, + not price. Our General Public Licenses are designed to make sure that + you have the freedom to distribute copies of free software (and charge + for this service if you wish); that you receive source code or can get + it if you want it; that you can change the software and use pieces of + it in new free programs; and that you are informed that you can do + these things. + + To protect your rights, we need to make restrictions that forbid + distributors to deny you these rights or to ask you to surrender these + rights. These restrictions translate to certain responsibilities for + you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis + or for a fee, you must give the recipients all the rights that we gave + you. You must make sure that they, too, receive or can get the source + code. If you link other code with the library, you must provide + complete object files to the recipients, so that they can relink them + with the library after making changes to the library and recompiling + it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the + library, and (2) we offer you this license, which gives you legal + permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that + there is no warranty for the free library. 
Also, if the library is + modified by someone else and passed on, the recipients should know + that what they have is not the original version, so that the original + author's reputation will not be affected by problems that might be + introduced by others. + + Finally, software patents pose a constant threat to the existence of + any free program. We wish to make sure that a company cannot + effectively restrict the users of a free program by obtaining a + restrictive license from a patent holder. Therefore, we insist that + any patent license obtained for a version of the library must be + consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the + ordinary GNU General Public License. This license, the GNU Lesser + General Public License, applies to certain designated libraries, and + is quite different from the ordinary General Public License. We use + this license for certain libraries in order to permit linking those + libraries into non-free programs. + + When a program is linked with a library, whether statically or using + a shared library, the combination of the two is legally speaking a + combined work, a derivative of the original library. The ordinary + General Public License therefore permits such linking only if the + entire combination fits its criteria of freedom. The Lesser General + Public License permits more lax criteria for linking other code with + the library. + + We call this license the "Lesser" General Public License because it + does Less to protect the user's freedom than the ordinary General + Public License. It also provides other free software developers Less + of an advantage over competing non-free programs. These disadvantages + are the reason we use the ordinary General Public License for many + libraries. However, the Lesser license provides advantages in certain + special circumstances. + + For example, on rare occasions, there may be a special need to + encourage the widest possible use of a certain library, so that it becomes + a de-facto standard. To achieve this, non-free programs must be + allowed to use the library. A more frequent case is that a free + library does the same job as widely used non-free libraries. In this + case, there is little to gain by limiting the free library to free + software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free + programs enables a greater number of people to use a large body of + free software. For example, permission to use the GNU C Library in + non-free programs enables many more people to use the whole GNU + operating system, as well as its variant, the GNU/Linux operating + system. + + Although the Lesser General Public License is Less protective of the + users' freedom, it does ensure that the user of a program that is + linked with the Library has the freedom and the wherewithal to run + that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and + modification follow. Pay close attention to the difference between a + "work based on the library" and a "work that uses the library". The + former contains code derived from the library, whereas the latter must + be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. 
This License Agreement applies to any software library or other + program which contains a notice placed by the copyright holder or + other authorized party saying it may be distributed under the terms of + this Lesser General Public License (also called "this License"). + Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data + prepared so as to be conveniently linked with application programs + (which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work + which has been distributed under these terms. A "work based on the + Library" means either the Library or any derivative work under + copyright law: that is to say, a work containing the Library or a + portion of it, either verbatim or with modifications and/or translated + straightforwardly into another language. (Hereinafter, translation is + included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for + making modifications to it. For a library, complete source code means + all the source code for all modules it contains, plus any associated + interface definition files, plus the scripts used to control compilation + and installation of the library. + + Activities other than copying, distribution and modification are not + covered by this License; they are outside its scope. The act of + running a program using the Library is not restricted, and output from + such a program is covered only if its contents constitute a work based + on the Library (independent of the use of the Library in a tool for + writing it). Whether that is true depends on what the Library does + and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's + complete source code as you receive it, in any medium, provided that + you conspicuously and appropriately publish on each copy an + appropriate copyright notice and disclaimer of warranty; keep intact + all the notices that refer to this License and to the absence of any + warranty; and distribute a copy of this License along with the + Library. + + You may charge a fee for the physical act of transferring a copy, + and you may at your option offer warranty protection in exchange for a + fee. + + 2. You may modify your copy or copies of the Library or any portion + of it, thus forming a work based on the Library, and copy and + distribute such modifications or work under the terms of Section 1 + above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. 
Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + + These requirements apply to the modified work as a whole. If + identifiable sections of that work are not derived from the Library, + and can be reasonably considered independent and separate works in + themselves, then this License, and its terms, do not apply to those + sections when you distribute them as separate works. But when you + distribute the same sections as part of a whole which is a work based + on the Library, the distribution of the whole must be on the terms of + this License, whose permissions for other licensees extend to the + entire whole, and thus to each and every part regardless of who wrote + it. + + Thus, it is not the intent of this section to claim rights or contest + your rights to work written entirely by you; rather, the intent is to + exercise the right to control the distribution of derivative or + collective works based on the Library. + + In addition, mere aggregation of another work not based on the Library + with the Library (or with a work based on the Library) on a volume of + a storage or distribution medium does not bring the other work under + the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public + License instead of this License to a given copy of the Library. To do + this, you must alter all the notices that refer to this License, so + that they refer to the ordinary GNU General Public License, version 2, + instead of to this License. (If a newer version than version 2 of the + ordinary GNU General Public License has appeared, then you can specify + that version instead if you wish.) Do not make any other change in + these notices. + + Once this change is made in a given copy, it is irreversible for + that copy, so the ordinary GNU General Public License applies to all + subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of + the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or + derivative of it, under Section 2) in object code or executable form + under the terms of Sections 1 and 2 above provided that you accompany + it with the complete corresponding machine-readable source code, which + must be distributed under the terms of Sections 1 and 2 above on a + medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy + from a designated place, then offering equivalent access to copy the + source code from the same place satisfies the requirement to + distribute the source code, even though third parties are not + compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the + Library, but is designed to work with the Library by being compiled or + linked with it, is called a "work that uses the Library". Such a + work, in isolation, is not a derivative work of the Library, and + therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library + creates an executable that is a derivative of the Library (because it + contains portions of the Library), rather than a "work that uses the + library". The executable is therefore covered by this License. 
+ Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file + that is part of the Library, the object code for the work may be a + derivative work of the Library even though the source code is not. + Whether this is true is especially significant if the work can be + linked without the Library, or if the work is itself a library. The + threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data + structure layouts and accessors, and small macros and small inline + functions (ten lines or less in length), then the use of the object + file is unrestricted, regardless of whether it is legally a derivative + work. (Executables containing this object code plus portions of the + Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may + distribute the object code for the work under the terms of Section 6. + Any executables containing that work also fall under Section 6, + whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or + link a "work that uses the Library" with the Library to produce a + work containing portions of the Library, and distribute that work + under terms of your choice, provided that the terms permit + modification of the work for the customer's own use and reverse + engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the + Library is used in it and that the Library and its use are covered by + this License. You must supply a copy of this License. If the work + during execution displays copyright notices, you must include the + copyright notice for the Library among them, as well as a reference + directing the user to the copy of this License. Also, you must do one + of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. 
+ + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the + Library" must include any data and utility programs needed for + reproducing the executable from it. However, as a special exception, + the materials to be distributed need not include anything that is + normally distributed (in either source or binary form) with the major + components (compiler, kernel, and so on) of the operating system on + which the executable runs, unless that component itself accompanies + the executable. + + It may happen that this requirement contradicts the license + restrictions of other proprietary libraries that do not normally + accompany the operating system. Such a contradiction means you cannot + use both them and the Library together in an executable that you + distribute. + + 7. You may place library facilities that are a work based on the + Library side-by-side in a single library together with other library + facilities not covered by this License, and distribute such a combined + library, provided that the separate distribution of the work based on + the Library and of the other library facilities is otherwise + permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute + the Library except as expressly provided under this License. Any + attempt otherwise to copy, modify, sublicense, link with, or + distribute the Library is void, and will automatically terminate your + rights under this License. However, parties who have received copies, + or rights, from you under this License will not have their licenses + terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not + signed it. However, nothing else grants you permission to modify or + distribute the Library or its derivative works. These actions are + prohibited by law if you do not accept this License. Therefore, by + modifying or distributing the Library (or any work based on the + Library), you indicate your acceptance of this License to do so, and + all its terms and conditions for copying, distributing or modifying + the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the + Library), the recipient automatically receives a license from the + original licensor to copy, distribute, link with or modify the Library + subject to these terms and conditions. You may not impose any further + restrictions on the recipients' exercise of the rights granted herein. + You are not responsible for enforcing compliance by third parties with + this License. + + 11. If, as a consequence of a court judgment or allegation of patent + infringement or for any other reason (not limited to patent issues), + conditions are imposed on you (whether by court order, agreement or + otherwise) that contradict the conditions of this License, they do not + excuse you from the conditions of this License. 
If you cannot + distribute so as to satisfy simultaneously your obligations under this + License and any other pertinent obligations, then as a consequence you + may not distribute the Library at all. For example, if a patent + license would not permit royalty-free redistribution of the Library by + all those who receive copies directly or indirectly through you, then + the only way you could satisfy both it and this License would be to + refrain entirely from distribution of the Library. + + If any portion of this section is held invalid or unenforceable under any + particular circumstance, the balance of the section is intended to apply, + and the section as a whole is intended to apply in other circumstances. + + It is not the purpose of this section to induce you to infringe any + patents or other property right claims or to contest validity of any + such claims; this section has the sole purpose of protecting the + integrity of the free software distribution system which is + implemented by public license practices. Many people have made + generous contributions to the wide range of software distributed + through that system in reliance on consistent application of that + system; it is up to the author/donor to decide if he or she is willing + to distribute software through any other system and a licensee cannot + impose that choice. + + This section is intended to make thoroughly clear what is believed to + be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in + certain countries either by patents or by copyrighted interfaces, the + original copyright holder who places the Library under this License may add + an explicit geographical distribution limitation excluding those countries, + so that distribution is permitted only in or among countries not thus + excluded. In such case, this License incorporates the limitation as if + written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new + versions of the Lesser General Public License from time to time. + Such new versions will be similar in spirit to the present version, + but may differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the Library + specifies a version number of this License which applies to it and + "any later version", you have the option of following the terms and + conditions either of that version or of any later version published by + the Free Software Foundation. If the Library does not specify a + license version number, you may choose any version ever published by + the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free + programs whose distribution conditions are incompatible with these, + write to the author to ask for permission. For software which is + copyrighted by the Free Software Foundation, write to the Free + Software Foundation; we sometimes make exceptions for this. Our + decision will be guided by the two goals of preserving the free status + of all derivatives of our free software and of promoting the sharing + and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO + WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
+
+ EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+ OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+ KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+ LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+ THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+ WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+ AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+ FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+ CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+ LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+ RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+ FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+ SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+ DAMAGES.
+
+                      END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+ possible use to the public, we recommend making it free software that
+ everyone can redistribute and change. You can do so by permitting
+ redistribution under these terms (or, alternatively, under the terms of the
+ ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+ safest to attach them to the start of each source file to most effectively
+ convey the exclusion of warranty; and each file should have at least the
+ "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the library's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Lesser General Public
+    License as published by the Free Software Foundation; either
+    version 2.1 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+    Lesser General Public License for more details.
+
+    You should have received a copy of the GNU Lesser General Public
+    License along with this library; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+ Also add information on how to contact you by electronic and paper mail.
+
+ You should also get your employer (if you work as a programmer) or your
+ school, if any, to sign a "copyright disclaimer" for the library, if
+ necessary. Here is a sample; alter the names:
+
+   Yoyodyne, Inc., hereby disclaims all copyright interest in the
+   library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+   <signature of Ty Coon>, 1 April 1990
+   Ty Coon, President of Vice
+
+ That's all there is to it!
+
+ ----------------------------------------------------------------------
+
+* libssh2 - http://www.libssh2.org/license.html
+
+  Copyright (c) 2004-2007 Sara Golemon <sarag@libssh2.org>
+  Copyright (c) 2005,2006 Mikhail Gusarov <dottedmag@dottedmag.net>
+  Copyright (c) 2006-2007 The Written Word, Inc.
+  Copyright (c) 2007 Eli Fant <elifantu@mail.ru>
+  Copyright (c) 2009 Daniel Stenberg
+  Copyright (C) 2008, 2009 Simon Josefsson
+  All rights reserved.
+ + Redistribution and use in source and binary forms, + with or without modification, are permitted provided + that the following conditions are met: + + Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + + Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials + provided with the distribution. + + Neither the name of the copyright holder nor the names + of any other contributors may be used to endorse or + promote products derived from this software without + specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND + CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + OF SUCH DAMAGE. + +* libcurl - http://curl.haxx.se/docs/copyright.html + + COPYRIGHT AND PERMISSION NOTICE + + Copyright (c) 1996 - 2014, Daniel Stenberg, daniel@haxx.se. + + All rights reserved. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, + DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + USE OR OTHER DEALINGS IN THE SOFTWARE. + + Except as contained in this notice, the name of a copyright holder shall not + be used in advertising or otherwise to promote the sale, use or other + dealings in this Software without prior written authorization of the + copyright holder. 
+ +* flate2-rs - https://github.com/alexcrichton/flate2-rs/blob/master/LICENSE-MIT +* link-config - https://github.com/alexcrichton/link-config/blob/master/LICENSE-MIT +* openssl-static-sys - https://github.com/alexcrichton/openssl-static-sys/blob/master/LICENSE-MIT +* toml-rs - https://github.com/alexcrichton/toml-rs/blob/master/LICENSE-MIT +* libssh2-static-sys - https://github.com/alexcrichton/libssh2-static-sys/blob/master/LICENSE-MIT +* git2-rs - https://github.com/alexcrichton/git2-rs/blob/master/LICENSE-MIT +* tar-rs - https://github.com/alexcrichton/tar-rs/blob/master/LICENSE-MIT + + Copyright (c) 2014 Alex Crichton + + Permission is hereby granted, free of charge, to any + person obtaining a copy of this software and associated + documentation files (the "Software"), to deal in the + Software without restriction, including without + limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of + the Software, and to permit persons to whom the Software + is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice + shall be included in all copies or substantial portions + of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF + ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED + TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A + PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT + SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +* glob - https://github.com/rust-lang/glob/blob/master/LICENSE-MIT +* semver - https://github.com/rust-lang/semver/blob/master/LICENSE-MIT + + Copyright (c) 2014 The Rust Project Developers + + Permission is hereby granted, free of charge, to any + person obtaining a copy of this software and associated + documentation files (the "Software"), to deal in the + Software without restriction, including without + limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of + the Software, and to permit persons to whom the Software + is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice + shall be included in all copies or substantial portions + of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF + ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED + TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A + PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT + SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+ +* rust-url - https://github.com/servo/rust-url/blob/master/LICENSE-MIT + + Copyright (c) 2006-2009 Graydon Hoare + Copyright (c) 2009-2013 Mozilla Foundation + + Permission is hereby granted, free of charge, to any + person obtaining a copy of this software and associated + documentation files (the "Software"), to deal in the + Software without restriction, including without + limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of + the Software, and to permit persons to whom the Software + is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice + shall be included in all copies or substantial portions + of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF + ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED + TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A + PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT + SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + +* rust-encoding - https://github.com/lifthrasiir/rust-encoding/blob/master/LICENSE.txt + + The MIT License (MIT) + + Copyright (c) 2013, Kang Seonghoon. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + +* curl-rust - https://github.com/carllerche/curl-rust/blob/master/LICENSE + + Copyright (c) 2014 Carl Lerche + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
+
+* docopt.rs - https://github.com/docopt/docopt.rs/blob/master/UNLICENSE
+
+ This is free and unencumbered software released into the public domain.
+
+ Anyone is free to copy, modify, publish, use, compile, sell, or
+ distribute this software, either in source code form or as a compiled
+ binary, for any purpose, commercial or non-commercial, and by any
+ means.
+
+ In jurisdictions that recognize copyright laws, the author or authors
+ of this software dedicate any and all copyright interest in the
+ software to the public domain. We make this dedication for the benefit
+ of the public at large and to the detriment of our heirs and
+ successors. We intend this dedication to be an overt act of
+ relinquishment in perpetuity of all present and future rights to this
+ software under copyright law.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+
+ For more information, please refer to <http://unlicense.org/>
+
diff --git a/collector/compile-benchmarks/cargo/README.md b/collector/compile-benchmarks/cargo/README.md
new file mode 100644
index 000000000..dde37be1b
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/README.md
@@ -0,0 +1,84 @@
+Cargo downloads your Rust project’s dependencies and compiles your project.
+
+Learn more at http://doc.crates.io/
+
+## Code Status
+[![Build Status](https://travis-ci.org/rust-lang/cargo.svg?branch=master)](https://travis-ci.org/rust-lang/cargo)
+[![Build Status](https://ci.appveyor.com/api/projects/status/github/rust-lang/cargo?branch=master&svg=true)](https://ci.appveyor.com/project/rust-lang-libs/cargo)
+
+## Installing Cargo
+
+Cargo is distributed by default with Rust, so if you've got `rustc` installed
+locally you probably also have `cargo` installed locally.
+
+## Compiling from Source
+
+Cargo requires the following tools and packages to build:
+
+* `python`
+* `curl` (on Unix)
+* `cmake`
+* OpenSSL headers (only for Unix, this is the `libssl-dev` package on ubuntu)
+* `cargo` and `rustc`
+
+First, you'll want to check out this repository
+
+```
+git clone --recursive https://github.com/rust-lang/cargo
+cd cargo
+```
+
+With `cargo` already installed, you can simply run:
+
+```
+cargo build --release
+```
+
+## Adding new subcommands to Cargo
+
+Cargo is designed to be extensible with new subcommands without having to modify
+Cargo itself. See [the Wiki page][third-party-subcommands] for more details and
+a list of known community-developed subcommands.
+
+[third-party-subcommands]: https://github.com/rust-lang/cargo/wiki/Third-party-cargo-subcommands
+
+
+## Releases
+
+High level release notes are available as part of [Rust's release notes][rel].
+Cargo releases coincide with Rust releases.
+
+[rel]: https://github.com/rust-lang/rust/blob/master/RELEASES.md
+
+## Reporting issues
+
+Found a bug? We'd love to know about it!
+ +Please report all issues on the github [issue tracker][issues]. + +[issues]: https://github.com/rust-lang/cargo/issues + + +## Contributing + +See [CONTRIBUTING.md](CONTRIBUTING.md) + + +## License + +Cargo is primarily distributed under the terms of both the MIT license +and the Apache License (Version 2.0). + +See LICENSE-APACHE and LICENSE-MIT for details. + +### Third party software + +This product includes software developed by the OpenSSL Project +for use in the OpenSSL Toolkit (http://www.openssl.org/). + +In binary form, this product includes software that is licensed under the +terms of the GNU General Public License, version 2, with a linking exception, +which can be obtained from the [upstream repository][1]. + +[1]: https://github.com/libgit2/libgit2 + diff --git a/collector/compile-benchmarks/cargo/appveyor.yml b/collector/compile-benchmarks/cargo/appveyor.yml new file mode 100644 index 000000000..e64eeb279 --- /dev/null +++ b/collector/compile-benchmarks/cargo/appveyor.yml @@ -0,0 +1,41 @@ +environment: + + # At the time this was added AppVeyor was having troubles with checking + # revocation of SSL certificates of sites like static.rust-lang.org and what + # we think is crates.io. The libcurl HTTP client by default checks for + # revocation on Windows and according to a mailing list [1] this can be + # disabled. + # + # The `CARGO_HTTP_CHECK_REVOKE` env var here tells cargo to disable SSL + # revocation checking on Windows in libcurl. Note, though, that rustup, which + # we're using to download Rust here, also uses libcurl as the default backend. + # Unlike Cargo, however, rustup doesn't have a mechanism to disable revocation + # checking. To get rustup working we set `RUSTUP_USE_HYPER` which forces it to + # use the Hyper instead of libcurl backend. Both Hyper and libcurl use + # schannel on Windows but it appears that Hyper configures it slightly + # differently such that revocation checking isn't turned on by default. 
+ # + # [1]: https://curl.haxx.se/mail/lib-2016-03/0202.html + RUSTUP_USE_HYPER: 1 + CARGO_HTTP_CHECK_REVOKE: false + + matrix: + - TARGET: x86_64-pc-windows-msvc + OTHER_TARGET: i686-pc-windows-msvc + MAKE_TARGETS: test-unit-x86_64-pc-windows-msvc + +install: + - appveyor-retry appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe + - rustup-init.exe -y --default-host x86_64-pc-windows-msvc --default-toolchain nightly + - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin + - rustup target add %OTHER_TARGET% + - rustc -V + - cargo -V + - git submodule update --init + +clone_depth: 1 + +build: false + +test_script: + - cargo test diff --git a/collector/compile-benchmarks/cargo/perf-config.json b/collector/compile-benchmarks/cargo/perf-config.json new file mode 100644 index 000000000..b3c296ba0 --- /dev/null +++ b/collector/compile-benchmarks/cargo/perf-config.json @@ -0,0 +1,8 @@ +{ + "cargo_rustc_opts": "--cap-lints=warn", + "cargo_opts": "--lib", + "runs": 1, + "touch_file": "src/cargo/lib.rs", + "category": "stable", + "artifact": "library" +} diff --git a/collector/compile-benchmarks/cargo/socket2-0.2.3/.appveyor.yml b/collector/compile-benchmarks/cargo/socket2-0.2.3/.appveyor.yml new file mode 100644 index 000000000..4880006c5 --- /dev/null +++ b/collector/compile-benchmarks/cargo/socket2-0.2.3/.appveyor.yml @@ -0,0 +1,16 @@ +environment: + matrix: + - TARGET: x86_64-pc-windows-msvc +install: + - appveyor-retry appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe + - rustup-init.exe -y --default-host x86_64-pc-windows-msvc + - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin + - if NOT "%TARGET%" == "x86_64-pc-windows-msvc" rustup target add %TARGET% + - rustc -V + - cargo -V + +build: false + +test_script: + - cargo test + - cargo test --features reuseport diff --git a/collector/compile-benchmarks/cargo/socket2-0.2.3/.gitignore b/collector/compile-benchmarks/cargo/socket2-0.2.3/.gitignore new file mode 100644 index 000000000..4308d8220 --- /dev/null +++ b/collector/compile-benchmarks/cargo/socket2-0.2.3/.gitignore @@ -0,0 +1,3 @@ +target/ +**/*.rs.bk +Cargo.lock diff --git a/collector/compile-benchmarks/cargo/socket2-0.2.3/.travis.yml b/collector/compile-benchmarks/cargo/socket2-0.2.3/.travis.yml new file mode 100644 index 000000000..e6a4bd7c1 --- /dev/null +++ b/collector/compile-benchmarks/cargo/socket2-0.2.3/.travis.yml @@ -0,0 +1,27 @@ +language: rust +rust: + - stable +sudo: false +before_script: + - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH +script: + - cargo test + - cargo test --features "reuseport unix pair" + - cargo doc --no-deps --all-features +after_success: + - travis-cargo --only nightly doc-upload +env: + global: + secure: "qibsiOrfM/GjYgYFXycSqKMwIK9ZR4cvHZsSqTtqrtxGq5Q7jTwMqdDl8KHDgX1a4it4tGay+7joex8k2zL6OQ+FljQGQq54EDiGw82HWix/fBpOMjMszw+GEDMG/9hUSb6HFdzAKLPAsBRvIs2QteJ60GhL/w4Z/EmfHlVKMnVsYUjfBf5BNlkv8yFvRMY6QqL+F85N7dDQ7JAgdiP79jR7LP8IlCEu/8pgSrf9pSqAHSC1Co1CaN8uhhMlcIIOZ5qYAK4Xty26r2EDzPm5Lw2Bd7a4maN0x+Be2DJvrnX30QkJNNU1XhxYkeZEeUCYAlUhBE5nBHpyyrbAxv+rJodPeyRl5EVpyqi8htPVmcnuA2XpNoHCud7CnzxaFytGvAC5kp0EgS7f3ac4hTnZXCfP0CvnT5UyWfWv9yLwQycdYcAsV4TnKxVAw4ykApGey+h0dyIM2VnzRPOo9D2ZS+JpzPHtx/PXD7aN7IungfTj4PmT+i00QNzkzJR9BqYKmEDBUcz6MLctg4D6xChhN8Go4hvk22F0RVyvEg1MAvXc07EKeWXG/VZ+H2frcPEceMGRBBHiOfOEE/2utNYgvIcmQxd1hvbm3cQOIjeXU2rGneN86cSmx7zNlfOyJUoBfsgGvSEzRxUueibUCaujB/El70HGrMlTnXeERiyd/2Y=" + +matrix: + include: + - rust: beta + - rust: nightly + +notifications: + email: + on_success: never +os: + - 
linux
  - osx
diff --git a/collector/compile-benchmarks/cargo/socket2-0.2.3/Cargo.toml b/collector/compile-benchmarks/cargo/socket2-0.2.3/Cargo.toml
new file mode 100644
index 000000000..4e63117f5
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/socket2-0.2.3/Cargo.toml
@@ -0,0 +1,44 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g. crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "socket2"
+version = "0.2.3"
+authors = ["Alex Crichton <alex@alexcrichton.com>"]
+description = "Utilities for handling networking sockets with a maximal amount of configuration\npossible intended.\n"
+homepage = "https://github.com/alexcrichton/socket2-rs"
+documentation = "https://docs.rs/socket2"
+readme = "README.md"
+license = "MIT/Apache-2.0"
+repository = "https://github.com/alexcrichton/socket2-rs"
+[package.metadata.docs.rs]
+all-features = true
+[dev-dependencies.tempdir]
+version = "0.3"
+
+[features]
+reuseport = []
+pair = []
+unix = []
+[target."cfg(windows)".dependencies.ws2_32-sys]
+version = "0.2"
+
+[target."cfg(windows)".dependencies.winapi]
+version = "0.2"
+
+[target."cfg(windows)".dependencies.kernel32-sys]
+version = "0.2"
+[target."cfg(unix)".dependencies.libc]
+version = "0.2.14"
+
+[target."cfg(unix)".dependencies.cfg-if]
+version = "0.1"
diff --git a/collector/compile-benchmarks/cargo/socket2-0.2.3/Cargo.toml.orig b/collector/compile-benchmarks/cargo/socket2-0.2.3/Cargo.toml.orig
new file mode 100644
index 000000000..09b6680a3
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/socket2-0.2.3/Cargo.toml.orig
@@ -0,0 +1,33 @@
+[package]
+name = "socket2"
+version = "0.2.3"
+authors = ["Alex Crichton <alex@alexcrichton.com>"]
+license = "MIT/Apache-2.0"
+readme = "README.md"
+repository = "https://github.com/alexcrichton/socket2-rs"
+homepage = "https://github.com/alexcrichton/socket2-rs"
+documentation = "https://docs.rs/socket2"
+description = """
+Utilities for handling networking sockets with a maximal amount of configuration
+possible intended.
+"""
+
+[package.metadata.docs.rs]
+all-features = true
+
+[target."cfg(windows)".dependencies]
+ws2_32-sys = "0.2"
+winapi = "0.2"
+kernel32-sys = "0.2"
+
+[target."cfg(unix)".dependencies]
+cfg-if = "0.1"
+libc = "0.2.14"
+
+[dev-dependencies]
+tempdir = "0.3"
+
+[features]
+reuseport = []
+pair = []
+unix = []
diff --git a/collector/compile-benchmarks/cargo/socket2-0.2.3/LICENSE-APACHE b/collector/compile-benchmarks/cargo/socket2-0.2.3/LICENSE-APACHE
new file mode 100644
index 000000000..16fe87b06
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/socket2-0.2.3/LICENSE-APACHE
@@ -0,0 +1,201 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+   "License" shall mean the terms and conditions for use, reproduction,
+   and distribution as defined by Sections 1 through 9 of this document.
+
+   "Licensor" shall mean the copyright owner or entity authorized by
+   the copyright owner that is granting the License.
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/collector/compile-benchmarks/cargo/socket2-0.2.3/LICENSE-MIT b/collector/compile-benchmarks/cargo/socket2-0.2.3/LICENSE-MIT new file mode 100644 index 000000000..39e0ed660 --- /dev/null +++ b/collector/compile-benchmarks/cargo/socket2-0.2.3/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2014 Alex Crichton + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/collector/compile-benchmarks/cargo/socket2-0.2.3/README.md b/collector/compile-benchmarks/cargo/socket2-0.2.3/README.md new file mode 100644 index 000000000..02c16d7d1 --- /dev/null +++ b/collector/compile-benchmarks/cargo/socket2-0.2.3/README.md @@ -0,0 +1,14 @@ +# socket2-rs + +[![Build Status](https://travis-ci.org/alexcrichton/socket2-rs.svg?branch=master)](https://travis-ci.org/alexcrichton/socket2-rs) +[![Build status](https://ci.appveyor.com/api/projects/status/hovebj1gr4bgm3d9?svg=true)](https://ci.appveyor.com/project/alexcrichton/socket2-rs) + +[Documentation](https://docs.rs/socket2) + +# License + +`socket2-rs` is primarily distributed under the terms of both the MIT license and +the Apache License (Version 2.0), with portions covered by various BSD-like +licenses. + +See LICENSE-APACHE, and LICENSE-MIT for details. diff --git a/collector/compile-benchmarks/cargo/socket2-0.2.3/src/lib.rs b/collector/compile-benchmarks/cargo/socket2-0.2.3/src/lib.rs new file mode 100644 index 000000000..40e6084e6 --- /dev/null +++ b/collector/compile-benchmarks/cargo/socket2-0.2.3/src/lib.rs @@ -0,0 +1,130 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Utilities for handling sockets +//! +//! This crate is sort of an evolution of the `net2` crate after seeing the +//! issues on it over time. The intention of this crate is to provide as direct +//! as possible access to the system's functionality for sockets as possible. No +//! extra fluff (e.g. multiple syscalls or builders) provided in this crate. As +//! a result using this crate can be a little wordy, but it should give you +//! maximal flexibility over configuration of sockets. +//! +//! # Examples +//! +//! ```no_run +//! use std::net::SocketAddr; +//! use socket2::{Socket, Domain, Type}; +//! +//! // create a TCP listener bound to two addresses +//! 
let socket = Socket::new(Domain::ipv4(), Type::stream(), None).unwrap(); +//! +//! socket.bind(&"127.0.0.1:12345".parse::().unwrap().into()).unwrap(); +//! socket.bind(&"127.0.0.1:12346".parse::().unwrap().into()).unwrap(); +//! socket.listen(128).unwrap(); +//! +//! let listener = socket.into_tcp_listener(); +//! // ... +//! ``` + +#![doc(html_root_url = "https://docs.rs/socket2/0.2")] +#![deny(missing_docs)] + +#[cfg(unix)] extern crate libc; +#[cfg(unix)] #[macro_use] extern crate cfg_if; + +#[cfg(windows)] extern crate kernel32; +#[cfg(windows)] extern crate winapi; +#[cfg(windows)] extern crate ws2_32; + +#[cfg(test)] extern crate tempdir; + +use utils::NetInt; + +#[cfg(unix)] use libc::{sockaddr_storage, socklen_t}; +#[cfg(windows)] use winapi::{SOCKADDR_STORAGE as sockaddr_storage, socklen_t}; + +mod sockaddr; +mod socket; +mod utils; + +#[cfg(unix)] #[path = "sys/unix/mod.rs"] mod sys; +#[cfg(windows)] #[path = "sys/windows.rs"] mod sys; + +/// Newtype, owned, wrapper around a system socket. +/// +/// This type simply wraps an instance of a file descriptor (`c_int`) on Unix +/// and an instance of `SOCKET` on Windows. This is the main type exported by +/// this crate and is intended to mirror the raw semantics of sockets on +/// platforms as closely as possible. Almost all methods correspond to +/// precisely one libc or OS API call which is essentially just a "Rustic +/// translation" of what's below. +/// +/// # Examples +/// +/// ```no_run +/// use std::net::SocketAddr; +/// use socket2::{Socket, Domain, Type, SockAddr}; +/// +/// // create a TCP listener bound to two addresses +/// let socket = Socket::new(Domain::ipv4(), Type::stream(), None).unwrap(); +/// +/// socket.bind(&"127.0.0.1:12345".parse::().unwrap().into()).unwrap(); +/// socket.bind(&"127.0.0.1:12346".parse::().unwrap().into()).unwrap(); +/// socket.listen(128).unwrap(); +/// +/// let listener = socket.into_tcp_listener(); +/// // ... +/// ``` +pub struct Socket { + inner: sys::Socket, +} + +/// The address of a socket. +/// +/// `SockAddr`s may be constructed directly to and from the standard library +/// `SocketAddr`, `SocketAddrV4`, and `SocketAddrV6` types. +pub struct SockAddr { + storage: sockaddr_storage, + len: socklen_t, +} + +/// Specification of the communication domain for a socket. +/// +/// This is a newtype wrapper around an integer which provides a nicer API in +/// addition to an injection point for documentation. Convenience constructors +/// such as `Domain::ipv4`, `Domain::ipv6`, etc, are provided to avoid reaching +/// into libc for various constants. +/// +/// This type is freely interconvertible with the `i32` type, however, if a raw +/// value needs to be provided. +pub struct Domain(i32); + +/// Specification of communication semantics on a socket. +/// +/// This is a newtype wrapper around an integer which provides a nicer API in +/// addition to an injection point for documentation. Convenience constructors +/// such as `Type::stream`, `Type::dgram`, etc, are provided to avoid reaching +/// into libc for various constants. +/// +/// This type is freely interconvertible with the `i32` type, however, if a raw +/// value needs to be provided. +pub struct Type(i32); + +/// Protocol specification used for creating sockets via `Socket::new`. +/// +/// This is a newtype wrapper around an integer which provides a nicer API in +/// addition to an injection point for documentation. 
+/// +/// This type is freely interconvertible with the `i32` type, however, if a raw +/// value needs to be provided. +pub struct Protocol(i32); + +fn hton(i: I) -> I { i.to_be() } diff --git a/collector/compile-benchmarks/cargo/socket2-0.2.3/src/sockaddr.rs b/collector/compile-benchmarks/cargo/socket2-0.2.3/src/sockaddr.rs new file mode 100644 index 000000000..a9050a6dc --- /dev/null +++ b/collector/compile-benchmarks/cargo/socket2-0.2.3/src/sockaddr.rs @@ -0,0 +1,194 @@ +use std::fmt; +use std::mem; +use std::net::{SocketAddrV4, SocketAddrV6, SocketAddr}; +use std::ptr; + +#[cfg(unix)] +use libc::{sockaddr, sockaddr_storage, sockaddr_in, sockaddr_in6, sa_family_t, socklen_t, AF_INET, + AF_INET6}; +#[cfg(windows)] +use winapi::{SOCKADDR as sockaddr, SOCKADDR_STORAGE as sockaddr_storage, + SOCKADDR_IN as sockaddr_in, sockaddr_in6, + ADDRESS_FAMILY as sa_family_t, socklen_t, AF_INET, AF_INET6}; + +use SockAddr; + +impl fmt::Debug for SockAddr { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + let mut builder = fmt.debug_struct("SockAddr"); + builder.field("family", &self.family()); + if let Some(addr) = self.as_inet() { + builder.field("inet", &addr); + } else if let Some(addr) = self.as_inet6() { + builder.field("inet6", &addr); + } + builder.finish() + } +} + +impl SockAddr { + /// Constructs a `SockAddr` from its raw components. + pub unsafe fn from_raw_parts(addr: *const sockaddr, len: socklen_t) -> SockAddr { + let mut storage = mem::uninitialized::(); + ptr::copy_nonoverlapping(addr as *const _ as *const u8, + &mut storage as *mut _ as *mut u8, + len as usize); + + SockAddr { + storage: storage, + len: len, + } + } + + /// Constructs a `SockAddr` with the family `AF_UNIX` and the provided path. + /// + /// This function is only available on Unix when the `unix` feature is + /// enabled. + /// + /// # Failure + /// + /// Returns an error if the path is longer than `SUN_LEN`. + #[cfg(all(unix, feature = "unix"))] + pub fn unix

+        where P: AsRef<::std::path::Path>
+    {
+        use std::cmp::Ordering;
+        use std::io;
+        use std::os::unix::ffi::OsStrExt;
+        use libc::{sockaddr_un, AF_UNIX, c_char};
+
+        unsafe {
+            let mut addr = mem::zeroed::<sockaddr_un>();
+            addr.sun_family = AF_UNIX as sa_family_t;
+
+            let bytes = path.as_ref().as_os_str().as_bytes();
+
+            match (bytes.get(0), bytes.len().cmp(&addr.sun_path.len())) {
+                // Abstract paths don't need a null terminator
+                (Some(&0), Ordering::Greater) => {
+                    return Err(io::Error::new(io::ErrorKind::InvalidInput,
+                                              "path must be no longer than SUN_LEN"));
+                }
+                (Some(&0), _) => {}
+                (_, Ordering::Greater) | (_, Ordering::Equal) => {
+                    return Err(io::Error::new(io::ErrorKind::InvalidInput,
+                                              "path must be shorter than SUN_LEN"));
+                }
+                _ => {}
+            }
+
+            for (dst, src) in addr.sun_path.iter_mut().zip(bytes) {
+                *dst = *src as c_char;
+            }
+            // null byte for pathname is already there since we zeroed up front
+
+            let base = &addr as *const _ as usize;
+            let path = &addr.sun_path as *const _ as usize;
+            let sun_path_offset = path - base;
+
+            let mut len = sun_path_offset + bytes.len();
+            match bytes.get(0) {
+                Some(&0) | None => {}
+                Some(_) => len += 1,
+            }
+            Ok(SockAddr::from_raw_parts(&addr as *const _ as *const _, len as socklen_t))
+        }
+    }
+
+    unsafe fn as_<T>(&self, family: sa_family_t) -> Option<T> {
+        if self.storage.ss_family != family {
+            return None;
+        }
+
+        Some(mem::transmute_copy(&self.storage))
+    }
+
+    /// Returns this address as a `SocketAddrV4` if it is in the `AF_INET`
+    /// family.
+    pub fn as_inet(&self) -> Option<SocketAddrV4> {
+        unsafe { self.as_(AF_INET as sa_family_t) }
+    }
+
+    /// Returns this address as a `SocketAddrV6` if it is in the `AF_INET6`
+    /// family.
+    pub fn as_inet6(&self) -> Option<SocketAddrV6> {
+        unsafe { self.as_(AF_INET6 as sa_family_t) }
+    }
+
+    /// Returns this address's family.
+    pub fn family(&self) -> sa_family_t {
+        self.storage.ss_family
+    }
+
+    /// Returns the size of this address in bytes.
+    pub fn len(&self) -> socklen_t {
+        self.len
+    }
+
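+    // Editorial note (illustrative sketch, not in the upstream sources): a
+    // `SockAddr` is normally built by converting from the std types and then
+    // inspected with the accessors above, e.g.:
+    //
+    //     let std_addr: SocketAddr = "127.0.0.1:8080".parse().unwrap();
+    //     let addr = SockAddr::from(std_addr);
+    //     assert_eq!(addr.family(), AF_INET as sa_family_t);
+    //     assert!(addr.as_inet().is_some());
+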
+    /// Returns a raw pointer to the address.
+    pub fn as_ptr(&self) -> *const sockaddr {
+        &self.storage as *const _ as *const _
+    }
+}
+
+// SocketAddrV4 and SocketAddrV6 are just wrappers around sockaddr_in and sockaddr_in6
+
+// check to make sure that the sizes at least match up
+fn _size_checks(v4: SocketAddrV4, v6: SocketAddrV6) {
+    /*unsafe {
+        mem::transmute::<SocketAddrV4, sockaddr_in>(v4);
+        mem::transmute::<SocketAddrV6, sockaddr_in6>(v6);
+    }*/
+}
+
+impl From<SocketAddrV4> for SockAddr {
+    fn from(addr: SocketAddrV4) -> SockAddr {
+        unsafe {
+            SockAddr::from_raw_parts(&addr as *const _ as *const _,
+                                     mem::size_of::<SocketAddrV4>() as socklen_t)
+        }
+    }
+}
+
+impl From<SocketAddrV6> for SockAddr {
+    fn from(addr: SocketAddrV6) -> SockAddr {
+        unsafe {
+            SockAddr::from_raw_parts(&addr as *const _ as *const _,
+                                     mem::size_of::<SocketAddrV6>() as socklen_t)
+        }
+    }
+}
+
+impl From<SocketAddr> for SockAddr {
+    fn from(addr: SocketAddr) -> SockAddr {
+        match addr {
+            SocketAddr::V4(addr) => addr.into(),
+            SocketAddr::V6(addr) => addr.into(),
+        }
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    #[test]
+    fn inet() {
+        let raw = "127.0.0.1:80".parse::<SocketAddrV4>().unwrap();
+        let addr = SockAddr::from(raw);
+        assert!(addr.as_inet6().is_none());
+        let addr = addr.as_inet().unwrap();
+        assert_eq!(raw, addr);
+    }
+
+    #[test]
+    fn inet6() {
+        let raw = "[2001:db8::ff00:42:8329]:80"
+            .parse::<SocketAddrV6>()
+            .unwrap();
+        let addr = SockAddr::from(raw);
+        assert!(addr.as_inet().is_none());
+        let addr = addr.as_inet6().unwrap();
+        assert_eq!(raw, addr);
+    }
+}
diff --git a/collector/compile-benchmarks/cargo/socket2-0.2.3/src/socket.rs b/collector/compile-benchmarks/cargo/socket2-0.2.3/src/socket.rs
new file mode 100644
index 000000000..eed39ba04
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/socket2-0.2.3/src/socket.rs
@@ -0,0 +1,852 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::fmt;
+use std::io::{self, Read, Write};
+use std::net::{self, Ipv4Addr, Ipv6Addr, Shutdown};
+use std::time::Duration;
+#[cfg(all(unix, feature = "unix"))]
+use std::os::unix::net::{UnixDatagram, UnixListener, UnixStream};
+
+#[cfg(unix)]
+use libc as c;
+#[cfg(windows)]
+use winapi as c;
+
+use sys;
+use {Socket, Protocol, Domain, Type, SockAddr};
+
+impl Socket {
+    /// Creates a new socket ready to be configured.
+    ///
+    /// This function corresponds to `socket(2)` and simply creates a new
+    /// socket, no other configuration is done and further functions must be
+    /// invoked to configure this socket.
+    pub fn new(domain: Domain,
+               type_: Type,
+               protocol: Option<Protocol>) -> io::Result<Socket> {
+        let protocol = protocol.map(|p| p.0).unwrap_or(0);
+        Ok(Socket {
+            inner: sys::Socket::new(domain.0, type_.0, protocol)?,
+        })
+    }
+
+    /// Creates a pair of sockets which are connected to each other.
+    ///
+    /// This function corresponds to `socketpair(2)`.
+    ///
+    /// This function is only available on Unix when the `pair` feature is
+    /// enabled.
+    #[cfg(all(unix, feature = "pair"))]
+    pub fn pair(domain: Domain,
+                type_: Type,
+                protocol: Option<Protocol>) -> io::Result<(Socket, Socket)> {
+        let protocol = protocol.map(|p| p.0).unwrap_or(0);
+        let sockets = sys::Socket::pair(domain.0, type_.0, protocol)?;
+        Ok((Socket { inner: sockets.0 }, Socket { inner: sockets.1 }))
+    }
+
+    /// Consumes this `Socket`, converting it to a `TcpStream`.
+ pub fn into_tcp_stream(self) -> net::TcpStream { + self.into() + } + + /// Consumes this `Socket`, converting it to a `TcpListener`. + pub fn into_tcp_listener(self) -> net::TcpListener { + self.into() + } + + /// Consumes this `Socket`, converting it to a `UdpSocket`. + pub fn into_udp_socket(self) -> net::UdpSocket { + self.into() + } + + /// Consumes this `Socket`, converting it into a `UnixStream`. + /// + /// This function is only available on Unix when the `unix` feature is + /// enabled. + #[cfg(all(unix, feature = "unix"))] + pub fn into_unix_stream(self) -> UnixStream { + self.into() + } + + /// Consumes this `Socket`, converting it into a `UnixListener`. + /// + /// This function is only available on Unix when the `unix` feature is + /// enabled. + #[cfg(all(unix, feature = "unix"))] + pub fn into_unix_listener(self) -> UnixListener { + self.into() + } + + /// Consumes this `Socket`, converting it into a `UnixDatagram`. + /// + /// This function is only available on Unix when the `unix` feature is + /// enabled. + #[cfg(all(unix, feature = "unix"))] + pub fn into_unix_datagram(self) -> UnixDatagram { + self.into() + } + + /// Initiate a connection on this socket to the specified address. + /// + /// This function directly corresponds to the connect(2) function on Windows + /// and Unix. + /// + /// An error will be returned if `listen` or `connect` has already been + /// called on this builder. + pub fn connect(&self, addr: &SockAddr) -> io::Result<()> { + self.inner.connect(addr) + } + + /// Initiate a connection on this socket to the specified address, only + /// only waiting for a certain period of time for the connection to be + /// established. + /// + /// Unlike many other methods on `Socket`, this does *not* correspond to a + /// single C function. It sets the socket to nonblocking mode, connects via + /// connect(2), and then waits for the connection to complete with poll(2) + /// on Unix and select on Windows. When the connection is complete, the + /// socket is set back to blocking mode. On Unix, this will loop over + /// `EINTR` errors. + /// + /// # Warnings + /// + /// The nonblocking state of the socket is overridden by this function - + /// it will be returned in blocking mode on success, and in an indeterminate + /// state on failure. + /// + /// If the connection request times out, it may still be processing in the + /// background - a second call to `connect` or `connect_timeout` may fail. + pub fn connect_timeout(&self, addr: &SockAddr, timeout: Duration) -> io::Result<()> { + self.inner.connect_timeout(addr, timeout) + } + + /// Binds this socket to the specified address. + /// + /// This function directly corresponds to the bind(2) function on Windows + /// and Unix. + pub fn bind(&self, addr: &SockAddr) -> io::Result<()> { + self.inner.bind(addr) + } + + /// Mark a socket as ready to accept incoming connection requests using + /// accept() + /// + /// This function directly corresponds to the listen(2) function on Windows + /// and Unix. + /// + /// An error will be returned if `listen` or `connect` has already been + /// called on this builder. + pub fn listen(&self, backlog: i32) -> io::Result<()> { + self.inner.listen(backlog) + } + + /// Accept a new incoming connection from this listener. + /// + /// This function will block the calling thread until a new connection is + /// established. When established, the corresponding `Socket` and the + /// remote peer's address will be returned. 
+    pub fn accept(&self) -> io::Result<(Socket, SockAddr)> {
+        self.inner.accept().map(|(socket, addr)| {
+            (Socket { inner: socket }, addr)
+        })
+    }
+
+    /// Returns the socket address of the local half of this TCP connection.
+    pub fn local_addr(&self) -> io::Result<SockAddr> {
+        self.inner.local_addr()
+    }
+
+    /// Returns the socket address of the remote peer of this TCP connection.
+    pub fn peer_addr(&self) -> io::Result<SockAddr> {
+        self.inner.peer_addr()
+    }
+
+    /// Creates a new independently owned handle to the underlying socket.
+    ///
+    /// The returned `Socket` is a reference to the same stream that this
+    /// object references. Both handles will read and write the same stream of
+    /// data, and options set on one stream will be propagated to the other
+    /// stream.
+    pub fn try_clone(&self) -> io::Result<Socket> {
+        self.inner.try_clone().map(|s| Socket { inner: s })
+    }
+
+    /// Get the value of the `SO_ERROR` option on this socket.
+    ///
+    /// This will retrieve the stored error in the underlying socket, clearing
+    /// the field in the process. This can be useful for checking errors between
+    /// calls.
+    pub fn take_error(&self) -> io::Result<Option<io::Error>> {
+        self.inner.take_error()
+    }
+
+    /// Moves this TCP stream into or out of nonblocking mode.
+    ///
+    /// On Unix this corresponds to calling fcntl, and on Windows this
+    /// corresponds to calling ioctlsocket.
+    pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> {
+        self.inner.set_nonblocking(nonblocking)
+    }
+
+    /// Shuts down the read, write, or both halves of this connection.
+    ///
+    /// This function will cause all pending and future I/O on the specified
+    /// portions to return immediately with an appropriate value.
+    pub fn shutdown(&self, how: Shutdown) -> io::Result<()> {
+        self.inner.shutdown(how)
+    }
+
+    /// Receives data on the socket from the remote address to which it is
+    /// connected.
+    ///
+    /// The [`connect`] method will connect this socket to a remote address. This
+    /// method will fail if the socket is not connected.
+    ///
+    /// [`connect`]: #method.connect
+    pub fn recv(&self, buf: &mut [u8]) -> io::Result<usize> {
+        self.inner.recv(buf)
+    }
+
+    /// Receives data on the socket from the remote address to which it is
+    /// connected, without removing that data from the queue. On success,
+    /// returns the number of bytes peeked.
+    ///
+    /// Successive calls return the same data. This is accomplished by passing
+    /// `MSG_PEEK` as a flag to the underlying `recv` system call.
+    pub fn peek(&self, buf: &mut [u8]) -> io::Result<usize> {
+        self.inner.peek(buf)
+    }
+
+    /// Receives data from the socket. On success, returns the number of bytes
+    /// read and the address from whence the data came.
+    pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SockAddr)> {
+        self.inner.recv_from(buf)
+    }
+
+    /// Receives data from the socket, without removing it from the queue.
+    ///
+    /// Successive calls return the same data. This is accomplished by passing
+    /// `MSG_PEEK` as a flag to the underlying `recvfrom` system call.
+    ///
+    /// On success, returns the number of bytes peeked and the address from
+    /// whence the data came.
+    pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SockAddr)> {
+        self.inner.peek_from(buf)
+    }
+
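+    // Editorial note (illustrative sketch, not in the upstream sources):
+    // `peek_from` leaves the datagram queued, so the next `recv_from` sees
+    // the same bytes, e.g. for a UDP socket with one pending datagram:
+    //
+    //     let mut buf = [0u8; 1500];
+    //     let (peeked, _) = socket.peek_from(&mut buf).unwrap();
+    //     let (received, _) = socket.recv_from(&mut buf).unwrap();
+    //     assert_eq!(peeked, received);
+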
+    /// Sends data on the socket to a connected peer.
+    ///
+    /// This is typically used on TCP sockets or datagram sockets which have
+    /// been connected.
+    ///
+    /// On success returns the number of bytes that were sent.
+    pub fn send(&self, buf: &[u8]) -> io::Result<usize> {
+        self.inner.send(buf)
+    }
+
+    /// Sends data on the socket to the given address. On success, returns the
+    /// number of bytes written.
+    ///
+    /// This is typically used on UDP or datagram-oriented sockets. On success
+    /// returns the number of bytes that were sent.
+    pub fn send_to(&self, buf: &[u8], addr: &SockAddr) -> io::Result<usize> {
+        self.inner.send_to(buf, addr)
+    }
+
+    // ================================================
+
+    /// Gets the value of the `IP_TTL` option for this socket.
+    ///
+    /// For more information about this option, see [`set_ttl`][link].
+    ///
+    /// [link]: #method.set_ttl
+    pub fn ttl(&self) -> io::Result<u32> {
+        self.inner.ttl()
+    }
+
+    /// Sets the value for the `IP_TTL` option on this socket.
+    ///
+    /// This value sets the time-to-live field that is used in every packet sent
+    /// from this socket.
+    pub fn set_ttl(&self, ttl: u32) -> io::Result<()> {
+        self.inner.set_ttl(ttl)
+    }
+
+    /// Gets the value of the `IPV6_V6ONLY` option for this socket.
+    ///
+    /// For more information about this option, see [`set_only_v6`][link].
+    ///
+    /// [link]: #method.set_only_v6
+    pub fn only_v6(&self) -> io::Result<bool> {
+        self.inner.only_v6()
+    }
+
+    /// Sets the value for the `IPV6_V6ONLY` option on this socket.
+    ///
+    /// If this is set to `true` then the socket is restricted to sending and
+    /// receiving IPv6 packets only. In this case two IPv4 and IPv6 applications
+    /// can bind the same port at the same time.
+    ///
+    /// If this is set to `false` then the socket can be used to send and
+    /// receive packets from an IPv4-mapped IPv6 address.
+    pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> {
+        self.inner.set_only_v6(only_v6)
+    }
+
+    /// Returns the read timeout of this socket.
+    ///
+    /// If the timeout is `None`, then `read` calls will block indefinitely.
+    pub fn read_timeout(&self) -> io::Result<Option<Duration>> {
+        self.inner.read_timeout()
+    }
+
+    /// Sets the read timeout to the timeout specified.
+    ///
+    /// If the value specified is `None`, then `read` calls will block
+    /// indefinitely. It is an error to pass the zero `Duration` to this
+    /// method.
+    pub fn set_read_timeout(&self, dur: Option<Duration>) -> io::Result<()> {
+        self.inner.set_read_timeout(dur)
+    }
+
+    /// Returns the write timeout of this socket.
+    ///
+    /// If the timeout is `None`, then `write` calls will block indefinitely.
+    pub fn write_timeout(&self) -> io::Result<Option<Duration>> {
+        self.inner.write_timeout()
+    }
+
+    /// Sets the write timeout to the timeout specified.
+    ///
+    /// If the value specified is `None`, then `write` calls will block
+    /// indefinitely. It is an error to pass the zero `Duration` to this
+    /// method.
+    pub fn set_write_timeout(&self, dur: Option<Duration>) -> io::Result<()> {
+        self.inner.set_write_timeout(dur)
+    }
+
+    /// Gets the value of the `TCP_NODELAY` option on this socket.
+    ///
+    /// For more information about this option, see [`set_nodelay`][link].
+    ///
+    /// [link]: #method.set_nodelay
+    pub fn nodelay(&self) -> io::Result<bool> {
+        self.inner.nodelay()
+    }
+
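+    // Editorial note (illustrative sketch, not in the upstream sources):
+    // `None` disables a timeout, while a zero `Duration` is rejected, e.g.:
+    //
+    //     socket.set_read_timeout(Some(Duration::from_secs(5))).unwrap();
+    //     assert!(socket.set_read_timeout(Some(Duration::new(0, 0))).is_err());
+    //     socket.set_read_timeout(None).unwrap();  // reads block indefinitely again
+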
+    /// Sets the value of the `TCP_NODELAY` option on this socket.
+    ///
+    /// If set, this option disables the Nagle algorithm. This means that
+    /// segments are always sent as soon as possible, even if there is only a
+    /// small amount of data. When not set, data is buffered until there is a
+    /// sufficient amount to send out, thereby avoiding the frequent sending of
+    /// small packets.
+    pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> {
+        self.inner.set_nodelay(nodelay)
+    }
+
+    /// Gets the value of the `SO_BROADCAST` option for this socket.
+    ///
+    /// For more information about this option, see
+    /// [`set_broadcast`][link].
+    ///
+    /// [link]: #method.set_broadcast
+    pub fn broadcast(&self) -> io::Result<bool> {
+        self.inner.broadcast()
+    }
+
+    /// Sets the value of the `SO_BROADCAST` option for this socket.
+    ///
+    /// When enabled, this socket is allowed to send packets to a broadcast
+    /// address.
+    pub fn set_broadcast(&self, broadcast: bool) -> io::Result<()> {
+        self.inner.set_broadcast(broadcast)
+    }
+
+    /// Gets the value of the `IP_MULTICAST_LOOP` option for this socket.
+    ///
+    /// For more information about this option, see
+    /// [`set_multicast_loop_v4`][link].
+    ///
+    /// [link]: #method.set_multicast_loop_v4
+    pub fn multicast_loop_v4(&self) -> io::Result<bool> {
+        self.inner.multicast_loop_v4()
+    }
+
+    /// Sets the value of the `IP_MULTICAST_LOOP` option for this socket.
+    ///
+    /// If enabled, multicast packets will be looped back to the local socket.
+    /// Note that this may not have any effect on IPv6 sockets.
+    pub fn set_multicast_loop_v4(&self, multicast_loop_v4: bool) -> io::Result<()> {
+        self.inner.set_multicast_loop_v4(multicast_loop_v4)
+    }
+
+    /// Gets the value of the `IP_MULTICAST_TTL` option for this socket.
+    ///
+    /// For more information about this option, see
+    /// [`set_multicast_ttl_v4`][link].
+    ///
+    /// [link]: #method.set_multicast_ttl_v4
+    pub fn multicast_ttl_v4(&self) -> io::Result<u32> {
+        self.inner.multicast_ttl_v4()
+    }
+
+    /// Sets the value of the `IP_MULTICAST_TTL` option for this socket.
+    ///
+    /// Indicates the time-to-live value of outgoing multicast packets for
+    /// this socket. The default value is 1 which means that multicast packets
+    /// don't leave the local network unless explicitly requested.
+    ///
+    /// Note that this may not have any effect on IPv6 sockets.
+    pub fn set_multicast_ttl_v4(&self, multicast_ttl_v4: u32) -> io::Result<()> {
+        self.inner.set_multicast_ttl_v4(multicast_ttl_v4)
+    }
+
+    /// Gets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
+    ///
+    /// For more information about this option, see
+    /// [`set_multicast_loop_v6`][link].
+    ///
+    /// [link]: #method.set_multicast_loop_v6
+    pub fn multicast_loop_v6(&self) -> io::Result<bool> {
+        self.inner.multicast_loop_v6()
+    }
+
+    /// Sets the value of the `IPV6_MULTICAST_LOOP` option for this socket.
+    ///
+    /// Controls whether this socket sees the multicast packets it sends itself.
+    /// Note that this may not have any effect on IPv4 sockets.
+    pub fn set_multicast_loop_v6(&self, multicast_loop_v6: bool) -> io::Result<()> {
+        self.inner.set_multicast_loop_v6(multicast_loop_v6)
+    }
+
+
+    /// Executes an operation of the `IP_ADD_MEMBERSHIP` type.
+    ///
+    /// This function specifies a new multicast group for this socket to join.
+    /// The address must be a valid multicast address, and `interface` is the
+    /// address of the local interface with which the system should join the
+    /// multicast group. If it's equal to `INADDR_ANY` then an appropriate
+    /// interface is chosen by the system.
+    pub fn join_multicast_v4(&self,
+                             multiaddr: &Ipv4Addr,
+                             interface: &Ipv4Addr) -> io::Result<()> {
+        self.inner.join_multicast_v4(multiaddr, interface)
+    }
+
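+    // Editorial note (illustrative sketch, not in the upstream sources):
+    // joining an IPv4 multicast group on a system-chosen interface;
+    // 239.0.0.1 is an arbitrary administratively-scoped group used purely
+    // for illustration:
+    //
+    //     let group = Ipv4Addr::new(239, 0, 0, 1);
+    //     let any = Ipv4Addr::new(0, 0, 0, 0);  // INADDR_ANY
+    //     socket.join_multicast_v4(&group, &any).unwrap();
+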
+    /// Executes an operation of the `IPV6_ADD_MEMBERSHIP` type.
+    ///
+    /// This function specifies a new multicast group for this socket to join.
+    /// The address must be a valid multicast address, and `interface` is the
+    /// index of the interface to join/leave (or 0 to indicate any interface).
+    pub fn join_multicast_v6(&self,
+                             multiaddr: &Ipv6Addr,
+                             interface: u32) -> io::Result<()> {
+        self.inner.join_multicast_v6(multiaddr, interface)
+    }
+
+    /// Executes an operation of the `IP_DROP_MEMBERSHIP` type.
+    ///
+    /// For more information about this option, see
+    /// [`join_multicast_v4`][link].
+    ///
+    /// [link]: #method.join_multicast_v4
+    pub fn leave_multicast_v4(&self,
+                              multiaddr: &Ipv4Addr,
+                              interface: &Ipv4Addr) -> io::Result<()> {
+        self.inner.leave_multicast_v4(multiaddr, interface)
+    }
+
+    /// Executes an operation of the `IPV6_DROP_MEMBERSHIP` type.
+    ///
+    /// For more information about this option, see
+    /// [`join_multicast_v6`][link].
+    ///
+    /// [link]: #method.join_multicast_v6
+    pub fn leave_multicast_v6(&self,
+                              multiaddr: &Ipv6Addr,
+                              interface: u32) -> io::Result<()> {
+        self.inner.leave_multicast_v6(multiaddr, interface)
+    }
+
+    /// Reads the linger duration for this socket by getting the `SO_LINGER`
+    /// option.
+    pub fn linger(&self) -> io::Result<Option<Duration>> {
+        self.inner.linger()
+    }
+
+    /// Sets the linger duration of this socket by setting the `SO_LINGER` option.
+    pub fn set_linger(&self, dur: Option<Duration>) -> io::Result<()> {
+        self.inner.set_linger(dur)
+    }
+
+    /// Check the `SO_REUSEADDR` option on this socket.
+    pub fn reuse_address(&self) -> io::Result<bool> {
+        self.inner.reuse_address()
+    }
+
+    /// Set value for the `SO_REUSEADDR` option on this socket.
+    ///
+    /// This indicates that further calls to `bind` may allow reuse of local
+    /// addresses. For IPv4 sockets this means that a socket may bind even when
+    /// there's a socket already listening on this port.
+    pub fn set_reuse_address(&self, reuse: bool) -> io::Result<()> {
+        self.inner.set_reuse_address(reuse)
+    }
+
+    /// Gets the value of the `SO_RCVBUF` option on this socket.
+    ///
+    /// For more information about this option, see
+    /// [`set_recv_buffer_size`][link].
+    ///
+    /// [link]: #method.set_recv_buffer_size
+    pub fn recv_buffer_size(&self) -> io::Result<usize> {
+        self.inner.recv_buffer_size()
+    }
+
+    /// Sets the value of the `SO_RCVBUF` option on this socket.
+    ///
+    /// Changes the size of the operating system's receive buffer associated
+    /// with the socket.
+    pub fn set_recv_buffer_size(&self, size: usize) -> io::Result<()> {
+        self.inner.set_recv_buffer_size(size)
+    }
+
+    /// Gets the value of the `SO_SNDBUF` option on this socket.
+    ///
+    /// For more information about this option, see [`set_send_buffer_size`][link].
+    ///
+    /// [link]: #method.set_send_buffer_size
+    pub fn send_buffer_size(&self) -> io::Result<usize> {
+        self.inner.send_buffer_size()
+    }
+
+    /// Sets the value of the `SO_SNDBUF` option on this socket.
+    ///
+    /// Changes the size of the operating system's send buffer associated with
+    /// the socket.
+    pub fn set_send_buffer_size(&self, size: usize) -> io::Result<()> {
+        self.inner.set_send_buffer_size(size)
+    }
+
+    /// Returns whether keepalive messages are enabled on this socket, and if so
+    /// the duration of time between them.
+    ///
+    /// For more information about this option, see [`set_keepalive`][link].
+    ///
+    /// [link]: #method.set_keepalive
+    pub fn keepalive(&self) -> io::Result<Option<Duration>> {
+        self.inner.keepalive()
+    }
+
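+    // Editorial note (illustrative sketch, not in the upstream sources):
+    // `SO_REUSEADDR` only affects `bind` calls made after it is set, so the
+    // usual order is:
+    //
+    //     socket.set_reuse_address(true).unwrap();
+    //     assert!(socket.reuse_address().unwrap());
+    //     socket.bind(&addr).unwrap();  // `addr` is some previously built SockAddr
+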
+    /// Sets whether keepalive messages are enabled to be sent on this socket.
+    ///
+    /// On Unix, this option will set the `SO_KEEPALIVE` as well as the
+    /// `TCP_KEEPALIVE` or `TCP_KEEPIDLE` option (depending on your platform).
+    /// On Windows, this will set the `SIO_KEEPALIVE_VALS` option.
+    ///
+    /// If `None` is specified then keepalive messages are disabled, otherwise
+    /// the duration specified will be the time to remain idle before sending a
+    /// TCP keepalive probe.
+    ///
+    /// Some platforms specify this value in seconds, so sub-second
+    /// specifications may be omitted.
+    pub fn set_keepalive(&self, keepalive: Option<Duration>) -> io::Result<()> {
+        self.inner.set_keepalive(keepalive)
+    }
+
+    /// Check the value of the `SO_REUSEPORT` option on this socket.
+    ///
+    /// This function is only available on Unix when the `reuseport` feature is
+    /// enabled.
+    #[cfg(all(unix, feature = "reuseport"))]
+    pub fn reuse_port(&self) -> io::Result<bool> {
+        self.inner.reuse_port()
+    }
+
+    /// Set value for the `SO_REUSEPORT` option on this socket.
+    ///
+    /// This indicates that further calls to `bind` may allow reuse of local
+    /// addresses. For IPv4 sockets this means that a socket may bind even when
+    /// there's a socket already listening on this port.
+    ///
+    /// This function is only available on Unix when the `reuseport` feature is
+    /// enabled.
+    #[cfg(all(unix, feature = "reuseport"))]
+    pub fn set_reuse_port(&self, reuse: bool) -> io::Result<()> {
+        self.inner.set_reuse_port(reuse)
+    }
+}
+
+impl Read for Socket {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        self.inner.read(buf)
+    }
+}
+
+impl<'a> Read for &'a Socket {
+    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
+        (&self.inner).read(buf)
+    }
+}
+
+impl Write for Socket {
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        self.inner.write(buf)
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        self.inner.flush()
+    }
+}
+
+impl<'a> Write for &'a Socket {
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        (&self.inner).write(buf)
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        (&self.inner).flush()
+    }
+}
+
+impl fmt::Debug for Socket {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        self.inner.fmt(f)
+    }
+}
+
+impl From<net::TcpStream> for Socket {
+    fn from(socket: net::TcpStream) -> Socket {
+        Socket { inner: socket.into() }
+    }
+}
+
+impl From<net::TcpListener> for Socket {
+    fn from(socket: net::TcpListener) -> Socket {
+        Socket { inner: socket.into() }
+    }
+}
+
+impl From<net::UdpSocket> for Socket {
+    fn from(socket: net::UdpSocket) -> Socket {
+        Socket { inner: socket.into() }
+    }
+}
+
+#[cfg(all(unix, feature = "unix"))]
+impl From<UnixStream> for Socket {
+    fn from(socket: UnixStream) -> Socket {
+        Socket { inner: socket.into() }
+    }
+}
+
+#[cfg(all(unix, feature = "unix"))]
+impl From<UnixListener> for Socket {
+    fn from(socket: UnixListener) -> Socket {
+        Socket { inner: socket.into() }
+    }
+}
+
+#[cfg(all(unix, feature = "unix"))]
+impl From<UnixDatagram> for Socket {
+    fn from(socket: UnixDatagram) -> Socket {
+        Socket { inner: socket.into() }
+    }
+}
+
+impl From<Socket> for net::TcpStream {
+    fn from(socket: Socket) -> net::TcpStream {
+        socket.inner.into()
+    }
+}
+
+impl From<Socket> for net::TcpListener {
+    fn from(socket: Socket) -> net::TcpListener {
+        socket.inner.into()
+    }
+}
+
+impl From<Socket> for net::UdpSocket {
+    fn from(socket: Socket) -> net::UdpSocket {
+        socket.inner.into()
+    }
+}
+
+#[cfg(all(unix, feature = "unix"))]
+impl From<Socket> for UnixStream {
+    fn from(socket: Socket) -> UnixStream {
+        socket.inner.into()
+    }
+}
+
+#[cfg(all(unix, feature = "unix"))]
+impl From<Socket> for UnixListener {
+    fn from(socket: Socket) -> UnixListener {
+        socket.inner.into()
+    }
+}
+
+#[cfg(all(unix, feature = "unix"))]
+impl From<Socket> for UnixDatagram {
+    fn from(socket: Socket) -> UnixDatagram {
+        socket.inner.into()
+    }
+}
+
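+// Editorial note (illustrative sketch, not in the upstream sources): the
+// `From` impls above let a fully configured `Socket` be handed off to the
+// standard library types once setup is done, e.g.:
+//
+//     let socket = Socket::new(Domain::ipv4(), Type::stream(), None).unwrap();
+//     socket.bind(&"127.0.0.1:0".parse::<std::net::SocketAddr>().unwrap().into()).unwrap();
+//     socket.listen(128).unwrap();
+//     let listener: net::TcpListener = socket.into();
+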
+impl Domain {
+    /// Domain for IPv4 communication, corresponding to `AF_INET`.
+    pub fn ipv4() -> Domain {
+        Domain(c::AF_INET)
+    }
+
+    /// Domain for IPv6 communication, corresponding to `AF_INET6`.
+    pub fn ipv6() -> Domain {
+        Domain(c::AF_INET6)
+    }
+
+    /// Domain for Unix socket communication, corresponding to `AF_UNIX`.
+    ///
+    /// This function is only available on Unix when the `unix` feature is
+    /// activated.
+    #[cfg(all(unix, feature = "unix"))]
+    pub fn unix() -> Domain {
+        Domain(c::AF_UNIX)
+    }
+}
+
+impl From<i32> for Domain {
+    fn from(a: i32) -> Domain {
+        Domain(a)
+    }
+}
+
+impl From<Domain> for i32 {
+    fn from(a: Domain) -> i32 {
+        a.0
+    }
+}
+
+impl Type {
+    /// Type corresponding to `SOCK_STREAM`
+    ///
+    /// Used for protocols such as TCP.
+    pub fn stream() -> Type {
+        Type(c::SOCK_STREAM)
+    }
+
+    /// Type corresponding to `SOCK_DGRAM`
+    ///
+    /// Used for protocols such as UDP.
+    pub fn dgram() -> Type {
+        Type(c::SOCK_DGRAM)
+    }
+
+    /// Type corresponding to `SOCK_SEQPACKET`
+    pub fn seqpacket() -> Type {
+        Type(c::SOCK_SEQPACKET)
+    }
+
+    /// Type corresponding to `SOCK_RAW`
+    pub fn raw() -> Type {
+        Type(c::SOCK_RAW)
+    }
+}
+
+impl From<i32> for Type {
+    fn from(a: i32) -> Type {
+        Type(a)
+    }
+}
+
+impl From<Type> for i32 {
+    fn from(a: Type) -> i32 {
+        a.0
+    }
+}
+
+impl From<i32> for Protocol {
+    fn from(a: i32) -> Protocol {
+        Protocol(a)
+    }
+}
+
+impl From<Protocol> for i32 {
+    fn from(a: Protocol) -> i32 {
+        a.0
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use std::net::SocketAddr;
+
+    use super::*;
+
+    #[test]
+    fn connect_timeout_unrouteable() {
+        // this IP is unroutable, so connections should always time out
+        let addr = "10.255.255.1:80".parse::<SocketAddr>().unwrap().into();
+
+        let socket = Socket::new(Domain::ipv4(), Type::stream(), None).unwrap();
+        match socket.connect_timeout(&addr, Duration::from_millis(250)) {
+            Ok(_) => panic!("unexpected success"),
+            Err(ref e) if e.kind() == io::ErrorKind::TimedOut => {}
+            Err(e) => panic!("unexpected error {}", e),
+        }
+    }
+
+    #[test]
+    fn connect_timeout_valid() {
+        let socket = Socket::new(Domain::ipv4(), Type::stream(), None).unwrap();
+        socket.bind(&"127.0.0.1:0".parse::<SocketAddr>().unwrap().into()).unwrap();
+        socket.listen(128).unwrap();
+
+        let addr = socket.local_addr().unwrap();
+
+        let socket = Socket::new(Domain::ipv4(), Type::stream(), None).unwrap();
+        socket.connect_timeout(&addr, Duration::from_millis(250)).unwrap();
+    }
+
+    #[test]
+    #[cfg(all(unix, feature = "pair", feature = "unix"))]
+    fn pair() {
+        let (mut a, mut b) = Socket::pair(Domain::unix(), Type::stream(), None).unwrap();
+        a.write_all(b"hello world").unwrap();
+        let mut buf = [0; 11];
+        b.read_exact(&mut buf).unwrap();
+        assert_eq!(buf, &b"hello world"[..]);
+    }
+
+    #[test]
+    #[cfg(all(unix, feature = "unix"))]
+    fn unix() {
+        use tempdir::TempDir;
+
+        let dir = TempDir::new("unix").unwrap();
+        let addr = SockAddr::unix(dir.path().join("sock")).unwrap();
+
+        let listener = Socket::new(Domain::unix(), Type::stream(), None).unwrap();
+        listener.bind(&addr).unwrap();
+        listener.listen(10).unwrap();
+
+        let mut a = Socket::new(Domain::unix(), Type::stream(), None).unwrap();
+        a.connect(&addr).unwrap();
+
+        let mut b = listener.accept().unwrap().0;
+
+        a.write_all(b"hello world").unwrap();
+        let mut buf = [0; 11];
+        b.read_exact(&mut buf).unwrap();
+        assert_eq!(buf, &b"hello world"[..]);
+    }
+}
diff --git 
a/collector/compile-benchmarks/cargo/socket2-0.2.3/src/sys/unix/mod.rs b/collector/compile-benchmarks/cargo/socket2-0.2.3/src/sys/unix/mod.rs
new file mode 100644
index 000000000..3d1fcb89d
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/socket2-0.2.3/src/sys/unix/mod.rs
@@ -0,0 +1,1028 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::cmp;
+use std::fmt;
+use std::io::{Read, Write, ErrorKind};
+use std::io;
+use std::mem;
+use std::net::Shutdown;
+use std::net::{self, Ipv4Addr, Ipv6Addr};
+use std::ops::Neg;
+use std::os::unix::prelude::*;
+use std::sync::atomic::{AtomicBool, Ordering, ATOMIC_BOOL_INIT};
+use std::time::{Duration, Instant};
+#[cfg(feature = "unix")]
+use std::os::unix::net::{UnixDatagram, UnixListener, UnixStream};
+
+use libc::{self, c_void, c_int};
+use libc::{sockaddr, socklen_t, ssize_t};
+
+cfg_if! {
+    if #[cfg(any(target_os = "dragonfly", target_os = "freebsd",
+                 target_os = "ios", target_os = "macos",
+                 target_os = "openbsd", target_os = "netbsd",
+                 target_os = "solaris", target_os = "haiku"))] {
+        use libc::IPV6_JOIN_GROUP as IPV6_ADD_MEMBERSHIP;
+        use libc::IPV6_LEAVE_GROUP as IPV6_DROP_MEMBERSHIP;
+    } else {
+        use libc::IPV6_ADD_MEMBERSHIP;
+        use libc::IPV6_DROP_MEMBERSHIP;
+    }
+}
+
+cfg_if! {
+    if #[cfg(any(target_os = "linux", target_os = "android",
+                 target_os = "dragonfly", target_os = "freebsd",
+                 target_os = "openbsd", target_os = "netbsd",
+                 target_os = "haiku", target_os = "bitrig"))] {
+        use libc::MSG_NOSIGNAL;
+    } else {
+        const MSG_NOSIGNAL: c_int = 0x0;
+    }
+}
+
+cfg_if! {
+    if #[cfg(any(target_os = "macos", target_os = "ios"))] {
+        use libc::TCP_KEEPALIVE as KEEPALIVE_OPTION;
+    } else if #[cfg(any(target_os = "openbsd", target_os = "netbsd", target_os = "haiku"))] {
+        use libc::SO_KEEPALIVE as KEEPALIVE_OPTION;
+    } else {
+        use libc::TCP_KEEPIDLE as KEEPALIVE_OPTION;
+    }
+}
+
+use SockAddr;
+use utils::One;
+
+#[macro_use]
+#[cfg(target_os = "linux")]
+mod weak;
+
+pub struct Socket {
+    fd: c_int,
+}
+
+impl Socket {
+    pub fn new(family: c_int, ty: c_int, protocol: c_int) -> io::Result<Socket> {
+        unsafe {
+            // On linux we first attempt to pass the SOCK_CLOEXEC flag to
+            // atomically create the socket and set it as CLOEXEC. Support for
+            // this option, however, was added in 2.6.27, and we still support
+            // 2.6.18 as a kernel, so if the returned error is EINVAL we
+            // fallthrough to the fallback.
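+            // (Editorial note: on kernels without SOCK_CLOEXEC the call above
+            // fails with EINVAL and we fall through to a plain socket(2) call
+            // followed by a separate fcntl(FD_CLOEXEC); unlike the fast path,
+            // that two-step sequence is not atomic with respect to a
+            // concurrent fork/exec.)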
+ #[cfg(target_os = "linux")] { + match cvt(libc::socket(family, ty | libc::SOCK_CLOEXEC, protocol)) { + Ok(fd) => return Ok(Socket::from_raw_fd(fd)), + Err(ref e) if e.raw_os_error() == Some(libc::EINVAL) => {} + Err(e) => return Err(e), + } + } + + let fd = cvt(libc::socket(family, ty, protocol))?; + let fd = Socket::from_raw_fd(fd); + set_cloexec(fd.as_raw_fd())?; + #[cfg(target_os = "macos")] { + fd.setsockopt(libc::SOL_SOCKET, libc::SO_NOSIGPIPE, 1i32)?; + } + Ok(fd) + } + } + + pub fn pair(family: c_int, ty: c_int, protocol: c_int) -> io::Result<(Socket, Socket)> { + unsafe { + let mut fds = [0, 0]; + cvt(libc::socketpair(family, ty, protocol, fds.as_mut_ptr()))?; + let fds = (Socket::from_raw_fd(fds[0]), Socket::from_raw_fd(fds[1])); + set_cloexec(fds.0.as_raw_fd())?; + set_cloexec(fds.1.as_raw_fd())?; + #[cfg(target_os = "macos")] { + fds.0.setsockopt(libc::SOL_SOCKET, libc::SO_NOSIGPIPE, 1i32)?; + fds.1.setsockopt(libc::SOL_SOCKET, libc::SO_NOSIGPIPE, 1i32)?; + } + Ok(fds) + } + } + + pub fn bind(&self, addr: &SockAddr) -> io::Result<()> { + unsafe { + cvt(libc::bind(self.fd, addr.as_ptr(), addr.len() as _)).map(|_| ()) + } + } + + pub fn listen(&self, backlog: i32) -> io::Result<()> { + unsafe { + cvt(libc::listen(self.fd, backlog)).map(|_| ()) + } + } + + pub fn connect(&self, addr: &SockAddr) -> io::Result<()> { + unsafe { + cvt(libc::connect(self.fd, addr.as_ptr(), addr.len())).map(|_| ()) + } + } + + pub fn connect_timeout(&self, addr: &SockAddr, timeout: Duration) -> io::Result<()> { + self.set_nonblocking(true)?; + let r = self.connect(addr); + self.set_nonblocking(false)?; + + match r { + Ok(()) => return Ok(()), + // there's no io::ErrorKind conversion registered for EINPROGRESS :( + Err(ref e) if e.raw_os_error() == Some(libc::EINPROGRESS) => {} + Err(e) => return Err(e), + } + + let mut pollfd = libc::pollfd { + fd: self.fd, + events: libc::POLLOUT, + revents: 0, + }; + + if timeout.as_secs() == 0 && timeout.subsec_nanos() == 0 { + return Err(io::Error::new(io::ErrorKind::InvalidInput, + "cannot set a 0 duration timeout")); + } + + let start = Instant::now(); + + loop { + let elapsed = start.elapsed(); + if elapsed >= timeout { + return Err(io::Error::new(io::ErrorKind::TimedOut, "connection timed out")); + } + + let timeout = timeout - elapsed; + let mut timeout = timeout.as_secs() + .saturating_mul(1_000) + .saturating_add(timeout.subsec_nanos() as u64 / 1_000_000); + if timeout == 0 { + timeout = 1; + } + + let timeout = cmp::min(timeout, c_int::max_value() as u64) as c_int; + + match unsafe { libc::poll(&mut pollfd, 1, timeout) } { + -1 => { + let err = io::Error::last_os_error(); + if err.kind() != io::ErrorKind::Interrupted { + return Err(err); + } + } + 0 => return Err(io::Error::new(io::ErrorKind::TimedOut, "connection timed out")), + _ => { + if pollfd.revents & libc::POLLOUT == 0 { + if let Some(e) = self.take_error()? 
{
+                            return Err(e);
+                        }
+                    }
+                    return Ok(());
+                }
+            }
+        }
+    }
+
+    pub fn local_addr(&self) -> io::Result<SockAddr> {
+        unsafe {
+            let mut storage: libc::sockaddr_storage = mem::zeroed();
+            let mut len = mem::size_of_val(&storage) as libc::socklen_t;
+            cvt(libc::getsockname(self.fd,
+                                  &mut storage as *mut _ as *mut _,
+                                  &mut len))?;
+            Ok(SockAddr::from_raw_parts(&storage as *const _ as *const _, len))
+        }
+    }
+
+    pub fn peer_addr(&self) -> io::Result<SockAddr> {
+        unsafe {
+            let mut storage: libc::sockaddr_storage = mem::zeroed();
+            let mut len = mem::size_of_val(&storage) as libc::socklen_t;
+            cvt(libc::getpeername(self.fd,
+                                  &mut storage as *mut _ as *mut _,
+                                  &mut len))?;
+            Ok(SockAddr::from_raw_parts(&storage as *const _ as *const _, len))
+        }
+    }
+
+    pub fn try_clone(&self) -> io::Result<Socket> {
+        // implementation lifted from libstd
+        #[cfg(any(target_os = "android", target_os = "haiku"))]
+        use libc::F_DUPFD as F_DUPFD_CLOEXEC;
+        #[cfg(not(any(target_os = "android", target_os = "haiku")))]
+        use libc::F_DUPFD_CLOEXEC;
+
+        static CLOEXEC_FAILED: AtomicBool = ATOMIC_BOOL_INIT;
+        unsafe {
+            if !CLOEXEC_FAILED.load(Ordering::Relaxed) {
+                match cvt(libc::fcntl(self.fd, F_DUPFD_CLOEXEC, 0)) {
+                    Ok(fd) => {
+                        let fd = Socket::from_raw_fd(fd);
+                        if cfg!(target_os = "linux") {
+                            set_cloexec(fd.as_raw_fd())?;
+                        }
+                        return Ok(fd)
+                    }
+                    Err(ref e) if e.raw_os_error() == Some(libc::EINVAL) => {
+                        CLOEXEC_FAILED.store(true, Ordering::Relaxed);
+                    }
+                    Err(e) => return Err(e),
+                }
+            }
+            let fd = cvt(libc::fcntl(self.fd, libc::F_DUPFD, 0))?;
+            let fd = Socket::from_raw_fd(fd);
+            set_cloexec(fd.as_raw_fd())?;
+            Ok(fd)
+        }
+    }
+
+    #[allow(unused_mut)]
+    pub fn accept(&self) -> io::Result<(Socket, SockAddr)> {
+        let mut storage: libc::sockaddr_storage = unsafe { mem::zeroed() };
+        let mut len = mem::size_of_val(&storage) as socklen_t;
+
+        let mut socket = None;
+        #[cfg(target_os = "linux")] {
+            weak!
{ + fn accept4(c_int, *mut sockaddr, *mut socklen_t, c_int) -> c_int + } + if let Some(f) = accept4.get() { + let res = cvt_r(|| unsafe { + f(self.fd, + &mut storage as *mut _ as *mut _, + &mut len, + libc::SOCK_CLOEXEC) + }); + match res { + Ok(fd) => socket = Some(Socket { fd: fd }), + Err(ref e) if e.raw_os_error() == Some(libc::ENOSYS) => {} + Err(e) => return Err(e), + } + } + } + + let socket = match socket { + Some(socket) => socket, + None => unsafe { + let fd = cvt_r(|| { + libc::accept(self.fd, + &mut storage as *mut _ as *mut _, + &mut len) + })?; + let fd = Socket::from_raw_fd(fd); + set_cloexec(fd.as_raw_fd())?; + fd + } + }; + let addr = unsafe { SockAddr::from_raw_parts(&storage as *const _ as *const _, len) }; + Ok((socket, addr)) + } + + pub fn take_error(&self) -> io::Result> { + unsafe { + let raw: c_int = self.getsockopt(libc::SOL_SOCKET, libc::SO_ERROR)?; + if raw == 0 { + Ok(None) + } else { + Ok(Some(io::Error::from_raw_os_error(raw as i32))) + } + } + } + + pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { + unsafe { + let previous = cvt(libc::fcntl(self.fd, libc::F_GETFL))?; + let new = if nonblocking { + previous | libc::O_NONBLOCK + } else { + previous & !libc::O_NONBLOCK + }; + if new != previous { + cvt(libc::fcntl(self.fd, libc::F_SETFL, new))?; + } + Ok(()) + } + } + + pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { + let how = match how { + Shutdown::Write => libc::SHUT_WR, + Shutdown::Read => libc::SHUT_RD, + Shutdown::Both => libc::SHUT_RDWR, + }; + cvt(unsafe { libc::shutdown(self.fd, how) })?; + Ok(()) + } + + pub fn recv(&self, buf: &mut [u8]) -> io::Result { + unsafe { + let n = cvt({ + libc::recv(self.fd, + buf.as_mut_ptr() as *mut c_void, + cmp::min(buf.len(), max_len()), + 0) + })?; + Ok(n as usize) + } + } + + pub fn peek(&self, buf: &mut [u8]) -> io::Result { + unsafe { + let n = cvt({ + libc::recv(self.fd, + buf.as_mut_ptr() as *mut c_void, + cmp::min(buf.len(), max_len()), + libc::MSG_PEEK) + })?; + Ok(n as usize) + } + } + + pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SockAddr)> { + self.recvfrom(buf, 0) + } + + pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SockAddr)> { + self.recvfrom(buf, libc::MSG_PEEK) + } + + fn recvfrom(&self, buf: &mut [u8], flags: c_int) + -> io::Result<(usize, SockAddr)> { + unsafe { + let mut storage: libc::sockaddr_storage = mem::zeroed(); + let mut addrlen = mem::size_of_val(&storage) as socklen_t; + + let n = cvt({ + libc::recvfrom(self.fd, + buf.as_mut_ptr() as *mut c_void, + cmp::min(buf.len(), max_len()), + flags, + &mut storage as *mut _ as *mut _, + &mut addrlen) + })?; + let addr = SockAddr::from_raw_parts(&storage as *const _ as *const _, addrlen); + Ok((n as usize, addr)) + } + } + + pub fn send(&self, buf: &[u8]) -> io::Result { + unsafe { + let n = cvt({ + libc::send(self.fd, + buf.as_ptr() as *const c_void, + cmp::min(buf.len(), max_len()), + MSG_NOSIGNAL) + })?; + Ok(n as usize) + } + } + + pub fn send_to(&self, buf: &[u8], addr: &SockAddr) -> io::Result { + unsafe { + let n = cvt({ + libc::sendto(self.fd, + buf.as_ptr() as *const c_void, + cmp::min(buf.len(), max_len()), + MSG_NOSIGNAL, + addr.as_ptr(), + addr.len()) + })?; + Ok(n as usize) + } + } + + // ================================================ + + pub fn ttl(&self) -> io::Result { + unsafe { + let raw: c_int = self.getsockopt(libc::IPPROTO_IP, libc::IP_TTL)?; + Ok(raw as u32) + } + } + + pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { + unsafe { + 
self.setsockopt(libc::IPPROTO_IP, libc::IP_TTL, ttl as c_int) + } + } + + pub fn only_v6(&self) -> io::Result { + unsafe { + let raw: c_int = self.getsockopt(libc::IPPROTO_IPV6, + libc::IPV6_V6ONLY)?; + Ok(raw != 0) + } + } + + pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> { + unsafe { + self.setsockopt(libc::IPPROTO_IPV6, + libc::IPV6_V6ONLY, + only_v6 as c_int) + } + } + + pub fn read_timeout(&self) -> io::Result> { + unsafe { + Ok(timeval2dur(self.getsockopt(libc::SOL_SOCKET, libc::SO_RCVTIMEO)?)) + } + } + + pub fn set_read_timeout(&self, dur: Option) -> io::Result<()> { + unsafe { + self.setsockopt(libc::SOL_SOCKET, + libc::SO_RCVTIMEO, + dur2timeval(dur)?) + } + } + + pub fn write_timeout(&self) -> io::Result> { + unsafe { + Ok(timeval2dur(self.getsockopt(libc::SOL_SOCKET, libc::SO_SNDTIMEO)?)) + } + } + + pub fn set_write_timeout(&self, dur: Option) -> io::Result<()> { + unsafe { + self.setsockopt(libc::SOL_SOCKET, + libc::SO_SNDTIMEO, + dur2timeval(dur)?) + } + } + + pub fn nodelay(&self) -> io::Result { + unsafe { + let raw: c_int = self.getsockopt(libc::IPPROTO_TCP, + libc::TCP_NODELAY)?; + Ok(raw != 0) + } + } + + pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> { + unsafe { + self.setsockopt(libc::IPPROTO_TCP, + libc::TCP_NODELAY, + nodelay as c_int) + } + } + + pub fn broadcast(&self) -> io::Result { + unsafe { + let raw: c_int = self.getsockopt(libc::SOL_SOCKET, + libc::SO_BROADCAST)?; + Ok(raw != 0) + } + } + + pub fn set_broadcast(&self, broadcast: bool) -> io::Result<()> { + unsafe { + self.setsockopt(libc::SOL_SOCKET, + libc::SO_BROADCAST, + broadcast as c_int) + } + } + + pub fn multicast_loop_v4(&self) -> io::Result { + unsafe { + let raw: c_int = self.getsockopt(libc::IPPROTO_IP, + libc::IP_MULTICAST_LOOP)?; + Ok(raw != 0) + } + } + + pub fn set_multicast_loop_v4(&self, multicast_loop_v4: bool) -> io::Result<()> { + unsafe { + self.setsockopt(libc::IPPROTO_IP, + libc::IP_MULTICAST_LOOP, + multicast_loop_v4 as c_int) + } + } + + pub fn multicast_ttl_v4(&self) -> io::Result { + unsafe { + let raw: c_int = self.getsockopt(libc::IPPROTO_IP, + libc::IP_MULTICAST_TTL)?; + Ok(raw as u32) + } + } + + pub fn set_multicast_ttl_v4(&self, multicast_ttl_v4: u32) -> io::Result<()> { + unsafe { + self.setsockopt(libc::IPPROTO_IP, + libc::IP_MULTICAST_TTL, + multicast_ttl_v4 as c_int) + } + } + + pub fn multicast_loop_v6(&self) -> io::Result { + unsafe { + let raw: c_int = self.getsockopt(libc::IPPROTO_IPV6, + libc::IPV6_MULTICAST_LOOP)?; + Ok(raw != 0) + } + } + + pub fn set_multicast_loop_v6(&self, multicast_loop_v6: bool) -> io::Result<()> { + unsafe { + self.setsockopt(libc::IPPROTO_IPV6, + libc::IPV6_MULTICAST_LOOP, + multicast_loop_v6 as c_int) + } + } + + pub fn join_multicast_v4(&self, + multiaddr: &Ipv4Addr, + interface: &Ipv4Addr) -> io::Result<()> { + let multiaddr = to_s_addr(multiaddr); + let interface = to_s_addr(interface); + let mreq = libc::ip_mreq { + imr_multiaddr: libc::in_addr { s_addr: multiaddr }, + imr_interface: libc::in_addr { s_addr: interface }, + }; + unsafe { + self.setsockopt(libc::IPPROTO_IP, libc::IP_ADD_MEMBERSHIP, mreq) + } + } + + pub fn join_multicast_v6(&self, + multiaddr: &Ipv6Addr, + interface: u32) -> io::Result<()> { + let multiaddr = to_in6_addr(multiaddr); + let mreq = libc::ipv6_mreq { + ipv6mr_multiaddr: multiaddr, + ipv6mr_interface: to_ipv6mr_interface(interface), + }; + unsafe { + self.setsockopt(libc::IPPROTO_IP, IPV6_ADD_MEMBERSHIP, mreq) + } + } + + pub fn leave_multicast_v4(&self, + multiaddr: &Ipv4Addr, + 
interface: &Ipv4Addr) -> io::Result<()> {
+        let multiaddr = to_s_addr(multiaddr);
+        let interface = to_s_addr(interface);
+        let mreq = libc::ip_mreq {
+            imr_multiaddr: libc::in_addr { s_addr: multiaddr },
+            imr_interface: libc::in_addr { s_addr: interface },
+        };
+        unsafe {
+            self.setsockopt(libc::IPPROTO_IP, libc::IP_DROP_MEMBERSHIP, mreq)
+        }
+    }
+
+    pub fn leave_multicast_v6(&self,
+                              multiaddr: &Ipv6Addr,
+                              interface: u32) -> io::Result<()> {
+        let multiaddr = to_in6_addr(multiaddr);
+        let mreq = libc::ipv6_mreq {
+            ipv6mr_multiaddr: multiaddr,
+            ipv6mr_interface: to_ipv6mr_interface(interface),
+        };
+        unsafe {
+            self.setsockopt(libc::IPPROTO_IP, IPV6_DROP_MEMBERSHIP, mreq)
+        }
+    }
+
+    pub fn linger(&self) -> io::Result<Option<Duration>> {
+        unsafe {
+            Ok(linger2dur(self.getsockopt(libc::SOL_SOCKET, libc::SO_LINGER)?))
+        }
+    }
+
+    pub fn set_linger(&self, dur: Option<Duration>) -> io::Result<()> {
+        unsafe {
+            self.setsockopt(libc::SOL_SOCKET, libc::SO_LINGER, dur2linger(dur))
+        }
+    }
+
+    pub fn set_reuse_address(&self, reuse: bool) -> io::Result<()> {
+        unsafe {
+            self.setsockopt(libc::SOL_SOCKET, libc::SO_REUSEADDR, reuse as c_int)
+        }
+    }
+
+    pub fn reuse_address(&self) -> io::Result<bool> {
+        unsafe {
+            let raw: c_int = self.getsockopt(libc::SOL_SOCKET,
+                                             libc::SO_REUSEADDR)?;
+            Ok(raw != 0)
+        }
+    }
+
+    pub fn recv_buffer_size(&self) -> io::Result<usize> {
+        unsafe {
+            let raw: c_int = self.getsockopt(libc::SOL_SOCKET, libc::SO_RCVBUF)?;
+            Ok(raw as usize)
+        }
+    }
+
+    pub fn set_recv_buffer_size(&self, size: usize) -> io::Result<()> {
+        unsafe {
+            // TODO: casting usize to a c_int should be a checked cast
+            self.setsockopt(libc::SOL_SOCKET, libc::SO_RCVBUF, size as c_int)
+        }
+    }
+
+    pub fn send_buffer_size(&self) -> io::Result<usize> {
+        unsafe {
+            let raw: c_int = self.getsockopt(libc::SOL_SOCKET, libc::SO_SNDBUF)?;
+            Ok(raw as usize)
+        }
+    }
+
+    pub fn set_send_buffer_size(&self, size: usize) -> io::Result<()> {
+        unsafe {
+            // TODO: casting usize to a c_int should be a checked cast
+            self.setsockopt(libc::SOL_SOCKET, libc::SO_SNDBUF, size as c_int)
+        }
+    }
+
+    pub fn keepalive(&self) -> io::Result<Option<Duration>> {
+        unsafe {
+            let raw: c_int = self.getsockopt(libc::SOL_SOCKET,
+                                             libc::SO_KEEPALIVE)?;
+            if raw == 0 {
+                return Ok(None)
+            }
+            let secs: c_int = self.getsockopt(libc::IPPROTO_TCP,
+                                              KEEPALIVE_OPTION)?;
+            Ok(Some(Duration::new(secs as u64, 0)))
+        }
+    }
+
+    pub fn set_keepalive(&self, keepalive: Option<Duration>) -> io::Result<()> {
+        unsafe {
+            self.setsockopt(libc::SOL_SOCKET,
+                            libc::SO_KEEPALIVE,
+                            keepalive.is_some() as c_int)?;
+            if let Some(dur) = keepalive {
+                // TODO: checked cast here
+                self.setsockopt(libc::IPPROTO_TCP,
+                                KEEPALIVE_OPTION,
+                                dur.as_secs() as c_int)?;
+            }
+            Ok(())
+        }
+    }
+
+    #[cfg(all(unix, feature = "reuseport"))]
+    pub fn reuse_port(&self) -> io::Result<bool> {
+        unsafe {
+            let raw: c_int = self.getsockopt(libc::SOL_SOCKET,
+                                             libc::SO_REUSEPORT)?;
+            Ok(raw != 0)
+        }
+    }
+
+    #[cfg(all(unix, feature = "reuseport"))]
+    pub fn set_reuse_port(&self, reuse: bool) -> io::Result<()> {
+        unsafe {
+            self.setsockopt(libc::SOL_SOCKET, libc::SO_REUSEPORT, reuse as c_int)
+        }
+    }
+
+    unsafe fn setsockopt<T>(&self,
+                            opt: c_int,
+                            val: c_int,
+                            payload: T) -> io::Result<()>
+        where T: Copy,
+    {
+        let payload = &payload as *const T as *const c_void;
+        cvt(libc::setsockopt(self.fd,
+                             opt,
+                             val,
+                             payload,
+                             mem::size_of::<T>() as libc::socklen_t))?;
+        Ok(())
+    }
+
+    unsafe fn getsockopt<T: Copy>(&self, opt: c_int, val: c_int) -> io::Result<T> {
+        let mut slot: T = mem::zeroed();
+        let mut len = mem::size_of::<T>() as libc::socklen_t;
+
cvt(libc::getsockopt(self.fd, + opt, + val, + &mut slot as *mut _ as *mut _, + &mut len))?; + assert_eq!(len as usize, mem::size_of::()); + Ok(slot) + } +} + +impl Read for Socket { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + <&Socket>::read(&mut &*self, buf) + } +} + +impl<'a> Read for &'a Socket { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + unsafe { + let n = cvt({ + libc::read(self.fd, + buf.as_mut_ptr() as *mut c_void, + cmp::min(buf.len(), max_len())) + })?; + Ok(n as usize) + } + } +} + +impl Write for Socket { + fn write(&mut self, buf: &[u8]) -> io::Result { + <&Socket>::write(&mut &*self, buf) + } + + fn flush(&mut self) -> io::Result<()> { + <&Socket>::flush(&mut &*self) + } +} + +impl<'a> Write for &'a Socket { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.send(buf) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +impl fmt::Debug for Socket { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut f = f.debug_struct("Socket"); + f.field("fd", &self.fd); + if let Ok(addr) = self.local_addr() { + f.field("local_addr", &addr); + } + if let Ok(addr) = self.peer_addr() { + f.field("peer_addr", &addr); + } + f.finish() + } +} + +impl AsRawFd for Socket { + fn as_raw_fd(&self) -> c_int { + self.fd + } +} + +impl IntoRawFd for Socket { + fn into_raw_fd(self) -> c_int { + let fd = self.fd; + mem::forget(self); + return fd + } +} + +impl FromRawFd for Socket { + unsafe fn from_raw_fd(fd: c_int) -> Socket { + Socket { fd: fd } + } +} + +impl AsRawFd for ::Socket { + fn as_raw_fd(&self) -> c_int { + self.inner.as_raw_fd() + } +} + +impl IntoRawFd for ::Socket { + fn into_raw_fd(self) -> c_int { + self.inner.into_raw_fd() + } +} + +impl FromRawFd for ::Socket { + unsafe fn from_raw_fd(fd: c_int) -> ::Socket { + ::Socket { inner: Socket::from_raw_fd(fd) } + } +} + +impl Drop for Socket { + fn drop(&mut self) { + unsafe { + let _ = libc::close(self.fd); + } + } +} + +impl From for net::TcpStream { + fn from(socket: Socket) -> net::TcpStream { + unsafe { net::TcpStream::from_raw_fd(socket.into_raw_fd()) } + } +} + +impl From for net::TcpListener { + fn from(socket: Socket) -> net::TcpListener { + unsafe { net::TcpListener::from_raw_fd(socket.into_raw_fd()) } + } +} + +impl From for net::UdpSocket { + fn from(socket: Socket) -> net::UdpSocket { + unsafe { net::UdpSocket::from_raw_fd(socket.into_raw_fd()) } + } +} + +#[cfg(all(unix, feature = "unix"))] +impl From for UnixStream { + fn from(socket: Socket) -> UnixStream { + unsafe { UnixStream::from_raw_fd(socket.into_raw_fd()) } + } +} + +#[cfg(all(unix, feature = "unix"))] +impl From for UnixListener { + fn from(socket: Socket) -> UnixListener { + unsafe { UnixListener::from_raw_fd(socket.into_raw_fd()) } + } +} + +#[cfg(all(unix, feature = "unix"))] +impl From for UnixDatagram { + fn from(socket: Socket) -> UnixDatagram { + unsafe { UnixDatagram::from_raw_fd(socket.into_raw_fd()) } + } +} + +impl From for Socket { + fn from(socket: net::TcpStream) -> Socket { + unsafe { Socket::from_raw_fd(socket.into_raw_fd()) } + } +} + +impl From for Socket { + fn from(socket: net::TcpListener) -> Socket { + unsafe { Socket::from_raw_fd(socket.into_raw_fd()) } + } +} + +impl From for Socket { + fn from(socket: net::UdpSocket) -> Socket { + unsafe { Socket::from_raw_fd(socket.into_raw_fd()) } + } +} + +#[cfg(all(unix, feature = "unix"))] +impl From for Socket { + fn from(socket: UnixStream) -> Socket { + unsafe { Socket::from_raw_fd(socket.into_raw_fd()) } + } +} + +#[cfg(all(unix, feature = 
"unix"))] +impl From for Socket { + fn from(socket: UnixListener) -> Socket { + unsafe { Socket::from_raw_fd(socket.into_raw_fd()) } + } +} + +#[cfg(all(unix, feature = "unix"))] +impl From for Socket { + fn from(socket: UnixDatagram) -> Socket { + unsafe { Socket::from_raw_fd(socket.into_raw_fd()) } + } +} + +fn max_len() -> usize { + // The maximum read limit on most posix-like systems is `SSIZE_MAX`, + // with the man page quoting that if the count of bytes to read is + // greater than `SSIZE_MAX` the result is "unspecified". + // + // On macOS, however, apparently the 64-bit libc is either buggy or + // intentionally showing odd behavior by rejecting any read with a size + // larger than or equal to INT_MAX. To handle both of these the read + // size is capped on both platforms. + if cfg!(target_os = "macos") { + ::max_value() as usize - 1 + } else { + ::max_value() as usize + } +} + +fn cvt>(t: T) -> io::Result { + let one: T = T::one(); + if t == -one { + Err(io::Error::last_os_error()) + } else { + Ok(t) + } +} + +fn cvt_r(mut f: F) -> io::Result + where F: FnMut() -> T, + T: One + PartialEq + Neg +{ + loop { + match cvt(f()) { + Err(ref e) if e.kind() == ErrorKind::Interrupted => {} + other => return other, + } + } +} + +fn set_cloexec(fd: c_int) -> io::Result<()> { + unsafe { + let previous = cvt(libc::fcntl(fd, libc::F_GETFD))?; + let new = previous | libc::FD_CLOEXEC; + if new != previous { + cvt(libc::fcntl(fd, libc::F_SETFD, new))?; + } + Ok(()) + } +} + +fn dur2timeval(dur: Option) -> io::Result { + match dur { + Some(dur) => { + if dur.as_secs() == 0 && dur.subsec_nanos() == 0 { + return Err(io::Error::new(io::ErrorKind::InvalidInput, + "cannot set a 0 duration timeout")); + } + + let secs = if dur.as_secs() > libc::time_t::max_value() as u64 { + libc::time_t::max_value() + } else { + dur.as_secs() as libc::time_t + }; + let mut timeout = libc::timeval { + tv_sec: secs, + tv_usec: (dur.subsec_nanos() / 1000) as libc::suseconds_t, + }; + if timeout.tv_sec == 0 && timeout.tv_usec == 0 { + timeout.tv_usec = 1; + } + Ok(timeout) + } + None => { + Ok(libc::timeval { + tv_sec: 0, + tv_usec: 0, + }) + } + } +} + +fn timeval2dur(raw: libc::timeval) -> Option { + if raw.tv_sec == 0 && raw.tv_usec == 0 { + None + } else { + let sec = raw.tv_sec as u64; + let nsec = (raw.tv_usec as u32) * 1000; + Some(Duration::new(sec, nsec)) + } +} + +fn to_s_addr(addr: &Ipv4Addr) -> libc::in_addr_t { + let octets = addr.octets(); + ::hton(((octets[0] as libc::in_addr_t) << 24) | + ((octets[1] as libc::in_addr_t) << 16) | + ((octets[2] as libc::in_addr_t) << 8) | + ((octets[3] as libc::in_addr_t) << 0)) +} + +fn to_in6_addr(addr: &Ipv6Addr) -> libc::in6_addr { + let mut ret: libc::in6_addr = unsafe { mem::zeroed() }; + ret.s6_addr = addr.octets(); + return ret +} + +#[cfg(target_os = "android")] +fn to_ipv6mr_interface(value: u32) -> c_int { + value as c_int +} + +#[cfg(not(target_os = "android"))] +fn to_ipv6mr_interface(value: u32) -> libc::c_uint { + value as libc::c_uint +} + +fn linger2dur(linger_opt: libc::linger) -> Option { + if linger_opt.l_onoff == 0 { + None + } else { + Some(Duration::from_secs(linger_opt.l_linger as u64)) + } +} + +fn dur2linger(dur: Option) -> libc::linger { + match dur { + Some(d) => { + libc::linger { + l_onoff: 1, + l_linger: d.as_secs() as c_int, + } + } + None => libc::linger { l_onoff: 0, l_linger: 0 }, + } +} diff --git a/collector/compile-benchmarks/cargo/socket2-0.2.3/src/sys/unix/weak.rs b/collector/compile-benchmarks/cargo/socket2-0.2.3/src/sys/unix/weak.rs 
diff --git a/collector/compile-benchmarks/cargo/socket2-0.2.3/src/sys/unix/weak.rs b/collector/compile-benchmarks/cargo/socket2-0.2.3/src/sys/unix/weak.rs
new file mode 100644
index 000000000..83f979d22
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/socket2-0.2.3/src/sys/unix/weak.rs
@@ -0,0 +1,60 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::marker;
+use std::mem;
+use std::sync::atomic::{AtomicUsize, Ordering};
+
+use libc;
+
+macro_rules! weak {
+    (fn $name:ident($($t:ty),*) -> $ret:ty) => (
+        #[allow(bad_style)]
+        static $name: ::sys::weak::Weak<unsafe extern fn($($t),*) -> $ret> =
+            ::sys::weak::Weak {
+                name: concat!(stringify!($name), "\0"),
+                addr: ::std::sync::atomic::ATOMIC_USIZE_INIT,
+                _marker: ::std::marker::PhantomData,
+            };
+    )
+}
+
+pub struct Weak<F> {
+    pub name: &'static str,
+    pub addr: AtomicUsize,
+    pub _marker: marker::PhantomData<F>,
+}
+
+impl<F> Weak<F> {
+    pub fn get(&self) -> Option<&F> {
+        assert_eq!(mem::size_of::<usize>(), mem::size_of::<F>());
+        unsafe {
+            if self.addr.load(Ordering::SeqCst) == 0 {
+                let ptr = match fetch(self.name) {
+                    0 => 1,
+                    n => n,
+                };
+                self.addr.store(ptr, Ordering::SeqCst);
+            }
+            if self.addr.load(Ordering::SeqCst) == 1 {
+                None
+            } else {
+                mem::transmute::<&AtomicUsize, Option<&F>>(&self.addr)
+            }
+        }
+    }
+}
+
+unsafe fn fetch(name: &str) -> usize {
+    let name = name.as_bytes();
+    assert_eq!(name[name.len() - 1], 0);
+    libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr() as *const _) as usize
+}
+
diff --git a/collector/compile-benchmarks/cargo/socket2-0.2.3/src/sys/windows.rs b/collector/compile-benchmarks/cargo/socket2-0.2.3/src/sys/windows.rs
new file mode 100644
index 000000000..21038e3fe
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/socket2-0.2.3/src/sys/windows.rs
@@ -0,0 +1,913 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::cmp;
+use std::fmt;
+use std::io::{Read, Write};
+use std::io;
+use std::mem;
+use std::net::Shutdown;
+use std::net::{self, Ipv4Addr, Ipv6Addr};
+use std::os::windows::prelude::*;
+use std::ptr;
+use std::sync::{Once, ONCE_INIT};
+use std::time::Duration;
+
+use kernel32;
+use winapi::*;
+use ws2_32;
+
+use SockAddr;
+
+const HANDLE_FLAG_INHERIT: DWORD = 0x00000001;
+const MSG_PEEK: c_int = 0x2;
+const SD_BOTH: c_int = 2;
+const SD_RECEIVE: c_int = 0;
+const SD_SEND: c_int = 1;
+const SIO_KEEPALIVE_VALS: DWORD = 0x98000004;
+const WSA_FLAG_OVERLAPPED: DWORD = 0x01;
+
+#[repr(C)]
+struct tcp_keepalive {
+    onoff: c_ulong,
+    keepalivetime: c_ulong,
+    keepaliveinterval: c_ulong,
+}
+
+fn init() {
+    static INIT: Once = ONCE_INIT;
+
+    INIT.call_once(|| {
+        // Initialize winsock through the standard library by just creating a
+        // dummy socket. Whether this is successful or not we drop the result as
+        // libstd will be sure to have initialized winsock.
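+        // (Editorial note, assumption stated for clarity: creating any socket
+        // through libstd performs WSAStartup internally, so the result of this
+        // throwaway bind is irrelevant and deliberately ignored.)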
+ let _ = net::UdpSocket::bind("127.0.0.1:34254"); + }); +} + +fn last_error() -> io::Error { + io::Error::from_raw_os_error(unsafe { ws2_32::WSAGetLastError() }) +} + +pub struct Socket { + socket: SOCKET, +} + +impl Socket { + pub fn new(family: c_int, ty: c_int, protocol: c_int) -> io::Result { + init(); + unsafe { + let socket = match ws2_32::WSASocketW(family, + ty, + protocol, + ptr::null_mut(), + 0, + WSA_FLAG_OVERLAPPED) { + INVALID_SOCKET => return Err(last_error()), + socket => socket, + }; + let socket = Socket::from_raw_socket(socket); + socket.set_no_inherit()?; + Ok(socket) + } + } + + pub fn bind(&self, addr: &SockAddr) -> io::Result<()> { + unsafe { + if ws2_32::bind(self.socket, addr.as_ptr(), addr.len()) == 0 { + Ok(()) + } else { + Err(last_error()) + } + } + } + + pub fn listen(&self, backlog: i32) -> io::Result<()> { + unsafe { + if ws2_32::listen(self.socket, backlog) == 0 { + Ok(()) + } else { + Err(last_error()) + } + } + } + + pub fn connect(&self, addr: &SockAddr) -> io::Result<()> { + unsafe { + if ws2_32::connect(self.socket, addr.as_ptr(), addr.len()) == 0 { + Ok(()) + } else { + Err(last_error()) + } + } + } + + pub fn connect_timeout(&self, addr: &SockAddr, timeout: Duration) -> io::Result<()> { + self.set_nonblocking(true)?; + let r = self.connect(addr); + self.set_nonblocking(true)?; + + match r { + Ok(()) => return Ok(()), + Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {} + Err(e) => return Err(e), + } + + if timeout.as_secs() == 0 && timeout.subsec_nanos() == 0 { + return Err(io::Error::new(io::ErrorKind::InvalidInput, + "cannot set a 0 duration timeout")); + } + + let mut timeout = timeval { + tv_sec: timeout.as_secs() as c_long, + tv_usec: (timeout.subsec_nanos() / 1000) as c_long, + }; + if timeout.tv_sec == 0 && timeout.tv_usec == 0 { + timeout.tv_usec = 1; + } + + let fds = unsafe { + let mut fds = mem::zeroed::(); + fds.fd_count = 1; + fds.fd_array[0] = self.socket; + fds + }; + + let mut writefds = fds; + let mut errorfds = fds; + + match unsafe { ws2_32::select(1, ptr::null_mut(), &mut writefds, &mut errorfds, &timeout) } { + SOCKET_ERROR => return Err(io::Error::last_os_error()), + 0 => return Err(io::Error::new(io::ErrorKind::TimedOut, "connection timed out")), + _ => { + if writefds.fd_count != 1 { + if let Some(e) = self.take_error()? 
{ + return Err(e); + } + } + Ok(()) + } + } + } + + pub fn local_addr(&self) -> io::Result { + unsafe { + let mut storage: SOCKADDR_STORAGE = mem::zeroed(); + let mut len = mem::size_of_val(&storage) as c_int; + if ws2_32::getsockname(self.socket, + &mut storage as *mut _ as *mut _, + &mut len) != 0 { + return Err(last_error()) + } + Ok(SockAddr::from_raw_parts(&storage as *const _ as *const _, len)) + } + } + + pub fn peer_addr(&self) -> io::Result { + unsafe { + let mut storage: SOCKADDR_STORAGE = mem::zeroed(); + let mut len = mem::size_of_val(&storage) as c_int; + if ws2_32::getpeername(self.socket, + &mut storage as *mut _ as *mut _, + &mut len) != 0 { + return Err(last_error()) + } + Ok(SockAddr::from_raw_parts(&storage as *const _ as *const _, len)) + } + } + + pub fn try_clone(&self) -> io::Result { + unsafe { + let mut info: WSAPROTOCOL_INFOW = mem::zeroed(); + let r = ws2_32::WSADuplicateSocketW(self.socket, + kernel32::GetCurrentProcessId(), + &mut info); + if r != 0 { + return Err(io::Error::last_os_error()) + } + let socket = ws2_32::WSASocketW(info.iAddressFamily, + info.iSocketType, + info.iProtocol, + &mut info, + 0, + WSA_FLAG_OVERLAPPED); + let socket = match socket { + INVALID_SOCKET => return Err(last_error()), + n => Socket::from_raw_socket(n), + }; + socket.set_no_inherit()?; + Ok(socket) + } + } + + pub fn accept(&self) -> io::Result<(Socket, SockAddr)> { + unsafe { + let mut storage: SOCKADDR_STORAGE = mem::zeroed(); + let mut len = mem::size_of_val(&storage) as c_int; + let socket = { + ws2_32::accept(self.socket, + &mut storage as *mut _ as *mut _, + &mut len) + }; + let socket = match socket { + INVALID_SOCKET => return Err(last_error()), + socket => Socket::from_raw_socket(socket), + }; + socket.set_no_inherit()?; + let addr = SockAddr::from_raw_parts(&storage as *const _ as *const _, len); + Ok((socket, addr)) + } + } + + pub fn take_error(&self) -> io::Result> { + unsafe { + let raw: c_int = self.getsockopt(SOL_SOCKET, SO_ERROR)?; + if raw == 0 { + Ok(None) + } else { + Ok(Some(io::Error::from_raw_os_error(raw as i32))) + } + } + } + + pub fn set_nonblocking(&self, nonblocking: bool) -> io::Result<()> { + unsafe { + let mut nonblocking = nonblocking as c_ulong; + let r = ws2_32::ioctlsocket(self.socket, + FIONBIO as c_int, + &mut nonblocking); + if r == 0 { + Ok(()) + } else { + Err(io::Error::last_os_error()) + } + } + } + + pub fn shutdown(&self, how: Shutdown) -> io::Result<()> { + + let how = match how { + Shutdown::Write => SD_SEND, + Shutdown::Read => SD_RECEIVE, + Shutdown::Both => SD_BOTH, + }; + if unsafe { ws2_32::shutdown(self.socket, how) == 0 } { + Ok(()) + } else { + Err(last_error()) + } + } + + pub fn recv(&self, buf: &mut [u8]) -> io::Result { + unsafe { + let n = { + ws2_32::recv(self.socket, + buf.as_mut_ptr() as *mut c_char, + clamp(buf.len()), + 0) + }; + match n { + SOCKET_ERROR if ws2_32::WSAGetLastError() == WSAESHUTDOWN as i32 => Ok(0), + SOCKET_ERROR => Err(last_error()), + n => Ok(n as usize) + } + } + } + + pub fn peek(&self, buf: &mut [u8]) -> io::Result { + unsafe { + let n = { + ws2_32::recv(self.socket, + buf.as_mut_ptr() as *mut c_char, + clamp(buf.len()), + MSG_PEEK) + }; + match n { + SOCKET_ERROR if ws2_32::WSAGetLastError() == WSAESHUTDOWN as i32 => Ok(0), + SOCKET_ERROR => Err(last_error()), + n => Ok(n as usize) + } + } + } + + pub fn recv_from(&self, buf: &mut [u8]) -> io::Result<(usize, SockAddr)> { + self.recvfrom(buf, 0) + } + + pub fn peek_from(&self, buf: &mut [u8]) -> io::Result<(usize, SockAddr)> { + 
self.recvfrom(buf, MSG_PEEK) + } + + fn recvfrom(&self, buf: &mut [u8], flags: c_int) + -> io::Result<(usize, SockAddr)> { + unsafe { + let mut storage: SOCKADDR_STORAGE = mem::zeroed(); + let mut addrlen = mem::size_of_val(&storage) as c_int; + + let n = { + ws2_32::recvfrom(self.socket, + buf.as_mut_ptr() as *mut c_char, + clamp(buf.len()), + flags, + &mut storage as *mut _ as *mut _, + &mut addrlen) + }; + let n = match n { + SOCKET_ERROR if ws2_32::WSAGetLastError() == WSAESHUTDOWN as i32 => 0, + SOCKET_ERROR => return Err(last_error()), + n => n as usize, + }; + let addr = SockAddr::from_raw_parts(&storage as *const _ as *const _, addrlen); + Ok((n, addr)) + } + } + + pub fn send(&self, buf: &[u8]) -> io::Result { + unsafe { + let n = { + ws2_32::send(self.socket, + buf.as_ptr() as *const c_char, + clamp(buf.len()), + 0) + }; + if n == SOCKET_ERROR { + Err(last_error()) + } else { + Ok(n as usize) + } + } + } + + pub fn send_to(&self, buf: &[u8], addr: &SockAddr) -> io::Result { + unsafe { + let n = { + ws2_32::sendto(self.socket, + buf.as_ptr() as *const c_char, + clamp(buf.len()), + 0, + addr.as_ptr(), + addr.len()) + }; + if n == SOCKET_ERROR { + Err(last_error()) + } else { + Ok(n as usize) + } + } + } + + // ================================================ + + pub fn ttl(&self) -> io::Result { + unsafe { + let raw: c_int = self.getsockopt(IPPROTO_IP, IP_TTL)?; + Ok(raw as u32) + } + } + + pub fn set_ttl(&self, ttl: u32) -> io::Result<()> { + unsafe { + self.setsockopt(IPPROTO_IP, IP_TTL, ttl as c_int) + } + } + + pub fn only_v6(&self) -> io::Result { + unsafe { + let raw: c_int = self.getsockopt(IPPROTO_IPV6.0 as c_int, + IPV6_V6ONLY)?; + Ok(raw != 0) + } + } + + pub fn set_only_v6(&self, only_v6: bool) -> io::Result<()> { + unsafe { + self.setsockopt(IPPROTO_IPV6.0 as c_int, + IPV6_V6ONLY, + only_v6 as c_int) + } + } + + pub fn read_timeout(&self) -> io::Result> { + unsafe { + Ok(ms2dur(self.getsockopt(SOL_SOCKET, SO_RCVTIMEO)?)) + } + } + + pub fn set_read_timeout(&self, dur: Option) -> io::Result<()> { + unsafe { + self.setsockopt(SOL_SOCKET, SO_RCVTIMEO, dur2ms(dur)?) + } + } + + pub fn write_timeout(&self) -> io::Result> { + unsafe { + Ok(ms2dur(self.getsockopt(SOL_SOCKET, SO_SNDTIMEO)?)) + } + } + + pub fn set_write_timeout(&self, dur: Option) -> io::Result<()> { + unsafe { + self.setsockopt(SOL_SOCKET, SO_SNDTIMEO, dur2ms(dur)?) 
+ } + } + + pub fn nodelay(&self) -> io::Result { + unsafe { + let raw: c_int = self.getsockopt(IPPROTO_TCP.0 as c_int, + TCP_NODELAY)?; + Ok(raw != 0) + } + } + + pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> { + unsafe { + self.setsockopt(IPPROTO_TCP.0 as c_int, + TCP_NODELAY, + nodelay as c_int) + } + } + + pub fn broadcast(&self) -> io::Result { + unsafe { + let raw: c_int = self.getsockopt(SOL_SOCKET, SO_BROADCAST)?; + Ok(raw != 0) + } + } + + pub fn set_broadcast(&self, broadcast: bool) -> io::Result<()> { + unsafe { + self.setsockopt(SOL_SOCKET, SO_BROADCAST, broadcast as c_int) + } + } + + pub fn multicast_loop_v4(&self) -> io::Result { + unsafe { + let raw: c_int = self.getsockopt(IPPROTO_IP, IP_MULTICAST_LOOP)?; + Ok(raw != 0) + } + } + + pub fn set_multicast_loop_v4(&self, multicast_loop_v4: bool) -> io::Result<()> { + unsafe { + self.setsockopt(IPPROTO_IP, + IP_MULTICAST_LOOP, + multicast_loop_v4 as c_int) + } + } + + pub fn multicast_ttl_v4(&self) -> io::Result { + unsafe { + let raw: c_int = self.getsockopt(IPPROTO_IP, IP_MULTICAST_TTL)?; + Ok(raw as u32) + } + } + + pub fn set_multicast_ttl_v4(&self, multicast_ttl_v4: u32) -> io::Result<()> { + unsafe { + self.setsockopt(IPPROTO_IP, + IP_MULTICAST_TTL, + multicast_ttl_v4 as c_int) + } + } + + pub fn multicast_loop_v6(&self) -> io::Result { + unsafe { + let raw: c_int = self.getsockopt(IPPROTO_IPV6.0 as c_int, + IPV6_MULTICAST_LOOP)?; + Ok(raw != 0) + } + } + + pub fn set_multicast_loop_v6(&self, multicast_loop_v6: bool) -> io::Result<()> { + unsafe { + self.setsockopt(IPPROTO_IPV6.0 as c_int, + IPV6_MULTICAST_LOOP, + multicast_loop_v6 as c_int) + } + } + + pub fn join_multicast_v4(&self, + multiaddr: &Ipv4Addr, + interface: &Ipv4Addr) -> io::Result<()> { + let multiaddr = to_s_addr(multiaddr); + let interface = to_s_addr(interface); + let mreq = ip_mreq { + imr_multiaddr: in_addr { S_un: multiaddr }, + imr_interface: in_addr { S_un: interface }, + }; + unsafe { + self.setsockopt(IPPROTO_IP, IP_ADD_MEMBERSHIP, mreq) + } + } + + pub fn join_multicast_v6(&self, + multiaddr: &Ipv6Addr, + interface: u32) -> io::Result<()> { + let multiaddr = to_in6_addr(multiaddr); + let mreq = ipv6_mreq { + ipv6mr_multiaddr: multiaddr, + ipv6mr_interface: interface, + }; + unsafe { + self.setsockopt(IPPROTO_IP, IPV6_ADD_MEMBERSHIP, mreq) + } + } + + pub fn leave_multicast_v4(&self, + multiaddr: &Ipv4Addr, + interface: &Ipv4Addr) -> io::Result<()> { + let multiaddr = to_s_addr(multiaddr); + let interface = to_s_addr(interface); + let mreq = ip_mreq { + imr_multiaddr: in_addr { S_un: multiaddr }, + imr_interface: in_addr { S_un: interface }, + }; + unsafe { + self.setsockopt(IPPROTO_IP, IP_DROP_MEMBERSHIP, mreq) + } + } + + pub fn leave_multicast_v6(&self, + multiaddr: &Ipv6Addr, + interface: u32) -> io::Result<()> { + let multiaddr = to_in6_addr(multiaddr); + let mreq = ipv6_mreq { + ipv6mr_multiaddr: multiaddr, + ipv6mr_interface: interface, + }; + unsafe { + self.setsockopt(IPPROTO_IP, IPV6_DROP_MEMBERSHIP, mreq) + } + } + + pub fn linger(&self) -> io::Result> { + unsafe { + Ok(linger2dur(self.getsockopt(SOL_SOCKET, SO_LINGER)?)) + } + } + + pub fn set_linger(&self, dur: Option) -> io::Result<()> { + unsafe { + self.setsockopt(SOL_SOCKET, SO_LINGER, dur2linger(dur)) + } + } + + pub fn set_reuse_address(&self, reuse: bool) -> io::Result<()> { + unsafe { + self.setsockopt(SOL_SOCKET, SO_REUSEADDR, reuse as c_int) + } + } + + pub fn reuse_address(&self) -> io::Result { + unsafe { + let raw: c_int = self.getsockopt(SOL_SOCKET, 
SO_REUSEADDR)?; + Ok(raw != 0) + } + } + + pub fn recv_buffer_size(&self) -> io::Result { + unsafe { + let raw: c_int = self.getsockopt(SOL_SOCKET, SO_RCVBUF)?; + Ok(raw as usize) + } + } + + pub fn set_recv_buffer_size(&self, size: usize) -> io::Result<()> { + unsafe { + // TODO: casting usize to a c_int should be a checked cast + self.setsockopt(SOL_SOCKET, SO_RCVBUF, size as c_int) + } + } + + pub fn send_buffer_size(&self) -> io::Result { + unsafe { + let raw: c_int = self.getsockopt(SOL_SOCKET, SO_SNDBUF)?; + Ok(raw as usize) + } + } + + pub fn set_send_buffer_size(&self, size: usize) -> io::Result<()> { + unsafe { + // TODO: casting usize to a c_int should be a checked cast + self.setsockopt(SOL_SOCKET, SO_SNDBUF, size as c_int) + } + } + + pub fn keepalive(&self) -> io::Result> { + let mut ka = tcp_keepalive { + onoff: 0, + keepalivetime: 0, + keepaliveinterval: 0, + }; + let n = unsafe { + ws2_32::WSAIoctl(self.socket, + SIO_KEEPALIVE_VALS, + 0 as *mut _, + 0, + &mut ka as *mut _ as *mut _, + mem::size_of_val(&ka) as DWORD, + 0 as *mut _, + 0 as *mut _, + None) + }; + if n == 0 { + Ok(if ka.onoff == 0 { + None + } else if ka.keepaliveinterval == 0 { + None + } else { + let seconds = ka.keepaliveinterval / 1000; + let nanos = (ka.keepaliveinterval % 1000) * 1_000_000; + Some(Duration::new(seconds as u64, nanos as u32)) + }) + } else { + Err(last_error()) + } + } + + pub fn set_keepalive(&self, keepalive: Option) -> io::Result<()> { + let ms = dur2ms(keepalive)?; + // TODO: checked casts here + let ka = tcp_keepalive { + onoff: keepalive.is_some() as c_ulong, + keepalivetime: ms as c_ulong, + keepaliveinterval: ms as c_ulong, + }; + let n = unsafe { + ws2_32::WSAIoctl(self.socket, + SIO_KEEPALIVE_VALS, + &ka as *const _ as *mut _, + mem::size_of_val(&ka) as DWORD, + 0 as *mut _, + 0, + 0 as *mut _, + 0 as *mut _, + None) + }; + if n == 0 { + Ok(()) + } else { + Err(last_error()) + } + } + + unsafe fn setsockopt(&self, + opt: c_int, + val: c_int, + payload: T) -> io::Result<()> + where T: Copy, + { + let payload = &payload as *const T as *const c_char; + if ws2_32::setsockopt(self.socket, + opt, + val, + payload, + mem::size_of::() as c_int) == 0 { + Ok(()) + } else { + Err(last_error()) + } + } + + unsafe fn getsockopt(&self, opt: c_int, val: c_int) -> io::Result { + let mut slot: T = mem::zeroed(); + let mut len = mem::size_of::() as c_int; + if ws2_32::getsockopt(self.socket, + opt, + val, + &mut slot as *mut _ as *mut _, + &mut len) == 0 { + assert_eq!(len as usize, mem::size_of::()); + Ok(slot) + } else { + Err(last_error()) + } + } + + fn set_no_inherit(&self) -> io::Result<()> { + unsafe { + let r = kernel32::SetHandleInformation(self.socket as HANDLE, + HANDLE_FLAG_INHERIT, + 0); + if r == 0 { + Err(io::Error::last_os_error()) + } else { + Ok(()) + } + } + } +} + +impl Read for Socket { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + <&Socket>::read(&mut &*self, buf) + } +} + +impl<'a> Read for &'a Socket { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.recv(buf) + } +} + +impl Write for Socket { + fn write(&mut self, buf: &[u8]) -> io::Result { + <&Socket>::write(&mut &*self, buf) + } + + fn flush(&mut self) -> io::Result<()> { + <&Socket>::flush(&mut &*self) + } +} + +impl<'a> Write for &'a Socket { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.send(buf) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} + +impl fmt::Debug for Socket { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut f = f.debug_struct("Socket"); 
+ f.field("socket", &self.socket); + if let Ok(addr) = self.local_addr() { + f.field("local_addr", &addr); + } + if let Ok(addr) = self.peer_addr() { + f.field("peer_addr", &addr); + } + f.finish() + } +} + +impl AsRawSocket for Socket { + fn as_raw_socket(&self) -> SOCKET { + self.socket + } +} + +impl IntoRawSocket for Socket { + fn into_raw_socket(self) -> SOCKET { + let socket = self.socket; + mem::forget(self); + return socket + } +} + +impl FromRawSocket for Socket { + unsafe fn from_raw_socket(socket: SOCKET) -> Socket { + Socket { socket: socket } + } +} + +impl AsRawSocket for ::Socket { + fn as_raw_socket(&self) -> SOCKET { + self.inner.as_raw_socket() + } +} + +impl IntoRawSocket for ::Socket { + fn into_raw_socket(self) -> SOCKET { + self.inner.into_raw_socket() + } +} + +impl FromRawSocket for ::Socket { + unsafe fn from_raw_socket(socket: SOCKET) -> ::Socket { + ::Socket { inner: Socket::from_raw_socket(socket) } + } +} + +impl Drop for Socket { + fn drop(&mut self) { + unsafe { + let _ = ws2_32::closesocket(self.socket); + } + } +} + +impl From for net::TcpStream { + fn from(socket: Socket) -> net::TcpStream { + unsafe { net::TcpStream::from_raw_socket(socket.into_raw_socket()) } + } +} + +impl From for net::TcpListener { + fn from(socket: Socket) -> net::TcpListener { + unsafe { net::TcpListener::from_raw_socket(socket.into_raw_socket()) } + } +} + +impl From for net::UdpSocket { + fn from(socket: Socket) -> net::UdpSocket { + unsafe { net::UdpSocket::from_raw_socket(socket.into_raw_socket()) } + } +} + +impl From for Socket { + fn from(socket: net::TcpStream) -> Socket { + unsafe { Socket::from_raw_socket(socket.into_raw_socket()) } + } +} + +impl From for Socket { + fn from(socket: net::TcpListener) -> Socket { + unsafe { Socket::from_raw_socket(socket.into_raw_socket()) } + } +} + +impl From for Socket { + fn from(socket: net::UdpSocket) -> Socket { + unsafe { Socket::from_raw_socket(socket.into_raw_socket()) } + } +} + +fn clamp(input: usize) -> c_int { + cmp::min(input, ::max_value() as usize) as c_int +} + +fn dur2ms(dur: Option) -> io::Result { + match dur { + Some(dur) => { + // Note that a duration is a (u64, u32) (seconds, nanoseconds) + // pair, and the timeouts in windows APIs are typically u32 + // milliseconds. To translate, we have two pieces to take care of: + // + // * Nanosecond precision is rounded up + // * Greater than u32::MAX milliseconds (50 days) is rounded up to + // INFINITE (never time out). 
+            let ms = dur.as_secs().checked_mul(1000).and_then(|ms| {
+                ms.checked_add((dur.subsec_nanos() as u64) / 1_000_000)
+            }).and_then(|ms| {
+                ms.checked_add(if dur.subsec_nanos() % 1_000_000 > 0 {1} else {0})
+            }).map(|ms| {
+                if ms > <DWORD>::max_value() as u64 {
+                    INFINITE
+                } else {
+                    ms as DWORD
+                }
+            }).unwrap_or(INFINITE);
+            if ms == 0 {
+                return Err(io::Error::new(io::ErrorKind::InvalidInput,
+                                          "cannot set a 0 duration timeout"));
+            }
+            Ok(ms)
+        }
+        None => Ok(0),
+    }
+}
+
+fn ms2dur(raw: DWORD) -> Option<Duration> {
+    if raw == 0 {
+        None
+    } else {
+        let secs = raw / 1000;
+        let nsec = (raw % 1000) * 1000000;
+        Some(Duration::new(secs as u64, nsec as u32))
+    }
+}
+
+fn to_s_addr(addr: &Ipv4Addr) -> ULONG {
+    let octets = addr.octets();
+    ::hton(((octets[0] as ULONG) << 24) |
+           ((octets[1] as ULONG) << 16) |
+           ((octets[2] as ULONG) <<  8) |
+           ((octets[3] as ULONG) <<  0))
+}
+
+fn to_in6_addr(addr: &Ipv6Addr) -> in6_addr {
+    let mut ret: in6_addr = unsafe { mem::zeroed() };
+    ret.s6_addr = addr.octets();
+    return ret
+}
+
+fn linger2dur(linger_opt: linger) -> Option<Duration> {
+    if linger_opt.l_onoff == 0 {
+        None
+    } else {
+        Some(Duration::from_secs(linger_opt.l_linger as u64))
+    }
+}
+
+fn dur2linger(dur: Option<Duration>) -> linger {
+    match dur {
+        Some(d) => {
+            linger {
+                l_onoff: 1,
+                l_linger: d.as_secs() as u16,
+            }
+        }
+        None => linger { l_onoff: 0, l_linger: 0 },
+    }
+}
diff --git a/collector/compile-benchmarks/cargo/socket2-0.2.3/src/utils.rs b/collector/compile-benchmarks/cargo/socket2-0.2.3/src/utils.rs
new file mode 100644
index 000000000..21fe9d61f
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/socket2-0.2.3/src/utils.rs
@@ -0,0 +1,51 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[doc(hidden)]
+pub trait NetInt {
+    fn from_be(i: Self) -> Self;
+    fn to_be(&self) -> Self;
+}
+macro_rules! doit {
+    ($($t:ident)*) => ($(impl NetInt for $t {
+        fn from_be(i: Self) -> Self { <$t>::from_be(i) }
+        fn to_be(&self) -> Self { <$t>::to_be(*self) }
+    })*)
+}
+doit! { i8 i16 i32 i64 isize u8 u16 u32 u64 usize }
+
+#[doc(hidden)]
+pub trait One {
+    fn one() -> Self;
+}
+
+macro_rules! one {
+    ($($t:ident)*) => ($(
+        impl One for $t { fn one() -> $t { 1 } }
+    )*)
+}
+
+one! { i8 i16 i32 i64 isize u8 u16 u32 u64 usize }
+
+#[doc(hidden)]
+pub trait Zero {
+    fn zero() -> Self;
+}
+
+macro_rules! zero {
+    ($($t:ident)*) => ($(
+        impl Zero for $t { fn zero() -> $t { 0 } }
+    )*)
+}
+
+zero!
{ i8 i16 i32 i64 isize u8 u16 u32 u64 usize } + diff --git a/collector/compile-benchmarks/cargo/src/bin/bench.rs b/collector/compile-benchmarks/cargo/src/bin/bench.rs new file mode 100644 index 000000000..1aa82dd9f --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/bench.rs @@ -0,0 +1,153 @@ +use std::env; + +use cargo::core::Workspace; +use cargo::ops::{self, MessageFormat, Packages}; +use cargo::util::{CliResult, CliError, Config, CargoErrorKind}; +use cargo::util::important_paths::{find_root_manifest_for_wd}; + +#[derive(Deserialize)] +pub struct Options { + flag_no_run: bool, + flag_package: Vec, + flag_jobs: Option, + flag_features: Vec, + flag_all_features: bool, + flag_no_default_features: bool, + flag_target: Option, + flag_manifest_path: Option, + flag_verbose: u32, + flag_quiet: Option, + flag_color: Option, + flag_message_format: MessageFormat, + flag_lib: bool, + flag_bin: Vec, + flag_bins: bool, + flag_example: Vec, + flag_examples: bool, + flag_test: Vec, + flag_tests: bool, + flag_bench: Vec, + flag_benches: bool, + flag_all_targets: bool, + flag_no_fail_fast: bool, + flag_frozen: bool, + flag_locked: bool, + arg_args: Vec, + flag_all: bool, + flag_exclude: Vec, + #[serde(rename = "flag_Z")] + flag_z: Vec, +} + +pub const USAGE: &'static str = " +Execute all benchmarks of a local package + +Usage: + cargo bench [options] [--] [...] + +Options: + -h, --help Print this message + --lib Benchmark only this package's library + --bin NAME ... Benchmark only the specified binary + --bins Benchmark all binaries + --example NAME ... Benchmark only the specified example + --examples Benchmark all examples + --test NAME ... Benchmark only the specified test target + --tests Benchmark all tests + --bench NAME ... Benchmark only the specified bench target + --benches Benchmark all benches + --all-targets Benchmark all targets (default) + --no-run Compile, but don't run benchmarks + -p SPEC, --package SPEC ... Package to run benchmarks for + --all Benchmark all packages in the workspace + --exclude SPEC ... Exclude packages from the benchmark + -j N, --jobs N Number of parallel jobs, defaults to # of CPUs + --features FEATURES Space-separated list of features to also build + --all-features Build all available features + --no-default-features Do not build the `default` feature + --target TRIPLE Build for the target triple + --manifest-path PATH Path to the manifest to build benchmarks for + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + -q, --quiet No output printed to stdout + --color WHEN Coloring: auto, always, never + --message-format FMT Error format: human, json [default: human] + --no-fail-fast Run all benchmarks regardless of failure + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date + -Z FLAG ... Unstable (nightly-only) flags to Cargo + +All of the trailing arguments are passed to the benchmark binaries generated +for filtering benchmarks and generally providing options configuring how they +run. + +If the --package argument is given, then SPEC is a package id specification +which indicates which package should be benchmarked. If it is not given, then +the current package is benchmarked. For more information on SPEC and its format, +see the `cargo help pkgid` command. + +All packages in the workspace are benchmarked if the `--all` flag is supplied. The +`--all` flag is automatically assumed for a virtual manifest. +Note that `--exclude` has to be specified in conjunction with the `--all` flag. 
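+
+For example (`parsing` is a hypothetical bench target name), the following
+runs one bench target and passes a filter string through to the binary:
+
+    cargo bench --bench parsing -- some_filter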
+ +The --jobs argument affects the building of the benchmark executable but does +not affect how many jobs are used when running the benchmarks. + +Compilation can be customized with the `bench` profile in the manifest. +"; + +pub fn execute(options: Options, config: &mut Config) -> CliResult { + debug!("executing; cmd=cargo-bench; args={:?}", + env::args().collect::>()); + + config.configure(options.flag_verbose, + options.flag_quiet, + &options.flag_color, + options.flag_frozen, + options.flag_locked, + &options.flag_z)?; + + let root = find_root_manifest_for_wd(options.flag_manifest_path, config.cwd())?; + let ws = Workspace::new(&root, config)?; + + let spec = Packages::from_flags(ws.is_virtual(), + options.flag_all, + &options.flag_exclude, + &options.flag_package)?; + + let ops = ops::TestOptions { + no_run: options.flag_no_run, + no_fail_fast: options.flag_no_fail_fast, + only_doc: false, + compile_opts: ops::CompileOptions { + config: config, + jobs: options.flag_jobs, + target: options.flag_target.as_ref().map(|s| &s[..]), + features: &options.flag_features, + all_features: options.flag_all_features, + no_default_features: options.flag_no_default_features, + spec: spec, + release: true, + mode: ops::CompileMode::Bench, + filter: ops::CompileFilter::new(options.flag_lib, + &options.flag_bin, options.flag_bins, + &options.flag_test, options.flag_tests, + &options.flag_example, options.flag_examples, + &options.flag_bench, options.flag_benches, + options.flag_all_targets), + message_format: options.flag_message_format, + target_rustdoc_args: None, + target_rustc_args: None, + }, + }; + + let err = ops::run_benches(&ws, &ops, &options.arg_args)?; + match err { + None => Ok(()), + Some(err) => { + Err(match err.exit.as_ref().and_then(|e| e.code()) { + Some(i) => CliError::new("bench failed".into(), i), + None => CliError::new(CargoErrorKind::CargoTestErrorKind(err).into(), 101) + }) + } + } +} diff --git a/collector/compile-benchmarks/cargo/src/bin/build.rs b/collector/compile-benchmarks/cargo/src/bin/build.rs new file mode 100644 index 000000000..883e30db6 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/build.rs @@ -0,0 +1,131 @@ +use std::env; + +use cargo::core::Workspace; +use cargo::ops::{self, CompileOptions, MessageFormat, Packages}; +use cargo::util::important_paths::{find_root_manifest_for_wd}; +use cargo::util::{CliResult, Config}; + +#[derive(Deserialize)] +pub struct Options { + flag_package: Vec, + flag_jobs: Option, + flag_features: Vec, + flag_all_features: bool, + flag_no_default_features: bool, + flag_target: Option, + flag_manifest_path: Option, + flag_verbose: u32, + flag_quiet: Option, + flag_color: Option, + flag_message_format: MessageFormat, + flag_release: bool, + flag_lib: bool, + flag_bin: Vec, + flag_bins: bool, + flag_example: Vec, + flag_examples: bool, + flag_test: Vec, + flag_tests: bool, + flag_bench: Vec, + flag_benches: bool, + flag_all_targets: bool, + flag_locked: bool, + flag_frozen: bool, + flag_all: bool, + flag_exclude: Vec, + #[serde(rename = "flag_Z")] + flag_z: Vec, +} + +pub const USAGE: &'static str = " +Compile a local package and all of its dependencies + +Usage: + cargo build [options] + +Options: + -h, --help Print this message + -p SPEC, --package SPEC ... Package to build + --all Build all packages in the workspace + --exclude SPEC ... 
Exclude packages from the build + -j N, --jobs N Number of parallel jobs, defaults to # of CPUs + --lib Build only this package's library + --bin NAME Build only the specified binary + --bins Build all binaries + --example NAME Build only the specified example + --examples Build all examples + --test NAME Build only the specified test target + --tests Build all tests + --bench NAME Build only the specified bench target + --benches Build all benches + --all-targets Build all targets (lib and bin targets by default) + --release Build artifacts in release mode, with optimizations + --features FEATURES Space-separated list of features to also build + --all-features Build all available features + --no-default-features Do not build the `default` feature + --target TRIPLE Build for the target triple + --manifest-path PATH Path to the manifest to compile + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + -q, --quiet No output printed to stdout + --color WHEN Coloring: auto, always, never + --message-format FMT Error format: human, json [default: human] + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date + -Z FLAG ... Unstable (nightly-only) flags to Cargo + +If the --package argument is given, then SPEC is a package id specification +which indicates which package should be built. If it is not given, then the +current package is built. For more information on SPEC and its format, see the +`cargo help pkgid` command. + +All packages in the workspace are built if the `--all` flag is supplied. The +`--all` flag is automatically assumed for a virtual manifest. +Note that `--exclude` has to be specified in conjunction with the `--all` flag. + +Compilation can be configured via the use of profiles which are configured in +the manifest. The default profile for this command is `dev`, but passing +the --release flag will use the `release` profile instead. 
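+
+For example (with hypothetical feature names), a release build that enables
+two features of the current package:
+
+    cargo build --release --features "json ssl"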
+"; + +pub fn execute(options: Options, config: &mut Config) -> CliResult { + debug!("executing; cmd=cargo-build; args={:?}", + env::args().collect::>()); + config.configure(options.flag_verbose, + options.flag_quiet, + &options.flag_color, + options.flag_frozen, + options.flag_locked, + &options.flag_z)?; + + let root = find_root_manifest_for_wd(options.flag_manifest_path, config.cwd())?; + let ws = Workspace::new(&root, config)?; + + let spec = Packages::from_flags(ws.is_virtual(), + options.flag_all, + &options.flag_exclude, + &options.flag_package)?; + + let opts = CompileOptions { + config: config, + jobs: options.flag_jobs, + target: options.flag_target.as_ref().map(|t| &t[..]), + features: &options.flag_features, + all_features: options.flag_all_features, + no_default_features: options.flag_no_default_features, + spec: spec, + mode: ops::CompileMode::Build, + release: options.flag_release, + filter: ops::CompileFilter::new(options.flag_lib, + &options.flag_bin, options.flag_bins, + &options.flag_test, options.flag_tests, + &options.flag_example, options.flag_examples, + &options.flag_bench, options.flag_benches, + options.flag_all_targets), + message_format: options.flag_message_format, + target_rustdoc_args: None, + target_rustc_args: None, + }; + + ops::compile(&ws, &opts)?; + Ok(()) +} diff --git a/collector/compile-benchmarks/cargo/src/bin/cargo.rs b/collector/compile-benchmarks/cargo/src/bin/cargo.rs new file mode 100644 index 000000000..e92bcdbe5 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/cargo.rs @@ -0,0 +1,427 @@ +extern crate cargo; +extern crate env_logger; +extern crate git2_curl; +extern crate toml; +#[macro_use] +extern crate log; +#[macro_use] +extern crate serde_derive; +extern crate serde_json; + +use std::collections::BTreeSet; +use std::collections::HashMap; +use std::env; +use std::fs; +use std::path::{Path, PathBuf}; + +use cargo::core::shell::{Shell, Verbosity}; +use cargo::util::{self, CliResult, lev_distance, Config, CargoResult, CargoError, CargoErrorKind}; +use cargo::util::CliError; + +#[derive(Deserialize)] +pub struct Flags { + flag_list: bool, + flag_version: bool, + flag_verbose: u32, + flag_quiet: Option, + flag_color: Option, + flag_explain: Option, + arg_command: String, + arg_args: Vec, + flag_locked: bool, + flag_frozen: bool, + #[serde(rename = "flag_Z")] + flag_z: Vec, +} + +const USAGE: &'static str = " +Rust's package manager + +Usage: + cargo [...] + cargo [options] + +Options: + -h, --help Display this message + -V, --version Print version info and exit + --list List installed commands + --explain CODE Run `rustc --explain CODE` + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + -q, --quiet No output printed to stdout + --color WHEN Coloring: auto, always, never + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date + -Z FLAG ... 
Unstable (nightly-only) flags to Cargo + +Some common cargo commands are (see all commands with --list): + build Compile the current project + check Analyze the current project and report errors, but don't build object files + clean Remove the target directory + doc Build this project's and its dependencies' documentation + new Create a new cargo project + init Create a new cargo project in an existing directory + run Build and execute src/main.rs + test Run the tests + bench Run the benchmarks + update Update dependencies listed in Cargo.lock + search Search registry for crates + publish Package and upload this project to the registry + install Install a Rust binary + uninstall Uninstall a Rust binary + +See 'cargo help ' for more information on a specific command. +"; + +fn main() { + env_logger::init().unwrap(); + + let mut config = match Config::default() { + Ok(cfg) => cfg, + Err(e) => { + let mut shell = Shell::new(); + cargo::exit_with_error(e.into(), &mut shell) + } + }; + + let result = (|| { + let args: Vec<_> = try!(env::args_os() + .map(|s| { + s.into_string().map_err(|s| { + CargoError::from(format!("invalid unicode in argument: {:?}", s)) + }) + }) + .collect()); + let rest = &args; + cargo::call_main_without_stdin(execute, &mut config, USAGE, rest, true) + })(); + + match result { + Err(e) => cargo::exit_with_error(e, &mut *config.shell()), + Ok(()) => {} + } +} + +macro_rules! each_subcommand{ + ($mac:ident) => { + $mac!(bench); + $mac!(build); + $mac!(check); + $mac!(clean); + $mac!(doc); + $mac!(fetch); + $mac!(generate_lockfile); + $mac!(git_checkout); + $mac!(help); + $mac!(init); + $mac!(install); + $mac!(locate_project); + $mac!(login); + $mac!(metadata); + $mac!(new); + $mac!(owner); + $mac!(package); + $mac!(pkgid); + $mac!(publish); + $mac!(read_manifest); + $mac!(run); + $mac!(rustc); + $mac!(rustdoc); + $mac!(search); + $mac!(test); + $mac!(uninstall); + $mac!(update); + $mac!(verify_project); + $mac!(version); + $mac!(yank); + } +} + +macro_rules! declare_mod { + ($name:ident) => ( pub mod $name; ) +} +each_subcommand!(declare_mod); + +/** + The top-level `cargo` command handles configuration and project location + because they are fundamental (and intertwined). Other commands can rely + on this top-level information. +*/ +fn execute(flags: Flags, config: &mut Config) -> CliResult { + config.configure(flags.flag_verbose, + flags.flag_quiet, + &flags.flag_color, + flags.flag_frozen, + flags.flag_locked, + &flags.flag_z)?; + + init_git_transports(config); + let _token = cargo::util::job::setup(); + + if flags.flag_version { + let version = cargo::version(); + println!("{}", version); + if flags.flag_verbose > 0 { + println!("release: {}.{}.{}", + version.major, + version.minor, + version.patch); + if let Some(ref cfg) = version.cfg_info { + if let Some(ref ci) = cfg.commit_info { + println!("commit-hash: {}", ci.commit_hash); + println!("commit-date: {}", ci.commit_date); + } + } + } + return Ok(()); + } + + if flags.flag_list { + println!("Installed Commands:"); + for command in list_commands(config) { + println!(" {}", command); + } + return Ok(()); + } + + if let Some(ref code) = flags.flag_explain { + let mut procss = config.rustc()?.process(); + procss.arg("--explain").arg(code).exec()?; + return Ok(()); + } + + let args = match &flags.arg_command[..] { + // For the commands `cargo` and `cargo help`, re-execute ourselves as + // `cargo -h` so we can go through the normal process of printing the + // help message. 
+ "" | "help" if flags.arg_args.is_empty() => { + config.shell().set_verbosity(Verbosity::Verbose); + let args = &["cargo".to_string(), "-h".to_string()]; + return cargo::call_main_without_stdin(execute, config, USAGE, args, false); + } + + // For `cargo help -h` and `cargo help --help`, print out the help + // message for `cargo help` + "help" if flags.arg_args[0] == "-h" || flags.arg_args[0] == "--help" => { + vec!["cargo".to_string(), "help".to_string(), "-h".to_string()] + } + + // For `cargo help foo`, print out the usage message for the specified + // subcommand by executing the command with the `-h` flag. + "help" => vec!["cargo".to_string(), flags.arg_args[0].clone(), "-h".to_string()], + + // For all other invocations, we're of the form `cargo foo args...`. We + // use the exact environment arguments to preserve tokens like `--` for + // example. + _ => { + let mut default_alias = HashMap::new(); + default_alias.insert("b", "build".to_string()); + default_alias.insert("t", "test".to_string()); + default_alias.insert("r", "run".to_string()); + let mut args: Vec = env::args().collect(); + if let Some(new_command) = default_alias.get(&args[1][..]) { + args[1] = new_command.clone(); + } + args + } + }; + + if let Some(r) = try_execute_builtin_command(config, &args) { + return r; + } + + let alias_list = aliased_command(config, &args[1])?; + let args = match alias_list { + Some(alias_command) => { + let chain = args.iter() + .take(1) + .chain(alias_command.iter()) + .chain(args.iter().skip(2)) + .map(|s| s.to_string()) + .collect::>(); + if let Some(r) = try_execute_builtin_command(config, &chain) { + return r; + } else { + chain + } + } + None => args, + }; + + execute_external_subcommand(config, &args[1], &args) +} + +fn try_execute_builtin_command(config: &mut Config, args: &[String]) -> Option { + macro_rules! cmd { + ($name:ident) => (if args[1] == stringify!($name).replace("_", "-") { + config.shell().set_verbosity(Verbosity::Verbose); + let r = cargo::call_main_without_stdin($name::execute, + config, + $name::USAGE, + &args, + false); + return Some(r); + }) + } + each_subcommand!(cmd); + + None +} + +fn aliased_command(config: &Config, command: &str) -> CargoResult>> { + let alias_name = format!("alias.{}", command); + let mut result = Ok(None); + match config.get_string(&alias_name) { + Ok(value) => { + if let Some(record) = value { + let alias_commands = record.val + .split_whitespace() + .map(|s| s.to_string()) + .collect(); + result = Ok(Some(alias_commands)); + } + } + Err(_) => { + let value = config.get_list(&alias_name)?; + if let Some(record) = value { + let alias_commands: Vec = record.val + .iter() + .map(|s| s.0.to_string()) + .collect(); + result = Ok(Some(alias_commands)); + } + } + } + result +} + +fn find_closest(config: &Config, cmd: &str) -> Option { + let cmds = list_commands(config); + // Only consider candidates with a lev_distance of 3 or less so we don't + // suggest out-of-the-blue options. 
+ let mut filtered = cmds.iter() + .map(|c| (lev_distance(c, cmd), c)) + .filter(|&(d, _)| d < 4) + .collect::>(); + filtered.sort_by(|a, b| a.0.cmp(&b.0)); + filtered.get(0).map(|slot| slot.1.clone()) +} + +fn execute_external_subcommand(config: &Config, cmd: &str, args: &[String]) -> CliResult { + let command_exe = format!("cargo-{}{}", cmd, env::consts::EXE_SUFFIX); + let path = search_directories(config) + .iter() + .map(|dir| dir.join(&command_exe)) + .find(|file| is_executable(file)); + let command = match path { + Some(command) => command, + None => { + return Err(CargoError::from(match find_closest(config, cmd) { + Some(closest) => { + format!("no such subcommand: `{}`\n\n\tDid you mean `{}`?\n", + cmd, + closest) + } + None => format!("no such subcommand: `{}`", cmd), + }) + .into()) + } + }; + + let cargo_exe = config.cargo_exe()?; + let err = match util::process(&command) + .env(cargo::CARGO_ENV, cargo_exe) + .args(&args[1..]) + .exec_replace() { + Ok(()) => return Ok(()), + Err(e) => e, + }; + + if let &CargoErrorKind::ProcessErrorKind(ref perr) = err.kind() { + if let Some(code) = perr.exit.as_ref().and_then(|c| c.code()) { + return Err(CliError::code(code)); + } + } + Err(CliError::new(err, 101)) +} + +/// List all runnable commands +fn list_commands(config: &Config) -> BTreeSet { + let prefix = "cargo-"; + let suffix = env::consts::EXE_SUFFIX; + let mut commands = BTreeSet::new(); + for dir in search_directories(config) { + let entries = match fs::read_dir(dir) { + Ok(entries) => entries, + _ => continue, + }; + for entry in entries.filter_map(|e| e.ok()) { + let path = entry.path(); + let filename = match path.file_name().and_then(|s| s.to_str()) { + Some(filename) => filename, + _ => continue, + }; + if !filename.starts_with(prefix) || !filename.ends_with(suffix) { + continue; + } + if is_executable(entry.path()) { + let end = filename.len() - suffix.len(); + commands.insert(filename[prefix.len()..end].to_string()); + } + } + } + + macro_rules! add_cmd { + ($cmd:ident) => ({ commands.insert(stringify!($cmd).replace("_", "-")); }) + } + each_subcommand!(add_cmd); + commands +} + +#[cfg(unix)] +fn is_executable>(path: P) -> bool { + use std::os::unix::prelude::*; + fs::metadata(path) + .map(|metadata| metadata.is_file() && metadata.permissions().mode() & 0o111 != 0) + .unwrap_or(false) +} +#[cfg(windows)] +fn is_executable>(path: P) -> bool { + fs::metadata(path).map(|metadata| metadata.is_file()).unwrap_or(false) +} + +fn search_directories(config: &Config) -> Vec { + let mut dirs = vec![config.home().clone().into_path_unlocked().join("bin")]; + if let Some(val) = env::var_os("PATH") { + dirs.extend(env::split_paths(&val)); + } + dirs +} + +fn init_git_transports(config: &Config) { + // Only use a custom transport if a proxy is configured, right now libgit2 + // doesn't support proxies and we have to use a custom transport in this + // case. The custom transport, however, is not as well battle-tested. + match cargo::ops::http_proxy_exists(config) { + Ok(true) => {} + _ => return, + } + + let handle = match cargo::ops::http_handle(config) { + Ok(handle) => handle, + Err(..) => return, + }; + + // The unsafety of the registration function derives from two aspects: + // + // 1. This call must be synchronized with all other registration calls as + // well as construction of new transports. + // 2. The argument is leaked. 
+ // + // We're clear on point (1) because this is only called at the start of this + // binary (we know what the state of the world looks like) and we're mostly + // clear on point (2) because we'd only free it after everything is done + // anyway + unsafe { + git2_curl::register(handle); + } +} diff --git a/collector/compile-benchmarks/cargo/src/bin/check.rs b/collector/compile-benchmarks/cargo/src/bin/check.rs new file mode 100644 index 000000000..982204130 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/check.rs @@ -0,0 +1,132 @@ +use std::env; + +use cargo::core::Workspace; +use cargo::ops::{self, CompileOptions, MessageFormat, Packages}; +use cargo::util::{CliResult, Config}; +use cargo::util::important_paths::find_root_manifest_for_wd; + +pub const USAGE: &'static str = " +Check a local package and all of its dependencies for errors + +Usage: + cargo check [options] + +Options: + -h, --help Print this message + -p SPEC, --package SPEC ... Package(s) to check + --all Check all packages in the workspace + --exclude SPEC ... Exclude packages from the check + -j N, --jobs N Number of parallel jobs, defaults to # of CPUs + --lib Check only this package's library + --bin NAME Check only the specified binary + --bins Check all binaries + --example NAME Check only the specified example + --examples Check all examples + --test NAME Check only the specified test target + --tests Check all tests + --bench NAME Check only the specified bench target + --benches Check all benches + --all-targets Check all targets (lib and bin targets by default) + --release Check artifacts in release mode, with optimizations + --features FEATURES Space-separated list of features to also check + --all-features Check all available features + --no-default-features Do not check the `default` feature + --target TRIPLE Check for the target triple + --manifest-path PATH Path to the manifest to compile + -v, --verbose ... Use verbose output + -q, --quiet No output printed to stdout + --color WHEN Coloring: auto, always, never + --message-format FMT Error format: human, json [default: human] + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date + -Z FLAG ... Unstable (nightly-only) flags to Cargo + +If the --package argument is given, then SPEC is a package id specification +which indicates which package should be built. If it is not given, then the +current package is built. For more information on SPEC and its format, see the +`cargo help pkgid` command. + +All packages in the workspace are checked if the `--all` flag is supplied. The +`--all` flag is automatically assumed for a virtual manifest. +Note that `--exclude` has to be specified in conjunction with the `--all` flag. + +Compilation can be configured via the use of profiles which are configured in +the manifest. The default profile for this command is `dev`, but passing +the --release flag will use the `release` profile instead. 
+"; + +#[derive(Deserialize)] +pub struct Options { + flag_package: Vec, + flag_jobs: Option, + flag_features: Vec, + flag_all_features: bool, + flag_no_default_features: bool, + flag_target: Option, + flag_manifest_path: Option, + flag_verbose: u32, + flag_quiet: Option, + flag_color: Option, + flag_message_format: MessageFormat, + flag_release: bool, + flag_lib: bool, + flag_bin: Vec, + flag_bins: bool, + flag_example: Vec, + flag_examples: bool, + flag_test: Vec, + flag_tests: bool, + flag_bench: Vec, + flag_benches: bool, + flag_all_targets: bool, + flag_locked: bool, + flag_frozen: bool, + flag_all: bool, + flag_exclude: Vec, + #[serde(rename = "flag_Z")] + flag_z: Vec, +} + +pub fn execute(options: Options, config: &mut Config) -> CliResult { + debug!("executing; cmd=cargo-check; args={:?}", + env::args().collect::>()); + + config.configure(options.flag_verbose, + options.flag_quiet, + &options.flag_color, + options.flag_frozen, + options.flag_locked, + &options.flag_z)?; + + let root = find_root_manifest_for_wd(options.flag_manifest_path, config.cwd())?; + let ws = Workspace::new(&root, config)?; + + let spec = Packages::from_flags(ws.is_virtual(), + options.flag_all, + &options.flag_exclude, + &options.flag_package)?; + + let opts = CompileOptions { + config: config, + jobs: options.flag_jobs, + target: options.flag_target.as_ref().map(|t| &t[..]), + features: &options.flag_features, + all_features: options.flag_all_features, + no_default_features: options.flag_no_default_features, + spec: spec, + mode: ops::CompileMode::Check, + release: options.flag_release, + filter: ops::CompileFilter::new(options.flag_lib, + &options.flag_bin, options.flag_bins, + &options.flag_test, options.flag_tests, + &options.flag_example, options.flag_examples, + &options.flag_bench, options.flag_benches, + options.flag_all_targets), + message_format: options.flag_message_format, + target_rustdoc_args: None, + target_rustc_args: None, + }; + + ops::compile(&ws, &opts)?; + Ok(()) +} diff --git a/collector/compile-benchmarks/cargo/src/bin/clean.rs b/collector/compile-benchmarks/cargo/src/bin/clean.rs new file mode 100644 index 000000000..446b5e502 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/clean.rs @@ -0,0 +1,67 @@ +use std::env; + +use cargo::core::Workspace; +use cargo::ops; +use cargo::util::{CliResult, Config}; +use cargo::util::important_paths::{find_root_manifest_for_wd}; + +#[derive(Deserialize)] +pub struct Options { + flag_package: Vec, + flag_target: Option, + flag_manifest_path: Option, + flag_verbose: u32, + flag_quiet: Option, + flag_color: Option, + flag_release: bool, + flag_frozen: bool, + flag_locked: bool, + #[serde(rename = "flag_Z")] + flag_z: Vec, +} + +pub const USAGE: &'static str = " +Remove artifacts that cargo has generated in the past + +Usage: + cargo clean [options] + +Options: + -h, --help Print this message + -p SPEC, --package SPEC ... Package to clean artifacts for + --manifest-path PATH Path to the manifest to the package to clean + --target TRIPLE Target triple to clean output for (default all) + --release Whether or not to clean release artifacts + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + -q, --quiet No output printed to stdout + --color WHEN Coloring: auto, always, never + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date + -Z FLAG ... 
Unstable (nightly-only) flags to Cargo + +If the --package argument is given, then SPEC is a package id specification +which indicates which package's artifacts should be cleaned out. If it is not +given, then all packages' artifacts are removed. For more information on SPEC +and its format, see the `cargo help pkgid` command. +"; + +pub fn execute(options: Options, config: &mut Config) -> CliResult { + debug!("executing; cmd=cargo-clean; args={:?}", env::args().collect::>()); + config.configure(options.flag_verbose, + options.flag_quiet, + &options.flag_color, + options.flag_frozen, + options.flag_locked, + &options.flag_z)?; + + let root = find_root_manifest_for_wd(options.flag_manifest_path, config.cwd())?; + let opts = ops::CleanOptions { + config: config, + spec: &options.flag_package, + target: options.flag_target.as_ref().map(|s| &s[..]), + release: options.flag_release, + }; + let ws = Workspace::new(&root, config)?; + ops::clean(&ws, &opts)?; + Ok(()) +} diff --git a/collector/compile-benchmarks/cargo/src/bin/doc.rs b/collector/compile-benchmarks/cargo/src/bin/doc.rs new file mode 100644 index 000000000..6e1783696 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/doc.rs @@ -0,0 +1,126 @@ +use std::env; + +use cargo::core::Workspace; +use cargo::ops::{self, MessageFormat, Packages}; +use cargo::util::{CliResult, Config}; +use cargo::util::important_paths::{find_root_manifest_for_wd}; + +#[derive(Deserialize)] +pub struct Options { + flag_target: Option, + flag_features: Vec, + flag_all_features: bool, + flag_jobs: Option, + flag_manifest_path: Option, + flag_no_default_features: bool, + flag_no_deps: bool, + flag_open: bool, + flag_release: bool, + flag_verbose: u32, + flag_quiet: Option, + flag_color: Option, + flag_message_format: MessageFormat, + flag_package: Vec, + flag_lib: bool, + flag_bin: Vec, + flag_bins: bool, + flag_frozen: bool, + flag_locked: bool, + flag_all: bool, + #[serde(rename = "flag_Z")] + flag_z: Vec, +} + +pub const USAGE: &'static str = " +Build a package's documentation + +Usage: + cargo doc [options] + +Options: + -h, --help Print this message + --open Opens the docs in a browser after the operation + -p SPEC, --package SPEC ... Package to document + --all Document all packages in the workspace + --no-deps Don't build documentation for dependencies + -j N, --jobs N Number of parallel jobs, defaults to # of CPUs + --lib Document only this package's library + --bin NAME Document only the specified binary + --bins Document all binaries + --release Build artifacts in release mode, with optimizations + --features FEATURES Space-separated list of features to also build + --all-features Build all available features + --no-default-features Do not build the `default` feature + --target TRIPLE Build for the target triple + --manifest-path PATH Path to the manifest to document + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + -q, --quiet No output printed to stdout + --color WHEN Coloring: auto, always, never + --message-format FMT Error format: human, json [default: human] + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date + -Z FLAG ... Unstable (nightly-only) flags to Cargo + +By default the documentation for the local package and all dependencies is +built. The output is all placed in `target/doc` in rustdoc's usual format. + +All packages in the workspace are documented if the `--all` flag is supplied. The +`--all` flag is automatically assumed for a virtual manifest. 
+Note that `--exclude` has to be specified in conjunction with the `--all` flag. + +If the --package argument is given, then SPEC is a package id specification +which indicates which package should be documented. If it is not given, then the +current package is documented. For more information on SPEC and its format, see +the `cargo help pkgid` command. +"; + +pub fn execute(options: Options, config: &mut Config) -> CliResult { + debug!("executing; cmd=cargo-check; args={:?}", + env::args().collect::>()); + + config.configure(options.flag_verbose, + options.flag_quiet, + &options.flag_color, + options.flag_frozen, + options.flag_locked, + &options.flag_z)?; + + let root = find_root_manifest_for_wd(options.flag_manifest_path, config.cwd())?; + let ws = Workspace::new(&root, config)?; + + let spec = if options.flag_all || (ws.is_virtual() && options.flag_package.is_empty()) { + Packages::All + } else { + Packages::Packages(&options.flag_package) + }; + + let empty = Vec::new(); + let doc_opts = ops::DocOptions { + open_result: options.flag_open, + compile_opts: ops::CompileOptions { + config: config, + jobs: options.flag_jobs, + target: options.flag_target.as_ref().map(|t| &t[..]), + features: &options.flag_features, + all_features: options.flag_all_features, + no_default_features: options.flag_no_default_features, + spec: spec, + filter: ops::CompileFilter::new(options.flag_lib, + &options.flag_bin, options.flag_bins, + &empty, false, + &empty, false, + &empty, false, + false), + message_format: options.flag_message_format, + release: options.flag_release, + mode: ops::CompileMode::Doc { + deps: !options.flag_no_deps, + }, + target_rustc_args: None, + target_rustdoc_args: None, + }, + }; + + ops::doc(&ws, &doc_opts)?; + Ok(()) +} diff --git a/collector/compile-benchmarks/cargo/src/bin/fetch.rs b/collector/compile-benchmarks/cargo/src/bin/fetch.rs new file mode 100644 index 000000000..880b77eb6 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/fetch.rs @@ -0,0 +1,56 @@ +use cargo::core::Workspace; +use cargo::ops; +use cargo::util::{CliResult, Config}; +use cargo::util::important_paths::find_root_manifest_for_wd; + +#[derive(Deserialize)] +pub struct Options { + flag_manifest_path: Option, + flag_verbose: u32, + flag_quiet: Option, + flag_color: Option, + flag_frozen: bool, + flag_locked: bool, + #[serde(rename = "flag_Z")] + flag_z: Vec, +} + +pub const USAGE: &'static str = " +Fetch dependencies of a package from the network. + +Usage: + cargo fetch [options] + +Options: + -h, --help Print this message + --manifest-path PATH Path to the manifest to fetch dependencies for + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + -q, --quiet No output printed to stdout + --color WHEN Coloring: auto, always, never + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date + -Z FLAG ... Unstable (nightly-only) flags to Cargo + +If a lockfile is available, this command will ensure that all of the git +dependencies and/or registries dependencies are downloaded and locally +available. The network is never touched after a `cargo fetch` unless +the lockfile changes. + +If the lockfile is not available, then this is the equivalent of +`cargo generate-lockfile`. A lockfile is generated and dependencies are also +all updated. 
+"; + +pub fn execute(options: Options, config: &mut Config) -> CliResult { + config.configure(options.flag_verbose, + options.flag_quiet, + &options.flag_color, + options.flag_frozen, + options.flag_locked, + &options.flag_z)?; + let root = find_root_manifest_for_wd(options.flag_manifest_path, config.cwd())?; + let ws = Workspace::new(&root, config)?; + ops::fetch(&ws)?; + Ok(()) +} + diff --git a/collector/compile-benchmarks/cargo/src/bin/generate_lockfile.rs b/collector/compile-benchmarks/cargo/src/bin/generate_lockfile.rs new file mode 100644 index 000000000..11cc83639 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/generate_lockfile.rs @@ -0,0 +1,50 @@ +use std::env; + +use cargo::core::Workspace; +use cargo::ops; +use cargo::util::{CliResult, Config}; +use cargo::util::important_paths::find_root_manifest_for_wd; + +#[derive(Deserialize)] +pub struct Options { + flag_manifest_path: Option, + flag_verbose: u32, + flag_quiet: Option, + flag_color: Option, + flag_frozen: bool, + flag_locked: bool, + #[serde(rename = "flag_Z")] + flag_z: Vec, +} + +pub const USAGE: &'static str = " +Generate the lockfile for a project + +Usage: + cargo generate-lockfile [options] + +Options: + -h, --help Print this message + --manifest-path PATH Path to the manifest to generate a lockfile for + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + -q, --quiet No output printed to stdout + --color WHEN Coloring: auto, always, never + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date + -Z FLAG ... Unstable (nightly-only) flags to Cargo +"; + +pub fn execute(options: Options, config: &mut Config) -> CliResult { + debug!("executing; cmd=cargo-generate-lockfile; args={:?}", env::args().collect::>()); + config.configure(options.flag_verbose, + options.flag_quiet, + &options.flag_color, + options.flag_frozen, + options.flag_locked, + &options.flag_z)?; + let root = find_root_manifest_for_wd(options.flag_manifest_path, config.cwd())?; + + let ws = Workspace::new(&root, config)?; + ops::generate_lockfile(&ws)?; + Ok(()) +} diff --git a/collector/compile-benchmarks/cargo/src/bin/git_checkout.rs b/collector/compile-benchmarks/cargo/src/bin/git_checkout.rs new file mode 100644 index 000000000..ec3ae7a5b --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/git_checkout.rs @@ -0,0 +1,54 @@ +use cargo::core::source::{Source, SourceId, GitReference}; +use cargo::sources::git::{GitSource}; +use cargo::util::{Config, CliResult, ToUrl}; + +#[derive(Deserialize)] +pub struct Options { + flag_url: String, + flag_reference: String, + flag_verbose: u32, + flag_quiet: Option, + flag_color: Option, + flag_frozen: bool, + flag_locked: bool, + #[serde(rename = "flag_Z")] + flag_z: Vec, +} + +pub const USAGE: &'static str = " +Checkout a copy of a Git repository + +Usage: + cargo git-checkout [options] --url=URL --reference=REF + cargo git-checkout -h | --help + +Options: + -h, --help Print this message + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + -q, --quiet No output printed to stdout + --color WHEN Coloring: auto, always, never + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date + -Z FLAG ... 
Unstable (nightly-only) flags to Cargo +"; + +pub fn execute(options: Options, config: &mut Config) -> CliResult { + config.configure(options.flag_verbose, + options.flag_quiet, + &options.flag_color, + options.flag_frozen, + options.flag_locked, + &options.flag_z)?; + let Options { flag_url: url, flag_reference: reference, .. } = options; + + let url = url.to_url()?; + + let reference = GitReference::Branch(reference.clone()); + let source_id = SourceId::for_git(&url, reference)?; + + let mut source = GitSource::new(&source_id, config)?; + + source.update()?; + + Ok(()) +} diff --git a/collector/compile-benchmarks/cargo/src/bin/help.rs b/collector/compile-benchmarks/cargo/src/bin/help.rs new file mode 100644 index 000000000..f7f564ee7 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/help.rs @@ -0,0 +1,22 @@ +use cargo::util::{CliResult, CliError, Config}; + +#[derive(Deserialize)] +pub struct Options; + +pub const USAGE: &'static str = " +Get some help with a cargo command. + +Usage: + cargo help + cargo help -h | --help + +Options: + -h, --help Print this message +"; + +pub fn execute(_: Options, _: &mut Config) -> CliResult { + // This is a dummy command just so that `cargo help help` works. + // The actual delegation of help flag to subcommands is handled by the + // cargo command. + Err(CliError::new("help command should not be executed directly".into(), 101)) +} diff --git a/collector/compile-benchmarks/cargo/src/bin/init.rs b/collector/compile-benchmarks/cargo/src/bin/init.rs new file mode 100644 index 000000000..9252ddd39 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/init.rs @@ -0,0 +1,73 @@ +use std::env; + +use cargo::ops; +use cargo::util::{CliResult, Config}; + +#[derive(Deserialize)] +pub struct Options { + flag_verbose: u32, + flag_quiet: Option, + flag_color: Option, + flag_bin: bool, + flag_lib: bool, + arg_path: Option, + flag_name: Option, + flag_vcs: Option, + flag_frozen: bool, + flag_locked: bool, + #[serde(rename = "flag_Z")] + flag_z: Vec, +} + +pub const USAGE: &'static str = " +Create a new cargo package in an existing directory + +Usage: + cargo init [options] [] + cargo init -h | --help + +Options: + -h, --help Print this message + --vcs VCS Initialize a new repository for the given version + control system (git, hg, pijul, or fossil) or do not + initialize any version control at all (none), overriding + a global configuration. + --bin Use a binary (application) template + --lib Use a library template [default] + --name NAME Set the resulting package name + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + -q, --quiet No output printed to stdout + --color WHEN Coloring: auto, always, never + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date + -Z FLAG ... Unstable (nightly-only) flags to Cargo +"; + +pub fn execute(options: Options, config: &mut Config) -> CliResult { + debug!("executing; cmd=cargo-init; args={:?}", env::args().collect::>()); + config.configure(options.flag_verbose, + options.flag_quiet, + &options.flag_color, + options.flag_frozen, + options.flag_locked, + &options.flag_z)?; + + let Options { flag_bin, flag_lib, arg_path, flag_name, flag_vcs, .. 
} = options; + + let path = &arg_path.unwrap_or_else(|| String::from(".")); + let opts = ops::NewOptions::new(flag_vcs, + flag_bin, + flag_lib, + path, + flag_name.as_ref().map(|s| s.as_ref())); + + let opts_lib = opts.lib; + ops::init(&opts, config)?; + + config.shell().status("Created", format!("{} project", + if opts_lib { "library" } + else {"binary (application)"}))?; + + Ok(()) +} + diff --git a/collector/compile-benchmarks/cargo/src/bin/install.rs b/collector/compile-benchmarks/cargo/src/bin/install.rs new file mode 100644 index 000000000..c7062d40c --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/install.rs @@ -0,0 +1,163 @@ +use cargo::ops; +use cargo::core::{SourceId, GitReference}; +use cargo::util::{CliResult, Config, ToUrl}; + +#[derive(Deserialize)] +pub struct Options { + flag_jobs: Option, + flag_features: Vec, + flag_all_features: bool, + flag_no_default_features: bool, + flag_debug: bool, + flag_bin: Vec, + flag_bins: bool, + flag_example: Vec, + flag_examples: bool, + flag_verbose: u32, + flag_quiet: Option, + flag_color: Option, + flag_root: Option, + flag_list: bool, + flag_force: bool, + flag_frozen: bool, + flag_locked: bool, + + arg_crate: Vec, + flag_vers: Option, + + flag_git: Option, + flag_branch: Option, + flag_tag: Option, + flag_rev: Option, + + flag_path: Option, + #[serde(rename = "flag_Z")] + flag_z: Vec, +} + +pub const USAGE: &'static str = " +Install a Rust binary + +Usage: + cargo install [options] [...] + cargo install [options] --list + +Specifying what crate to install: + --vers VERS Specify a version to install from crates.io + --git URL Git URL to install the specified crate from + --branch BRANCH Branch to use when installing from git + --tag TAG Tag to use when installing from git + --rev SHA Specific commit to use when installing from git + --path PATH Filesystem path to local crate to install + +Build and install options: + -h, --help Print this message + -j N, --jobs N Number of parallel jobs, defaults to # of CPUs + -f, --force Force overwriting existing crates or binaries + --features FEATURES Space-separated list of features to activate + --all-features Build all available features + --no-default-features Do not build the `default` feature + --debug Build in debug mode instead of release mode + --bin NAME Install only the specified binary + --bins Install all binaries + --example NAME Install only the specified example + --examples Install all examples + --root DIR Directory to install packages into + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + -q, --quiet Less output printed to stdout + --color WHEN Coloring: auto, always, never + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date + -Z FLAG ... Unstable (nightly-only) flags to Cargo + +This command manages Cargo's local set of installed binary crates. Only packages +which have [[bin]] targets can be installed, and all binaries are installed into +the installation root's `bin` folder. The installation root is determined, in +order of precedence, by `--root`, `$CARGO_INSTALL_ROOT`, the `install.root` +configuration key, and finally the home directory (which is either +`$CARGO_HOME` if set or `$HOME/.cargo` by default). + +There are multiple sources from which a crate can be installed. The default +location is crates.io but the `--git` and `--path` flags can change this source. 
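The precedence spelled out above is implemented by the SourceId selection near the top of execute() below. A standalone sketch of that decision ladder, with illustrative types standing in for cargo's own:

    // Mirrors the --git/--path/crates.io fallback chain used by `cargo install`.
    #[derive(Debug)]
    enum GitRef { Branch(String), Tag(String), Rev(String) }

    #[derive(Debug)]
    enum InstallSource {
        Git { url: String, reference: GitRef },
        Path(String),
        CurrentDir,
        CratesIo,
    }

    fn select_source(git: Option<String>,
                     branch: Option<String>,
                     tag: Option<String>,
                     rev: Option<String>,
                     path: Option<String>,
                     krates: &[String]) -> InstallSource {
        if let Some(url) = git {
            // --branch/--tag/--rev refine a --git source; with none given the
            // default is the `master` branch, as in the code below.
            let reference = if let Some(b) = branch { GitRef::Branch(b) }
                else if let Some(t) = tag { GitRef::Tag(t) }
                else if let Some(r) = rev { GitRef::Rev(r) }
                else { GitRef::Branch("master".to_string()) };
            InstallSource::Git { url, reference }
        } else if let Some(p) = path {
            InstallSource::Path(p)
        } else if krates.is_empty() {
            // No crate names at all: install the package in `.`.
            InstallSource::CurrentDir
        } else {
            InstallSource::CratesIo
        }
    }

    fn main() {
        let src = select_source(None, None, None, None, None, &[]);
        println!("{:?}", src); // CurrentDir: the `install` == `install --path .` case
    }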
+If the source contains more than one package (such as crates.io or a git +repository with multiple crates) the `` argument is required to indicate +which crate should be installed. + +Crates from crates.io can optionally specify the version they wish to install +via the `--vers` flags, and similarly packages from git repositories can +optionally specify the branch, tag, or revision that should be installed. If a +crate has multiple binaries, the `--bin` argument can selectively install only +one of them, and if you'd rather install examples the `--example` argument can +be used as well. + +By default cargo will refuse to overwrite existing binaries. The `--force` flag +enables overwriting existing binaries. Thus you can reinstall a crate with +`cargo install --force `. + +As a special convenience, omitting the specification entirely will +install the crate in the current directory. That is, `install` is equivalent to +the more explicit `install --path .`. + +The `--list` option will list all installed packages (and their versions). +"; + +pub fn execute(options: Options, config: &mut Config) -> CliResult { + config.configure(options.flag_verbose, + options.flag_quiet, + &options.flag_color, + options.flag_frozen, + options.flag_locked, + &options.flag_z)?; + + let compile_opts = ops::CompileOptions { + config: config, + jobs: options.flag_jobs, + target: None, + features: &options.flag_features, + all_features: options.flag_all_features, + no_default_features: options.flag_no_default_features, + spec: ops::Packages::Packages(&[]), + mode: ops::CompileMode::Build, + release: !options.flag_debug, + filter: ops::CompileFilter::new(false, + &options.flag_bin, options.flag_bins, + &[], false, + &options.flag_example, options.flag_examples, + &[], false, + false), + message_format: ops::MessageFormat::Human, + target_rustc_args: None, + target_rustdoc_args: None, + }; + + let source = if let Some(url) = options.flag_git { + let url = url.to_url()?; + let gitref = if let Some(branch) = options.flag_branch { + GitReference::Branch(branch) + } else if let Some(tag) = options.flag_tag { + GitReference::Tag(tag) + } else if let Some(rev) = options.flag_rev { + GitReference::Rev(rev) + } else { + GitReference::Branch("master".to_string()) + }; + SourceId::for_git(&url, gitref)? + } else if let Some(path) = options.flag_path { + SourceId::for_path(&config.cwd().join(path))? + } else if options.arg_crate.is_empty() { + SourceId::for_path(config.cwd())? + } else { + SourceId::crates_io(config)? 
+ }; + + let krates = options.arg_crate.iter().map(|s| &s[..]).collect::>(); + let vers = options.flag_vers.as_ref().map(|s| &s[..]); + let root = options.flag_root.as_ref().map(|s| &s[..]); + + if options.flag_list { + ops::install_list(root, config)?; + } else { + ops::install(root, krates, &source, vers, &compile_opts, options.flag_force)?; + } + Ok(()) +} diff --git a/collector/compile-benchmarks/cargo/src/bin/locate_project.rs b/collector/compile-benchmarks/cargo/src/bin/locate_project.rs new file mode 100644 index 000000000..6e16cca2d --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/locate_project.rs @@ -0,0 +1,38 @@ +use cargo; +use cargo::util::{CliResult, CliError, Config}; +use cargo::util::important_paths::{find_root_manifest_for_wd}; + +#[derive(Deserialize)] +pub struct LocateProjectFlags { + flag_manifest_path: Option, +} + +pub const USAGE: &'static str = " +Print a JSON representation of a Cargo.toml file's location + +Usage: + cargo locate-project [options] + +Options: + --manifest-path PATH Path to the manifest to locate + -h, --help Print this message +"; + +#[derive(Serialize)] +pub struct ProjectLocation { + root: String +} + +pub fn execute(flags: LocateProjectFlags, config: &mut Config) -> CliResult { + let root = find_root_manifest_for_wd(flags.flag_manifest_path, config.cwd())?; + + let string = root.to_str() + .ok_or_else(|| "Your project path contains \ + characters not representable in \ + Unicode".into()) + .map_err(|e| CliError::new(e, 1))?; + + let location = ProjectLocation { root: string.to_string() }; + cargo::print_json(&location); + Ok(()) +} diff --git a/collector/compile-benchmarks/cargo/src/bin/login.rs b/collector/compile-benchmarks/cargo/src/bin/login.rs new file mode 100644 index 000000000..99ce8e755 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/login.rs @@ -0,0 +1,69 @@ +use std::io::prelude::*; +use std::io; + +use cargo::ops; +use cargo::core::{SourceId, Source}; +use cargo::sources::RegistrySource; +use cargo::util::{CliResult, CargoResultExt, Config}; + +#[derive(Deserialize)] +pub struct Options { + flag_host: Option, + arg_token: Option, + flag_verbose: u32, + flag_quiet: Option, + flag_color: Option, + flag_frozen: bool, + flag_locked: bool, + #[serde(rename = "flag_Z")] + flag_z: Vec, +} + +pub const USAGE: &'static str = " +Save an api token from the registry locally + +Usage: + cargo login [options] [] + +Options: + -h, --help Print this message + --host HOST Host to set the token for + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + -q, --quiet No output printed to stdout + --color WHEN Coloring: auto, always, never + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date + -Z FLAG ... 
Unstable (nightly-only) flags to Cargo + +"; + +pub fn execute(options: Options, config: &mut Config) -> CliResult { + config.configure(options.flag_verbose, + options.flag_quiet, + &options.flag_color, + options.flag_frozen, + options.flag_locked, + &options.flag_z)?; + let token = match options.arg_token.clone() { + Some(token) => token, + None => { + let src = SourceId::crates_io(config)?; + let mut src = RegistrySource::remote(&src, config); + src.update()?; + let config = src.config()?.unwrap(); + let host = options.flag_host.clone().unwrap_or(config.api); + println!("please visit {}me and paste the API Token below", host); + let mut line = String::new(); + let input = io::stdin(); + input.lock().read_line(&mut line).chain_err(|| { + "failed to read stdin" + })?; + line + } + }; + + let token = token.trim().to_string(); + ops::registry_login(config, token)?; + Ok(()) +} + diff --git a/collector/compile-benchmarks/cargo/src/bin/metadata.rs b/collector/compile-benchmarks/cargo/src/bin/metadata.rs new file mode 100644 index 000000000..d10fe8c44 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/metadata.rs @@ -0,0 +1,75 @@ +use cargo; +use cargo::core::Workspace; +use cargo::ops::{output_metadata, OutputMetadataOptions}; +use cargo::util::important_paths::find_root_manifest_for_wd; +use cargo::util::{CliResult, Config}; + +#[derive(Deserialize)] +pub struct Options { + flag_color: Option, + flag_features: Vec, + flag_all_features: bool, + flag_format_version: Option, + flag_manifest_path: Option, + flag_no_default_features: bool, + flag_no_deps: bool, + flag_quiet: Option, + flag_verbose: u32, + flag_frozen: bool, + flag_locked: bool, + #[serde(rename = "flag_Z")] + flag_z: Vec, +} + +pub const USAGE: &'static str = " +Output the resolved dependencies of a project, the concrete used versions +including overrides, in machine-readable format. + +Usage: + cargo metadata [options] + +Options: + -h, --help Print this message + --features FEATURES Space-separated list of features + --all-features Build all available features + --no-default-features Do not include the `default` feature + --no-deps Output information only about the root package + and don't fetch dependencies. + --manifest-path PATH Path to the manifest + --format-version VERSION Format version + Valid values: 1 + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + -q, --quiet No output printed to stdout + --color WHEN Coloring: auto, always, never + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date + -Z FLAG ... Unstable (nightly-only) flags to Cargo +"; + +pub fn execute(options: Options, config: &mut Config) -> CliResult { + config.configure(options.flag_verbose, + options.flag_quiet, + &options.flag_color, + options.flag_frozen, + options.flag_locked, + &options.flag_z)?; + let manifest = find_root_manifest_for_wd(options.flag_manifest_path, config.cwd())?; + + if options.flag_format_version.is_none() { + config.shell().warn("please specify `--format-version` flag explicitly to \ + avoid compatibility problems")? 
+ } + + let options = OutputMetadataOptions { + features: options.flag_features, + all_features: options.flag_all_features, + no_default_features: options.flag_no_default_features, + no_deps: options.flag_no_deps, + version: options.flag_format_version.unwrap_or(1), + }; + + let ws = Workspace::new(&manifest, config)?; + let result = output_metadata(&ws, &options)?; + cargo::print_json(&result); + Ok(()) +} diff --git a/collector/compile-benchmarks/cargo/src/bin/new.rs b/collector/compile-benchmarks/cargo/src/bin/new.rs new file mode 100644 index 000000000..c006fd1e4 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/new.rs @@ -0,0 +1,73 @@ +use std::env; + +use cargo::ops; +use cargo::util::{CliResult, Config}; + +#[derive(Deserialize)] +pub struct Options { + flag_verbose: u32, + flag_quiet: Option, + flag_color: Option, + flag_bin: bool, + flag_lib: bool, + arg_path: String, + flag_name: Option, + flag_vcs: Option, + flag_frozen: bool, + flag_locked: bool, + #[serde(rename = "flag_Z")] + flag_z: Vec, +} + +pub const USAGE: &'static str = " +Create a new cargo package at + +Usage: + cargo new [options] + cargo new -h | --help + +Options: + -h, --help Print this message + --vcs VCS Initialize a new repository for the given version + control system (git, hg, pijul, or fossil) or do not + initialize any version control at all (none), overriding + a global configuration. + --bin Use a binary (application) template + --lib Use a library template [default] + --name NAME Set the resulting package name, defaults to the value of + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + -q, --quiet No output printed to stdout + --color WHEN Coloring: auto, always, never + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date + -Z FLAG ... Unstable (nightly-only) flags to Cargo +"; + +pub fn execute(options: Options, config: &mut Config) -> CliResult { + debug!("executing; cmd=cargo-new; args={:?}", env::args().collect::>()); + config.configure(options.flag_verbose, + options.flag_quiet, + &options.flag_color, + options.flag_frozen, + options.flag_locked, + &options.flag_z)?; + + let Options { flag_bin, flag_lib, arg_path, flag_name, flag_vcs, .. 
} = options; + + let opts = ops::NewOptions::new(flag_vcs, + flag_bin, + flag_lib, + &arg_path, + flag_name.as_ref().map(|s| s.as_ref())); + + let opts_lib = opts.lib; + ops::new(&opts, config)?; + + config.shell().status("Created", format!("{} `{}` project", + if opts_lib { "library" } + else {"binary (application)"}, + arg_path))?; + + Ok(()) +} + diff --git a/collector/compile-benchmarks/cargo/src/bin/owner.rs b/collector/compile-benchmarks/cargo/src/bin/owner.rs new file mode 100644 index 000000000..6c76a6faf --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/owner.rs @@ -0,0 +1,68 @@ +use cargo::ops; +use cargo::util::{CliResult, Config}; + +#[derive(Deserialize)] +pub struct Options { + arg_crate: Option, + flag_token: Option, + flag_add: Option>, + flag_remove: Option>, + flag_index: Option, + flag_verbose: u32, + flag_quiet: Option, + flag_color: Option, + flag_list: bool, + flag_frozen: bool, + flag_locked: bool, + #[serde(rename = "flag_Z")] + flag_z: Vec, +} + +pub const USAGE: &'static str = " +Manage the owners of a crate on the registry + +Usage: + cargo owner [options] [] + +Options: + -h, --help Print this message + -a, --add LOGIN Name of a user or team to add as an owner + -r, --remove LOGIN Name of a user or team to remove as an owner + -l, --list List owners of a crate + --index INDEX Registry index to modify owners for + --token TOKEN API token to use when authenticating + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + -q, --quiet No output printed to stdout + --color WHEN Coloring: auto, always, never + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date + -Z FLAG ... Unstable (nightly-only) flags to Cargo + +This command will modify the owners for a package on the specified registry (or +default). Note that owners of a package can upload new versions, yank old +versions. Explicitly named owners can also modify the set of owners, so take +caution! + +See http://doc.crates.io/crates-io.html#cargo-owner for detailed documentation +and troubleshooting. 
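A couple of illustrative invocations of the command documented above; the crate name foo is a placeholder, and github:ORG:TEAM is the registry's team-owner syntax:

    use std::process::Command;

    fn main() -> std::io::Result<()> {
        // Add a team as an owner, then list the owners of the same crate.
        Command::new("cargo")
            .args(["owner", "--add", "github:rust-lang:core", "foo"])
            .status()?;
        Command::new("cargo")
            .args(["owner", "--list", "foo"])
            .status()?;
        Ok(())
    }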
+"; + +pub fn execute(options: Options, config: &mut Config) -> CliResult { + config.configure(options.flag_verbose, + options.flag_quiet, + &options.flag_color, + options.flag_frozen, + options.flag_locked, + &options.flag_z)?; + let opts = ops::OwnersOptions { + krate: options.arg_crate, + token: options.flag_token, + index: options.flag_index, + to_add: options.flag_add, + to_remove: options.flag_remove, + list: options.flag_list, + }; + ops::modify_owners(config, &opts)?; + Ok(()) +} + diff --git a/collector/compile-benchmarks/cargo/src/bin/package.rs b/collector/compile-benchmarks/cargo/src/bin/package.rs new file mode 100644 index 000000000..31e3330ad --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/package.rs @@ -0,0 +1,66 @@ +use cargo::core::Workspace; +use cargo::ops; +use cargo::util::{CliResult, Config}; +use cargo::util::important_paths::find_root_manifest_for_wd; + +#[derive(Deserialize)] +pub struct Options { + flag_verbose: u32, + flag_quiet: Option, + flag_color: Option, + flag_target: Option, + flag_manifest_path: Option, + flag_no_verify: bool, + flag_no_metadata: bool, + flag_list: bool, + flag_allow_dirty: bool, + flag_jobs: Option, + flag_frozen: bool, + flag_locked: bool, + #[serde(rename = "flag_Z")] + flag_z: Vec, +} + +pub const USAGE: &'static str = " +Assemble the local package into a distributable tarball + +Usage: + cargo package [options] + +Options: + -h, --help Print this message + -l, --list Print files included in a package without making one + --no-verify Don't verify the contents by building them + --no-metadata Ignore warnings about a lack of human-usable metadata + --allow-dirty Allow dirty working directories to be packaged + --target TRIPLE Build for the target triple + --manifest-path PATH Path to the manifest to compile + -j N, --jobs N Number of parallel jobs, defaults to # of CPUs + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + -q, --quiet No output printed to stdout + --color WHEN Coloring: auto, always, never + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date + -Z FLAG ... 
Unstable (nightly-only) flags to Cargo +"; + +pub fn execute(options: Options, config: &mut Config) -> CliResult { + config.configure(options.flag_verbose, + options.flag_quiet, + &options.flag_color, + options.flag_frozen, + options.flag_locked, + &options.flag_z)?; + let root = find_root_manifest_for_wd(options.flag_manifest_path, config.cwd())?; + let ws = Workspace::new(&root, config)?; + ops::package(&ws, &ops::PackageOpts { + config: config, + verify: !options.flag_no_verify, + list: options.flag_list, + check_metadata: !options.flag_no_metadata, + allow_dirty: options.flag_allow_dirty, + target: options.flag_target.as_ref().map(|t| &t[..]), + jobs: options.flag_jobs, + })?; + Ok(()) +} diff --git a/collector/compile-benchmarks/cargo/src/bin/pkgid.rs b/collector/compile-benchmarks/cargo/src/bin/pkgid.rs new file mode 100644 index 000000000..01f8a8f6b --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/pkgid.rs @@ -0,0 +1,80 @@ +use cargo::core::Workspace; +use cargo::ops; +use cargo::util::{CliResult, Config}; +use cargo::util::important_paths::{find_root_manifest_for_wd}; + +#[derive(Deserialize)] +pub struct Options { + flag_verbose: u32, + flag_quiet: Option, + flag_color: Option, + flag_manifest_path: Option, + flag_frozen: bool, + flag_locked: bool, + flag_package: Option, + arg_spec: Option, + #[serde(rename = "flag_Z")] + flag_z: Vec, +} + +pub const USAGE: &'static str = " +Print a fully qualified package specification + +Usage: + cargo pkgid [options] [] + +Options: + -h, --help Print this message + -p SPEC, --package SPEC Argument to get the package id specifier for + --manifest-path PATH Path to the manifest to the package to clean + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + -q, --quiet No output printed to stdout + --color WHEN Coloring: auto, always, never + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date + -Z FLAG ... Unstable (nightly-only) flags to Cargo + +Given a argument, print out the fully qualified package id specifier. +This command will generate an error if is ambiguous as to which package +it refers to in the dependency graph. If no is given, then the pkgid for +the local package is printed. + +This command requires that a lockfile is available and dependencies have been +fetched. 
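The spec shapes summarized in the table below decompose into a name, an optional version, and an optional source URL. A deliberately simplified sketch of that decomposition (the real parser is cargo's PackageIdSpec):

    #[derive(Debug, Default)]
    struct Spec {
        name: Option<String>,
        version: Option<String>,
        url: Option<String>,
    }

    fn parse_spec(spec: &str) -> Spec {
        let mut out = Spec::default();
        let (head, frag) = match spec.split_once('#') {
            Some((h, f)) => (h, Some(f)),
            None => (spec, None),
        };
        if head.contains('/') {
            // `crates.io/foo` style: the URL's last segment doubles as the name.
            out.url = Some(head.to_string());
            out.name = head.rsplit('/').next().map(str::to_string);
        } else if frag.is_none() {
            // plain `foo` or `foo:1.2.3`
            match head.split_once(':') {
                Some((n, v)) => {
                    out.name = Some(n.to_string());
                    out.version = Some(v.to_string());
                }
                None => out.name = Some(head.to_string()),
            }
        }
        if let Some(f) = frag {
            // `#1.2.3`, `#foo`, or `#foo:1.2.3` (the latter overrides the name
            // taken from the URL, as in `crates.io/bar#foo:1.2.3`).
            match f.split_once(':') {
                Some((n, v)) => {
                    out.name = Some(n.to_string());
                    out.version = Some(v.to_string());
                }
                None if f.starts_with(|c: char| c.is_ascii_digit()) => {
                    out.version = Some(f.to_string());
                }
                None => out.name = Some(f.to_string()),
            }
        }
        out
    }

    fn main() {
        for s in ["foo", "foo:1.2.3", "crates.io/foo", "crates.io/bar#foo:1.2.3"] {
            println!("{} -> {:?}", s, parse_spec(s));
        }
    }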
+ +Example Package IDs + + pkgid | name | version | url + |-----------------------------|--------|-----------|---------------------| + foo | foo | * | * + foo:1.2.3 | foo | 1.2.3 | * + crates.io/foo | foo | * | *://crates.io/foo + crates.io/foo#1.2.3 | foo | 1.2.3 | *://crates.io/foo + crates.io/bar#foo:1.2.3 | foo | 1.2.3 | *://crates.io/bar + http://crates.io/foo#1.2.3 | foo | 1.2.3 | http://crates.io/foo + +"; + +pub fn execute(options: Options, config: &mut Config) -> CliResult { + config.configure(options.flag_verbose, + options.flag_quiet, + &options.flag_color, + options.flag_frozen, + options.flag_locked, + &options.flag_z)?; + let root = find_root_manifest_for_wd(options.flag_manifest_path.clone(), config.cwd())?; + let ws = Workspace::new(&root, config)?; + + let spec = if options.arg_spec.is_some() { + options.arg_spec + } else if options.flag_package.is_some() { + options.flag_package + } else { + None + }; + let spec = spec.as_ref().map(|s| &s[..]); + let spec = ops::pkgid(&ws, spec)?; + println!("{}", spec); + Ok(()) +} + diff --git a/collector/compile-benchmarks/cargo/src/bin/publish.rs b/collector/compile-benchmarks/cargo/src/bin/publish.rs new file mode 100644 index 000000000..c34a0e270 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/publish.rs @@ -0,0 +1,105 @@ +use cargo::core::Workspace; +use cargo::ops; +use cargo::util::{CliResult, Config}; +use cargo::util::important_paths::find_root_manifest_for_wd; + +#[derive(Deserialize)] +pub struct Options { + flag_index: Option, + flag_host: Option, // TODO: Deprecated, remove + flag_token: Option, + flag_target: Option, + flag_manifest_path: Option, + flag_verbose: u32, + flag_quiet: Option, + flag_color: Option, + flag_no_verify: bool, + flag_allow_dirty: bool, + flag_jobs: Option, + flag_dry_run: bool, + flag_frozen: bool, + flag_locked: bool, + #[serde(rename = "flag_Z")] + flag_z: Vec, +} + +pub const USAGE: &'static str = " +Upload a package to the registry + +Usage: + cargo publish [options] + +Options: + -h, --help Print this message + --index INDEX Registry index to upload the package to + --host HOST DEPRECATED, renamed to '--index' + --token TOKEN Token to use when uploading + --no-verify Don't verify package tarball before publish + --allow-dirty Allow publishing with a dirty source directory + --target TRIPLE Build for the target triple + --manifest-path PATH Path to the manifest of the package to publish + -j N, --jobs N Number of parallel jobs, defaults to # of CPUs + --dry-run Perform all checks without uploading + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + -q, --quiet No output printed to stdout + --color WHEN Coloring: auto, always, never + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date + -Z FLAG ... Unstable (nightly-only) flags to Cargo + +"; + +pub fn execute(options: Options, config: &mut Config) -> CliResult { + config.configure(options.flag_verbose, + options.flag_quiet, + &options.flag_color, + options.flag_frozen, + options.flag_locked, + &options.flag_z)?; + + let Options { + flag_token: token, + flag_index: index, + flag_host: host, // TODO: Deprecated, remove + flag_manifest_path, + flag_no_verify: no_verify, + flag_allow_dirty: allow_dirty, + flag_jobs: jobs, + flag_dry_run: dry_run, + flag_target: target, + .. 
+ } = options; + + + // TODO: Deprecated + // remove once it has been decided --host can be removed + // We may instead want to repurpose the host flag, as + // mentioned in this issue + // https://github.com/rust-lang/cargo/issues/4208 + let msg = "The flag '--host' is no longer valid. + +Previous versions of Cargo accepted this flag, but it is being +deprecated. The flag is being renamed to 'index', as the flag +wants the location of the index to which to publish. Please +use '--index' instead. + +This will soon become a hard error, so it's either recommended +to update to a fixed version or contact the upstream maintainer +about this warning."; + + let root = find_root_manifest_for_wd(flag_manifest_path.clone(), config.cwd())?; + let ws = Workspace::new(&root, config)?; + ops::publish(&ws, &ops::PublishOpts { + config: config, + token: token, + index: + if host.clone().is_none() || host.clone().unwrap().is_empty() { index } + else { config.shell().warn(&msg)?; host }, // TODO: Deprecated, remove + verify: !no_verify, + allow_dirty: allow_dirty, + target: target.as_ref().map(|t| &t[..]), + jobs: jobs, + dry_run: dry_run, + })?; + Ok(()) +} diff --git a/collector/compile-benchmarks/cargo/src/bin/read_manifest.rs b/collector/compile-benchmarks/cargo/src/bin/read_manifest.rs new file mode 100644 index 000000000..eee2210d8 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/read_manifest.rs @@ -0,0 +1,39 @@ +use std::env; + +use cargo; +use cargo::core::Package; +use cargo::util::{CliResult, Config}; +use cargo::util::important_paths::{find_root_manifest_for_wd}; + +#[derive(Deserialize)] +pub struct Options { + flag_manifest_path: Option, + flag_color: Option, +} + +pub const USAGE: &'static str = " +Deprecated, use `cargo metadata --no-deps` instead. +Print a JSON representation of a Cargo.toml manifest. + +Usage: + cargo read-manifest [options] + cargo read-manifest -h | --help + +Options: + -h, --help Print this message + -v, --verbose ... 
Use verbose output (-vv very verbose/build.rs output) + --manifest-path PATH Path to the manifest + --color WHEN Coloring: auto, always, never +"; + +pub fn execute(options: Options, config: &mut Config) -> CliResult { + debug!("executing; cmd=cargo-read-manifest; args={:?}", + env::args().collect::>()); + config.shell().set_color_choice(options.flag_color.as_ref().map(|s| &s[..]))?; + + let root = find_root_manifest_for_wd(options.flag_manifest_path, config.cwd())?; + + let pkg = Package::for_path(&root, config)?; + cargo::print_json(&pkg); + Ok(()) +} diff --git a/collector/compile-benchmarks/cargo/src/bin/run.rs b/collector/compile-benchmarks/cargo/src/bin/run.rs new file mode 100644 index 000000000..21486baf6 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/run.rs @@ -0,0 +1,136 @@ +use std::iter::FromIterator; + +use cargo::core::Workspace; +use cargo::ops::{self, MessageFormat, Packages}; +use cargo::util::{CliResult, CliError, Config, CargoErrorKind}; +use cargo::util::important_paths::{find_root_manifest_for_wd}; + +#[derive(Deserialize)] +pub struct Options { + flag_bin: Option, + flag_example: Option, + flag_package: Option, + flag_jobs: Option, + flag_features: Vec, + flag_all_features: bool, + flag_no_default_features: bool, + flag_target: Option, + flag_manifest_path: Option, + flag_verbose: u32, + flag_quiet: Option, + flag_color: Option, + flag_message_format: MessageFormat, + flag_release: bool, + flag_frozen: bool, + flag_locked: bool, + arg_args: Vec, + #[serde(rename = "flag_Z")] + flag_z: Vec, +} + +pub const USAGE: &'static str = " +Run the main binary of the local package (src/main.rs) + +Usage: + cargo run [options] [--] [...] + +Options: + -h, --help Print this message + --bin NAME Name of the bin target to run + --example NAME Name of the example target to run + -p SPEC, --package SPEC Package with the target to run + -j N, --jobs N Number of parallel jobs, defaults to # of CPUs + --release Build artifacts in release mode, with optimizations + --features FEATURES Space-separated list of features to also build + --all-features Build all available features + --no-default-features Do not build the `default` feature + --target TRIPLE Build for the target triple + --manifest-path PATH Path to the manifest to execute + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + -q, --quiet No output printed to stdout + --color WHEN Coloring: auto, always, never + --message-format FMT Error format: human, json [default: human] + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date + -Z FLAG ... Unstable (nightly-only) flags to Cargo + +If neither `--bin` nor `--example` are given, then if the project only has one +bin target it will be run. Otherwise `--bin` specifies the bin target to run, +and `--example` specifies the example target to run. At most one of `--bin` or +`--example` can be provided. + +All of the trailing arguments are passed to the binary to run. If you're passing +arguments to both Cargo and the binary, the ones after `--` go to the binary, +the ones before go to Cargo. 
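A minimal standalone sketch of the `--` convention just described: arguments before the first `--` belong to the tool, everything after it is forwarded untouched to the binary being run:

    use std::env;

    fn split_at_dashdash(args: &[String]) -> (&[String], &[String]) {
        match args.iter().position(|a| a == "--") {
            Some(i) => (&args[..i], &args[i + 1..]),
            None => (args, &[]),
        }
    }

    fn main() {
        let args: Vec<String> = env::args().skip(1).collect();
        let (ours, forwarded) = split_at_dashdash(&args);
        println!("tool args: {:?}", ours);
        println!("forwarded args: {:?}", forwarded);
    }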
+"; + +pub fn execute(options: Options, config: &mut Config) -> CliResult { + config.configure(options.flag_verbose, + options.flag_quiet, + &options.flag_color, + options.flag_frozen, + options.flag_locked, + &options.flag_z)?; + + let root = find_root_manifest_for_wd(options.flag_manifest_path, config.cwd())?; + + let (mut examples, mut bins) = (Vec::new(), Vec::new()); + if let Some(s) = options.flag_bin { + bins.push(s); + } + if let Some(s) = options.flag_example { + examples.push(s); + } + + let packages = Vec::from_iter(options.flag_package.iter().cloned()); + let spec = Packages::Packages(&packages); + + let compile_opts = ops::CompileOptions { + config: config, + jobs: options.flag_jobs, + target: options.flag_target.as_ref().map(|t| &t[..]), + features: &options.flag_features, + all_features: options.flag_all_features, + no_default_features: options.flag_no_default_features, + spec: spec, + release: options.flag_release, + mode: ops::CompileMode::Build, + filter: if examples.is_empty() && bins.is_empty() { + ops::CompileFilter::Default { required_features_filterable: false, } + } else { + ops::CompileFilter::new(false, + &bins, false, + &[], false, + &examples, false, + &[], false, + false) + }, + message_format: options.flag_message_format, + target_rustdoc_args: None, + target_rustc_args: None, + }; + + let ws = Workspace::new(&root, config)?; + match ops::run(&ws, &compile_opts, &options.arg_args)? { + None => Ok(()), + Some(err) => { + // If we never actually spawned the process then that sounds pretty + // bad and we always want to forward that up. + let exit = match err.exit { + Some(exit) => exit, + None => return Err( + CliError::new(CargoErrorKind::ProcessErrorKind(err).into(), 101)), + }; + + // If `-q` was passed then we suppress extra error information about + // a failed process, we assume the process itself printed out enough + // information about why it failed so we don't do so as well + let exit_code = exit.code().unwrap_or(101); + Err(if options.flag_quiet == Some(true) { + CliError::code(exit_code) + } else { + CliError::new(CargoErrorKind::ProcessErrorKind(err).into(), exit_code) + }) + } + } +} diff --git a/collector/compile-benchmarks/cargo/src/bin/rustc.rs b/collector/compile-benchmarks/cargo/src/bin/rustc.rs new file mode 100644 index 000000000..e6f5dc540 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/rustc.rs @@ -0,0 +1,140 @@ +use std::env; + +use cargo::core::Workspace; +use cargo::ops::{self, CompileOptions, CompileMode, MessageFormat, Packages}; +use cargo::util::important_paths::{find_root_manifest_for_wd}; +use cargo::util::{CliResult, CliError, Config}; + +#[derive(Deserialize)] +pub struct Options { + arg_opts: Option>, + flag_package: Option, + flag_jobs: Option, + flag_features: Vec, + flag_all_features: bool, + flag_no_default_features: bool, + flag_target: Option, + flag_manifest_path: Option, + flag_verbose: u32, + flag_quiet: Option, + flag_color: Option, + flag_message_format: MessageFormat, + flag_release: bool, + flag_lib: bool, + flag_bin: Vec, + flag_bins: bool, + flag_example: Vec, + flag_examples: bool, + flag_test: Vec, + flag_tests: bool, + flag_bench: Vec, + flag_benches: bool, + flag_all_targets: bool, + flag_profile: Option, + flag_frozen: bool, + flag_locked: bool, + #[serde(rename = "flag_Z")] + flag_z: Vec, +} + +pub const USAGE: &'static str = " +Compile a package and all of its dependencies + +Usage: + cargo rustc [options] [--] [...] 
+ +Options: + -h, --help Print this message + -p SPEC, --package SPEC Package to build + -j N, --jobs N Number of parallel jobs, defaults to # of CPUs + --lib Build only this package's library + --bin NAME Build only the specified binary + --bins Build all binaries + --example NAME Build only the specified example + --examples Build all examples + --test NAME Build only the specified test target + --tests Build all tests + --bench NAME Build only the specified bench target + --benches Build all benches + --all-targets Build all targets (lib and bin targets by default) + --release Build artifacts in release mode, with optimizations + --profile PROFILE Profile to build the selected target for + --features FEATURES Features to compile for the package + --all-features Build all available features + --no-default-features Do not compile default features for the package + --target TRIPLE Target triple which compiles will be for + --manifest-path PATH Path to the manifest to fetch dependencies for + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + -q, --quiet No output printed to stdout + --color WHEN Coloring: auto, always, never + --message-format FMT Error format: human, json [default: human] + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date + -Z FLAG ... Unstable (nightly-only) flags to Cargo + +The specified target for the current package (or package specified by SPEC if +provided) will be compiled along with all of its dependencies. The specified +... will all be passed to the final compiler invocation, not any of the +dependencies. Note that the compiler will still unconditionally receive +arguments such as -L, --extern, and --crate-type, and the specified ... +will simply be added to the compiler invocation. + +This command requires that only one target is being compiled. If more than one +target is available for the current package the filters of --lib, --bin, etc, +must be used to select which target is compiled. To pass flags to all compiler +processes spawned by Cargo, use the $RUSTFLAGS environment variable or the +`build.rustflags` configuration option. 
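The --profile handling described above reduces to a small mapping from profile name to compile mode. A standalone sketch mirroring the match in execute() below (the Mode enum is illustrative, not cargo's):

    #[derive(Debug, PartialEq)]
    enum Mode { Build, Test, Bench, Check }

    fn mode_for_profile(profile: Option<&str>) -> Result<Mode, String> {
        match profile {
            Some("dev") | None => Ok(Mode::Build),
            Some("test") => Ok(Mode::Test),
            Some("bench") => Ok(Mode::Bench),
            Some("check") => Ok(Mode::Check),
            Some(other) => {
                Err(format!("unknown profile: `{}`, use dev, test, or bench", other))
            }
        }
    }

    fn main() {
        assert_eq!(mode_for_profile(None), Ok(Mode::Build));
        assert_eq!(mode_for_profile(Some("check")), Ok(Mode::Check));
        assert!(mode_for_profile(Some("release")).is_err());
    }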
+"; + +pub fn execute(options: Options, config: &mut Config) -> CliResult { + debug!("executing; cmd=cargo-rustc; args={:?}", + env::args().collect::>()); + config.configure(options.flag_verbose, + options.flag_quiet, + &options.flag_color, + options.flag_frozen, + options.flag_locked, + &options.flag_z)?; + + let root = find_root_manifest_for_wd(options.flag_manifest_path, + config.cwd())?; + let mode = match options.flag_profile.as_ref().map(|t| &t[..]) { + Some("dev") | None => CompileMode::Build, + Some("test") => CompileMode::Test, + Some("bench") => CompileMode::Bench, + Some("check") => CompileMode::Check, + Some(mode) => { + let err = format!("unknown profile: `{}`, use dev, + test, or bench", mode).into(); + return Err(CliError::new(err, 101)) + } + }; + + let spec = options.flag_package.map_or_else(Vec::new, |s| vec![s]); + + let opts = CompileOptions { + config: config, + jobs: options.flag_jobs, + target: options.flag_target.as_ref().map(|t| &t[..]), + features: &options.flag_features, + all_features: options.flag_all_features, + no_default_features: options.flag_no_default_features, + spec: Packages::Packages(&spec), + mode: mode, + release: options.flag_release, + filter: ops::CompileFilter::new(options.flag_lib, + &options.flag_bin, options.flag_bins, + &options.flag_test, options.flag_tests, + &options.flag_example, options.flag_examples, + &options.flag_bench, options.flag_benches, + options.flag_all_targets), + message_format: options.flag_message_format, + target_rustdoc_args: None, + target_rustc_args: options.arg_opts.as_ref().map(|a| &a[..]), + }; + + let ws = Workspace::new(&root, config)?; + ops::compile(&ws, &opts)?; + Ok(()) +} + diff --git a/collector/compile-benchmarks/cargo/src/bin/rustdoc.rs b/collector/compile-benchmarks/cargo/src/bin/rustdoc.rs new file mode 100644 index 000000000..156a6b867 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/rustdoc.rs @@ -0,0 +1,127 @@ +use cargo::core::Workspace; +use cargo::ops::{self, MessageFormat, Packages}; +use cargo::util::{CliResult, Config}; +use cargo::util::important_paths::{find_root_manifest_for_wd}; + +#[derive(Deserialize)] +pub struct Options { + arg_opts: Vec, + flag_target: Option, + flag_features: Vec, + flag_all_features: bool, + flag_jobs: Option, + flag_manifest_path: Option, + flag_no_default_features: bool, + flag_open: bool, + flag_verbose: u32, + flag_release: bool, + flag_quiet: Option, + flag_color: Option, + flag_message_format: MessageFormat, + flag_package: Option, + flag_lib: bool, + flag_bin: Vec, + flag_bins: bool, + flag_example: Vec, + flag_examples: bool, + flag_test: Vec, + flag_tests: bool, + flag_bench: Vec, + flag_benches: bool, + flag_all_targets: bool, + flag_frozen: bool, + flag_locked: bool, + #[serde(rename = "flag_Z")] + flag_z: Vec, +} + +pub const USAGE: &'static str = " +Build a package's documentation, using specified custom flags. + +Usage: + cargo rustdoc [options] [--] [...] 
+ +Options: + -h, --help Print this message + --open Opens the docs in a browser after the operation + -p SPEC, --package SPEC Package to document + -j N, --jobs N Number of parallel jobs, defaults to # of CPUs + --lib Build only this package's library + --bin NAME Build only the specified binary + --bins Build all binaries + --example NAME Build only the specified example + --examples Build all examples + --test NAME Build only the specified test target + --tests Build all tests + --bench NAME Build only the specified bench target + --benches Build all benches + --all-targets Build all targets (default) + --release Build artifacts in release mode, with optimizations + --features FEATURES Space-separated list of features to also build + --all-features Build all available features + --no-default-features Do not build the `default` feature + --target TRIPLE Build for the target triple + --manifest-path PATH Path to the manifest to document + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + -q, --quiet No output printed to stdout + --color WHEN Coloring: auto, always, never + --message-format FMT Error format: human, json [default: human] + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date + -Z FLAG ... Unstable (nightly-only) flags to Cargo + +The specified target for the current package (or package specified by SPEC if +provided) will be documented with the specified ... being passed to the +final rustdoc invocation. Dependencies will not be documented as part of this +command. Note that rustdoc will still unconditionally receive arguments such +as -L, --extern, and --crate-type, and the specified ... will simply be +added to the rustdoc invocation. + +If the --package argument is given, then SPEC is a package id specification +which indicates which package should be documented. If it is not given, then the +current package is documented. For more information on SPEC and its format, see +the `cargo help pkgid` command. 
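An illustrative invocation of the behavior described above: flags before `--` are for cargo itself, flags after it go straight to the final rustdoc call. --html-in-header is a real rustdoc flag; extra.html is a hypothetical file name:

    use std::process::Command;

    fn main() -> std::io::Result<()> {
        let status = Command::new("cargo")
            .args(["rustdoc", "--lib", "--", "--html-in-header", "extra.html"])
            .status()?;
        std::process::exit(status.code().unwrap_or(101));
    }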
+"; + +pub fn execute(options: Options, config: &mut Config) -> CliResult { + config.configure(options.flag_verbose, + options.flag_quiet, + &options.flag_color, + options.flag_frozen, + options.flag_locked, + &options.flag_z)?; + + let root = find_root_manifest_for_wd(options.flag_manifest_path, + config.cwd())?; + + let spec = options.flag_package.map_or_else(Vec::new, |s| vec![s]); + + let doc_opts = ops::DocOptions { + open_result: options.flag_open, + compile_opts: ops::CompileOptions { + config: config, + jobs: options.flag_jobs, + target: options.flag_target.as_ref().map(|t| &t[..]), + features: &options.flag_features, + all_features: options.flag_all_features, + no_default_features: options.flag_no_default_features, + spec: Packages::Packages(&spec), + release: options.flag_release, + filter: ops::CompileFilter::new(options.flag_lib, + &options.flag_bin, options.flag_bins, + &options.flag_test, options.flag_tests, + &options.flag_example, options.flag_examples, + &options.flag_bench, options.flag_benches, + options.flag_all_targets), + message_format: options.flag_message_format, + mode: ops::CompileMode::Doc { deps: false }, + target_rustdoc_args: Some(&options.arg_opts), + target_rustc_args: None, + }, + }; + + let ws = Workspace::new(&root, config)?; + ops::doc(&ws, &doc_opts)?; + + Ok(()) +} diff --git a/collector/compile-benchmarks/cargo/src/bin/search.rs b/collector/compile-benchmarks/cargo/src/bin/search.rs new file mode 100644 index 000000000..165dea1c8 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/search.rs @@ -0,0 +1,82 @@ +use cargo::ops; +use cargo::util::{CliResult, Config}; + +use std::cmp; + +#[derive(Deserialize)] +pub struct Options { + flag_index: Option, + flag_host: Option, // TODO: Depricated, remove + flag_verbose: u32, + flag_quiet: Option, + flag_color: Option, + flag_limit: Option, + flag_frozen: bool, + flag_locked: bool, + arg_query: Vec, + #[serde(rename = "flag_Z")] + flag_z: Vec, +} + +pub const USAGE: &'static str = " +Search packages in crates.io + +Usage: + cargo search [options] ... + cargo search [-h | --help] + +Options: + -h, --help Print this message + --index INDEX Registry index to search in + --host HOST DEPRECATED, renamed to '--index' + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + -q, --quiet No output printed to stdout + --color WHEN Coloring: auto, always, never + --limit LIMIT Limit the number of results (default: 10, max: 100) + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date + -Z FLAG ... Unstable (nightly-only) flags to Cargo +"; + +pub fn execute(options: Options, config: &mut Config) -> CliResult { + config.configure(options.flag_verbose, + options.flag_quiet, + &options.flag_color, + options.flag_frozen, + options.flag_locked, + &options.flag_z)?; + let Options { + flag_index: index, + flag_host: host, // TODO: Depricated, remove + flag_limit: limit, + arg_query: query, + .. + } = options; + + // TODO: Depricated + // remove once it has been decided --host can be safely removed + // We may instead want to repurpose the host flag, as + // mentioned in this issue + // https://github.com/rust-lang/cargo/issues/4208 + + let msg = "The flag '--host' is no longer valid. + +Previous versions of Cargo accepted this flag, but it is being +depricated. The flag is being renamed to 'index', as the flag +wants the location of the index in which to search. Please +use '--index' instead. 
+ +This will soon become a hard error, so it's either recommended +to update to a fixed version or contact the upstream maintainer +about this warning."; + + let index = if host.clone().is_none() || host.clone().unwrap().is_empty() { + index + } else { + config.shell().warn(&msg)?; + host + }; + + ops::search(&query.join("+"), config, index, cmp::min(100, limit.unwrap_or(10)) as u8)?; + Ok(()) +} diff --git a/collector/compile-benchmarks/cargo/src/bin/test.rs b/collector/compile-benchmarks/cargo/src/bin/test.rs new file mode 100644 index 000000000..2208a18af --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/test.rs @@ -0,0 +1,186 @@ +use std::env; + +use cargo::core::Workspace; +use cargo::ops::{self, MessageFormat, Packages}; +use cargo::util::{CliResult, CliError, Config, CargoErrorKind}; +use cargo::util::important_paths::find_root_manifest_for_wd; + +#[derive(Deserialize)] +pub struct Options { + arg_args: Vec, + flag_features: Vec, + flag_all_features: bool, + flag_jobs: Option, + flag_manifest_path: Option, + flag_no_default_features: bool, + flag_no_run: bool, + flag_package: Vec, + flag_target: Option, + flag_lib: bool, + flag_doc: bool, + flag_bin: Vec, + flag_bins: bool, + flag_example: Vec, + flag_examples: bool, + flag_test: Vec, + flag_tests: bool, + flag_bench: Vec, + flag_benches: bool, + flag_all_targets: bool, + flag_verbose: u32, + flag_quiet: Option, + flag_color: Option, + flag_message_format: MessageFormat, + flag_release: bool, + flag_no_fail_fast: bool, + flag_frozen: bool, + flag_locked: bool, + flag_all: bool, + flag_exclude: Vec, + #[serde(rename = "flag_Z")] + flag_z: Vec, +} + +pub const USAGE: &'static str = " +Execute all unit and integration tests of a local package + +Usage: + cargo test [options] [--] [...] + +Options: + -h, --help Print this message + --lib Test only this package's library + --doc Test only this library's documentation + --bin NAME ... Test only the specified binary + --bins Test all binaries + --example NAME ... Check that the specified examples compile + --examples Check that all examples compile + --test NAME ... Test only the specified test target + --tests Test all tests + --bench NAME ... Test only the specified bench target + --benches Test all benches + --all-targets Test all targets (default) + --no-run Compile, but don't run tests + -p SPEC, --package SPEC ... Package to run tests for + --all Test all packages in the workspace + --exclude SPEC ... Exclude packages from the test + -j N, --jobs N Number of parallel builds, see below for details + --release Build artifacts in release mode, with optimizations + --features FEATURES Space-separated list of features to also build + --all-features Build all available features + --no-default-features Do not build the `default` feature + --target TRIPLE Build for the target triple + --manifest-path PATH Path to the manifest to build tests for + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + -q, --quiet No output printed to stdout + --color WHEN Coloring: auto, always, never + --message-format FMT Error format: human, json [default: human] + --no-fail-fast Run all tests regardless of failure + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date + -Z FLAG ... Unstable (nightly-only) flags to Cargo + +All of the trailing arguments are passed to the test binaries generated for +filtering tests and generally providing options configuring how they run. 
For +example, this will run all tests with the name `foo` in their name: + + cargo test foo + +If the --package argument is given, then SPEC is a package id specification +which indicates which package should be tested. If it is not given, then the +current package is tested. For more information on SPEC and its format, see the +`cargo help pkgid` command. + +All packages in the workspace are tested if the `--all` flag is supplied. The +`--all` flag is automatically assumed for a virtual manifest. +Note that `--exclude` has to be specified in conjunction with the `--all` flag. + +The --jobs argument affects the building of the test executable but does +not affect how many jobs are used when running the tests. The default value +for the --jobs argument is the number of CPUs. If you want to control the +number of simultaneous running test cases, pass the `--test-threads` option +to the test binaries: + + cargo test -- --test-threads=1 + +Compilation can be configured via the `test` profile in the manifest. + +By default the rust test harness hides output from test execution to +keep results readable. Test output can be recovered (e.g. for debugging) +by passing `--nocapture` to the test binaries: + + cargo test -- --nocapture + +To get the list of all options available for the test binaries use this: + + cargo test -- --help +"; + +pub fn execute(options: Options, config: &mut Config) -> CliResult { + debug!("executing; cmd=cargo-test; args={:?}", + env::args().collect::>()); + + config.configure(options.flag_verbose, + options.flag_quiet, + &options.flag_color, + options.flag_frozen, + options.flag_locked, + &options.flag_z)?; + + let root = find_root_manifest_for_wd(options.flag_manifest_path, config.cwd())?; + let ws = Workspace::new(&root, config)?; + + let empty = Vec::new(); + let (mode, filter); + if options.flag_doc { + mode = ops::CompileMode::Doctest; + filter = ops::CompileFilter::new(true, &empty, false, &empty, false, + &empty, false, &empty, false, + false); + } else { + mode = ops::CompileMode::Test; + filter = ops::CompileFilter::new(options.flag_lib, + &options.flag_bin, options.flag_bins, + &options.flag_test, options.flag_tests, + &options.flag_example, options.flag_examples, + &options.flag_bench, options.flag_benches, + options.flag_all_targets); + } + + let spec = Packages::from_flags(ws.is_virtual(), + options.flag_all, + &options.flag_exclude, + &options.flag_package)?; + + let ops = ops::TestOptions { + no_run: options.flag_no_run, + no_fail_fast: options.flag_no_fail_fast, + only_doc: options.flag_doc, + compile_opts: ops::CompileOptions { + config: config, + jobs: options.flag_jobs, + target: options.flag_target.as_ref().map(|s| &s[..]), + features: &options.flag_features, + all_features: options.flag_all_features, + no_default_features: options.flag_no_default_features, + spec: spec, + release: options.flag_release, + mode: mode, + filter: filter, + message_format: options.flag_message_format, + target_rustdoc_args: None, + target_rustc_args: None, + }, + }; + + let err = ops::run_tests(&ws, &ops, &options.arg_args)?; + match err { + None => Ok(()), + Some(err) => { + Err(match err.exit.as_ref().and_then(|e| e.code()) { + Some(i) => CliError::new(err.hint().into(), i), + None => CliError::new(CargoErrorKind::CargoTestErrorKind(err).into(), 101), + }) + } + } +} diff --git a/collector/compile-benchmarks/cargo/src/bin/uninstall.rs b/collector/compile-benchmarks/cargo/src/bin/uninstall.rs new file mode 100644 index 000000000..2adf2d041 --- /dev/null +++ 
b/collector/compile-benchmarks/cargo/src/bin/uninstall.rs @@ -0,0 +1,55 @@ +use cargo::ops; +use cargo::util::{CliResult, Config}; + +#[derive(Deserialize)] +pub struct Options { + flag_bin: Vec, + flag_root: Option, + flag_verbose: u32, + flag_quiet: Option, + flag_color: Option, + flag_frozen: bool, + flag_locked: bool, + #[serde(rename = "flag_Z")] + flag_z: Vec, + + arg_spec: String, +} + +pub const USAGE: &'static str = " +Remove a Rust binary + +Usage: + cargo uninstall [options] + cargo uninstall (-h | --help) + +Options: + -h, --help Print this message + --root DIR Directory to uninstall packages from + --bin NAME Only uninstall the binary NAME + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + -q, --quiet Less output printed to stdout + --color WHEN Coloring: auto, always, never + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date + -Z FLAG ... Unstable (nightly-only) flags to Cargo + +The argument SPEC is a package id specification (see `cargo help pkgid`) to +specify which crate should be uninstalled. By default all binaries are +uninstalled for a crate but the `--bin` and `--example` flags can be used to +only uninstall particular binaries. +"; + +pub fn execute(options: Options, config: &mut Config) -> CliResult { + config.configure(options.flag_verbose, + options.flag_quiet, + &options.flag_color, + options.flag_frozen, + options.flag_locked, + &options.flag_z)?; + + let root = options.flag_root.as_ref().map(|s| &s[..]); + ops::uninstall(root, &options.arg_spec, &options.flag_bin, config)?; + Ok(()) +} + diff --git a/collector/compile-benchmarks/cargo/src/bin/update.rs b/collector/compile-benchmarks/cargo/src/bin/update.rs new file mode 100644 index 000000000..1e7f92b53 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/update.rs @@ -0,0 +1,83 @@ +use std::env; + +use cargo::core::Workspace; +use cargo::ops; +use cargo::util::{CliResult, Config}; +use cargo::util::important_paths::find_root_manifest_for_wd; + +#[derive(Deserialize)] +pub struct Options { + flag_package: Vec, + flag_aggressive: bool, + flag_precise: Option, + flag_manifest_path: Option, + flag_verbose: u32, + flag_quiet: Option, + flag_color: Option, + flag_frozen: bool, + flag_locked: bool, + #[serde(rename = "flag_Z")] + flag_z: Vec, +} + +pub const USAGE: &'static str = " +Update dependencies as recorded in the local lock file. + +Usage: + cargo update [options] + +Options: + -h, --help Print this message + -p SPEC, --package SPEC ... Package to update + --aggressive Force updating all dependencies of as well + --precise PRECISE Update a single dependency to exactly PRECISE + --manifest-path PATH Path to the crate's manifest + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + -q, --quiet No output printed to stdout + --color WHEN Coloring: auto, always, never + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date + -Z FLAG ... Unstable (nightly-only) flags to Cargo + +This command requires that a `Cargo.lock` already exists as generated by +`cargo build` or related commands. + +If SPEC is given, then a conservative update of the lockfile will be +performed. This means that only the dependency specified by SPEC will be +updated. Its transitive dependencies will be updated only if SPEC cannot be +updated without updating dependencies. All other dependencies will remain +locked at their currently recorded versions. 
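An illustrative conservative update as just described; serde stands in for SPEC, and only that dependency (plus its transitive dependencies, when unavoidable) moves while the rest of the lockfile stays put:

    use std::process::Command;

    fn main() -> std::io::Result<()> {
        let status = Command::new("cargo")
            .args(["update", "-p", "serde"])
            .status()?;
        std::process::exit(status.code().unwrap_or(1));
    }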
+ +If PRECISE is specified, then --aggressive must not also be specified. The +argument PRECISE is a string representing a precise revision that the package +being updated should be updated to. For example, if the package comes from a git +repository, then PRECISE would be the exact revision that the repository should +be updated to. + +If SPEC is not given, then all dependencies will be re-resolved and +updated. + +For more information about package id specifications, see `cargo help pkgid`. +"; + +pub fn execute(options: Options, config: &mut Config) -> CliResult { + debug!("executing; cmd=cargo-update; args={:?}", env::args().collect::>()); + config.configure(options.flag_verbose, + options.flag_quiet, + &options.flag_color, + options.flag_frozen, + options.flag_locked, + &options.flag_z)?; + let root = find_root_manifest_for_wd(options.flag_manifest_path, config.cwd())?; + + let update_opts = ops::UpdateOptions { + aggressive: options.flag_aggressive, + precise: options.flag_precise.as_ref().map(|s| &s[..]), + to_update: &options.flag_package, + config: config, + }; + + let ws = Workspace::new(&root, config)?; + ops::update_lockfile(&ws, &update_opts)?; + Ok(()) +} diff --git a/collector/compile-benchmarks/cargo/src/bin/verify_project.rs b/collector/compile-benchmarks/cargo/src/bin/verify_project.rs new file mode 100644 index 000000000..a05447026 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/verify_project.rs @@ -0,0 +1,77 @@ +use std::collections::HashMap; +use std::fs::File; +use std::io::prelude::*; +use std::process; + +use cargo; +use cargo::util::important_paths::{find_root_manifest_for_wd}; +use cargo::util::{CliResult, Config}; +use serde_json; +use toml; + +#[derive(Deserialize)] +pub struct Flags { + flag_manifest_path: Option, + flag_verbose: u32, + flag_quiet: Option, + flag_color: Option, + flag_frozen: bool, + flag_locked: bool, + #[serde(rename = "flag_Z")] + flag_z: Vec, +} + +pub const USAGE: &'static str = " +Check correctness of crate manifest + +Usage: + cargo verify-project [options] + cargo verify-project -h | --help + +Options: + -h, --help Print this message + --manifest-path PATH Path to the manifest to verify + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + -q, --quiet No output printed to stdout + --color WHEN Coloring: auto, always, never + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date + -Z FLAG ... Unstable (nightly-only) flags to Cargo +"; + +pub fn execute(args: Flags, config: &mut Config) -> CliResult { + config.configure(args.flag_verbose, + args.flag_quiet, + &args.flag_color, + args.flag_frozen, + args.flag_locked, + &args.flag_z)?; + + let mut contents = String::new(); + let filename = args.flag_manifest_path.unwrap_or_else(|| "Cargo.toml".into()); + let filename = match find_root_manifest_for_wd(Some(filename), config.cwd()) { + Ok(manifest_path) => manifest_path, + Err(e) => fail("invalid", &e.to_string()), + }; + + let file = File::open(&filename); + match file.and_then(|mut f| f.read_to_string(&mut contents)) { + Ok(_) => {}, + Err(e) => fail("invalid", &format!("error reading file: {}", e)) + }; + if contents.parse::().is_err() { + fail("invalid", "invalid-format"); + } + + let mut h = HashMap::new(); + h.insert("success".to_string(), "true".to_string()); + cargo::print_json(&h); + Ok(()) +} + +fn fail(reason: &str, value: &str) -> ! 
{ + let mut h = HashMap::new(); + h.insert(reason.to_string(), value.to_string()); + println!("{}", serde_json::to_string(&h).unwrap()); + process::exit(1) +} diff --git a/collector/compile-benchmarks/cargo/src/bin/version.rs b/collector/compile-benchmarks/cargo/src/bin/version.rs new file mode 100644 index 000000000..6d3772f1c --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/version.rs @@ -0,0 +1,27 @@ +use std::env; + +use cargo; +use cargo::util::{CliResult, Config}; + +#[derive(Deserialize)] +pub struct Options; + +pub const USAGE: &'static str = " +Show version information + +Usage: + cargo version [options] + +Options: + -h, --help Print this message + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + --color WHEN Coloring: auto, always, never +"; + +pub fn execute(_: Options, _: &mut Config) -> CliResult { + debug!("executing; cmd=cargo-version; args={:?}", env::args().collect::>()); + + println!("{}", cargo::version()); + + Ok(()) +} diff --git a/collector/compile-benchmarks/cargo/src/bin/yank.rs b/collector/compile-benchmarks/cargo/src/bin/yank.rs new file mode 100644 index 000000000..a00892a51 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/bin/yank.rs @@ -0,0 +1,63 @@ +use cargo::ops; +use cargo::util::{CliResult, Config}; + +#[derive(Deserialize)] +pub struct Options { + arg_crate: Option, + flag_token: Option, + flag_vers: Option, + flag_index: Option, + flag_verbose: u32, + flag_quiet: Option, + flag_color: Option, + flag_undo: bool, + flag_frozen: bool, + flag_locked: bool, + #[serde(rename = "flag_Z")] + flag_z: Vec, +} + +pub static USAGE: &'static str = " +Remove a pushed crate from the index + +Usage: + cargo yank [options] [] + +Options: + -h, --help Print this message + --vers VERSION The version to yank or un-yank + --undo Undo a yank, putting a version back into the index + --index INDEX Registry index to yank from + --token TOKEN API token to use when authenticating + -v, --verbose ... Use verbose output (-vv very verbose/build.rs output) + -q, --quiet No output printed to stdout + --color WHEN Coloring: auto, always, never + --frozen Require Cargo.lock and cache are up to date + --locked Require Cargo.lock is up to date + -Z FLAG ... Unstable (nightly-only) flags to Cargo + +The yank command removes a previously pushed crate's version from the server's +index. This command does not delete any data, and the crate will still be +available for download via the registry's download link. + +Note that existing crates locked to a yanked version will still be able to +download the yanked version to use it. Cargo will, however, not allow any new +crates to be locked to any yanked version. 
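+
+For example, to yank a version and then undo that yank (crate name and
+version illustrative):
+
+    cargo yank --vers 1.0.1 foo
+    cargo yank --undo --vers 1.0.1 foo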
+"; + +pub fn execute(options: Options, config: &mut Config) -> CliResult { + config.configure(options.flag_verbose, + options.flag_quiet, + &options.flag_color, + options.flag_frozen, + options.flag_locked, + &options.flag_z)?; + ops::yank(config, + options.arg_crate, + options.flag_vers, + options.flag_token, + options.flag_index, + options.flag_undo)?; + Ok(()) +} + diff --git a/collector/compile-benchmarks/cargo/src/cargo/core/dependency.rs b/collector/compile-benchmarks/cargo/src/cargo/core/dependency.rs new file mode 100644 index 000000000..24d6c9f35 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/core/dependency.rs @@ -0,0 +1,369 @@ +use std::fmt; +use std::rc::Rc; +use std::str::FromStr; + +use semver::VersionReq; +use semver::ReqParseError; +use serde::ser; + +use core::{SourceId, Summary, PackageId}; +use util::{Cfg, CfgExpr, Config}; +use util::errors::{CargoResult, CargoResultExt, CargoError}; + +/// Information about a dependency requested by a Cargo manifest. +/// Cheap to copy. +#[derive(PartialEq, Clone, Debug)] +pub struct Dependency { + inner: Rc, +} + +/// The data underlying a Dependency. +#[derive(PartialEq, Clone, Debug)] +struct Inner { + name: String, + source_id: SourceId, + req: VersionReq, + specified_req: bool, + kind: Kind, + only_match_name: bool, + + optional: bool, + default_features: bool, + features: Vec, + + // This dependency should be used only for this platform. + // `None` means *all platforms*. + platform: Option, +} + +#[derive(Clone, Debug, PartialEq)] +pub enum Platform { + Name(String), + Cfg(CfgExpr), +} + +#[derive(Serialize)] +struct SerializedDependency<'a> { + name: &'a str, + source: &'a SourceId, + req: String, + kind: Kind, + + optional: bool, + uses_default_features: bool, + features: &'a [String], + target: Option<&'a Platform>, +} + +impl ser::Serialize for Dependency { + fn serialize(&self, s: S) -> Result + where S: ser::Serializer, + { + SerializedDependency { + name: self.name(), + source: self.source_id(), + req: self.version_req().to_string(), + kind: self.kind(), + optional: self.is_optional(), + uses_default_features: self.uses_default_features(), + features: self.features(), + target: self.platform(), + }.serialize(s) + } +} + +#[derive(PartialEq, Clone, Debug, Copy)] +pub enum Kind { + Normal, + Development, + Build, +} + +fn parse_req_with_deprecated(req: &str, + extra: Option<(&PackageId, &Config)>) + -> CargoResult { + match VersionReq::parse(req) { + Err(e) => { + let (inside, config) = match extra { + Some(pair) => pair, + None => return Err(e.into()), + }; + match e { + ReqParseError::DeprecatedVersionRequirement(requirement) => { + let msg = format!("\ +parsed version requirement `{}` is no longer valid + +Previous versions of Cargo accepted this malformed requirement, +but it is being deprecated. This was found when parsing the manifest +of {} {}, and the correct version requirement is `{}`. + +This will soon become a hard error, so it's either recommended to +update to a fixed version or contact the upstream maintainer about +this warning. 
+", +req, inside.name(), inside.version(), requirement); + config.shell().warn(&msg)?; + + Ok(requirement) + } + e => Err(e.into()), + } + }, + Ok(v) => Ok(v), + } +} + +impl ser::Serialize for Kind { + fn serialize(&self, s: S) -> Result + where S: ser::Serializer, + { + match *self { + Kind::Normal => None, + Kind::Development => Some("dev"), + Kind::Build => Some("build"), + }.serialize(s) + } +} + +impl Dependency { + /// Attempt to create a `Dependency` from an entry in the manifest. + pub fn parse(name: &str, + version: Option<&str>, + source_id: &SourceId, + inside: &PackageId, + config: &Config) -> CargoResult { + let arg = Some((inside, config)); + let (specified_req, version_req) = match version { + Some(v) => (true, parse_req_with_deprecated(v, arg)?), + None => (false, VersionReq::any()) + }; + + let mut ret = Dependency::new_override(name, source_id); + { + let ptr = Rc::make_mut(&mut ret.inner); + ptr.only_match_name = false; + ptr.req = version_req; + ptr.specified_req = specified_req; + } + Ok(ret) + } + + /// Attempt to create a `Dependency` from an entry in the manifest. + pub fn parse_no_deprecated(name: &str, + version: Option<&str>, + source_id: &SourceId) -> CargoResult { + let (specified_req, version_req) = match version { + Some(v) => (true, parse_req_with_deprecated(v, None)?), + None => (false, VersionReq::any()) + }; + + let mut ret = Dependency::new_override(name, source_id); + { + let ptr = Rc::make_mut(&mut ret.inner); + ptr.only_match_name = false; + ptr.req = version_req; + ptr.specified_req = specified_req; + } + Ok(ret) + } + + pub fn new_override(name: &str, source_id: &SourceId) -> Dependency { + Dependency { + inner: Rc::new(Inner { + name: name.to_string(), + source_id: source_id.clone(), + req: VersionReq::any(), + kind: Kind::Normal, + only_match_name: true, + optional: false, + features: Vec::new(), + default_features: true, + specified_req: false, + platform: None, + }), + } + } + + pub fn version_req(&self) -> &VersionReq { + &self.inner.req + } + + pub fn name(&self) -> &str { + &self.inner.name + } + + pub fn source_id(&self) -> &SourceId { + &self.inner.source_id + } + + pub fn kind(&self) -> Kind { + self.inner.kind + } + + pub fn specified_req(&self) -> bool { + self.inner.specified_req + } + + /// If none, this dependencies must be built for all platforms. + /// If some, it must only be built for the specified platform. + pub fn platform(&self) -> Option<&Platform> { + self.inner.platform.as_ref() + } + + pub fn set_kind(&mut self, kind: Kind) -> &mut Dependency { + Rc::make_mut(&mut self.inner).kind = kind; + self + } + + /// Sets the list of features requested for the package. + pub fn set_features(&mut self, features: Vec) -> &mut Dependency { + Rc::make_mut(&mut self.inner).features = features; + self + } + + /// Sets whether the dependency requests default features of the package. + pub fn set_default_features(&mut self, default_features: bool) -> &mut Dependency { + Rc::make_mut(&mut self.inner).default_features = default_features; + self + } + + /// Sets whether the dependency is optional. 
+ pub fn set_optional(&mut self, optional: bool) -> &mut Dependency { + Rc::make_mut(&mut self.inner).optional = optional; + self + } + + /// Set the source id for this dependency + pub fn set_source_id(&mut self, id: SourceId) -> &mut Dependency { + Rc::make_mut(&mut self.inner).source_id = id; + self + } + + /// Set the version requirement for this dependency + pub fn set_version_req(&mut self, req: VersionReq) -> &mut Dependency { + Rc::make_mut(&mut self.inner).req = req; + self + } + + pub fn set_platform(&mut self, platform: Option) -> &mut Dependency { + Rc::make_mut(&mut self.inner).platform = platform; + self + } + + /// Lock this dependency to depending on the specified package id + pub fn lock_to(&mut self, id: &PackageId) -> &mut Dependency { + assert_eq!(self.inner.source_id, *id.source_id()); + assert!(self.inner.req.matches(id.version())); + self.set_version_req(VersionReq::exact(id.version())) + .set_source_id(id.source_id().clone()) + } + + /// Returns whether this is a "locked" dependency, basically whether it has + /// an exact version req. + pub fn is_locked(&self) -> bool { + // Kind of a hack to figure this out, but it works! + self.inner.req.to_string().starts_with('=') + } + + /// Returns false if the dependency is only used to build the local package. + pub fn is_transitive(&self) -> bool { + match self.inner.kind { + Kind::Normal | Kind::Build => true, + Kind::Development => false, + } + } + + pub fn is_build(&self) -> bool { + match self.inner.kind { + Kind::Build => true, + _ => false, + } + } + + pub fn is_optional(&self) -> bool { + self.inner.optional + } + + /// Returns true if the default features of the dependency are requested. + pub fn uses_default_features(&self) -> bool { + self.inner.default_features + } + /// Returns the list of features that are requested by the dependency. + pub fn features(&self) -> &[String] { + &self.inner.features + } + + /// Returns true if the package (`sum`) can fulfill this dependency request. + pub fn matches(&self, sum: &Summary) -> bool { + self.matches_id(sum.package_id()) + } + + /// Returns true if the package (`sum`) can fulfill this dependency request. + pub fn matches_ignoring_source(&self, sum: &Summary) -> bool { + self.name() == sum.package_id().name() && + self.version_req().matches(sum.package_id().version()) + } + + /// Returns true if the package (`id`) can fulfill this dependency request. 
+    pub fn matches_id(&self, id: &PackageId) -> bool {
+        self.inner.name == id.name() &&
+            (self.inner.only_match_name || (self.inner.req.matches(id.version()) &&
+                                            &self.inner.source_id == id.source_id()))
+    }
+
+    pub fn map_source(mut self, to_replace: &SourceId, replace_with: &SourceId)
+                      -> Dependency {
+        if self.source_id() != to_replace {
+            self
+        } else {
+            self.set_source_id(replace_with.clone());
+            self
+        }
+    }
+}
+
+impl Platform {
+    pub fn matches(&self, name: &str, cfg: Option<&[Cfg]>) -> bool {
+        match *self {
+            Platform::Name(ref p) => p == name,
+            Platform::Cfg(ref p) => {
+                match cfg {
+                    Some(cfg) => p.matches(cfg),
+                    None => false,
+                }
+            }
+        }
+    }
+}
+
+impl ser::Serialize for Platform {
+    fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
+        where S: ser::Serializer,
+    {
+        self.to_string().serialize(s)
+    }
+}
+
+impl FromStr for Platform {
+    type Err = CargoError;
+
+    fn from_str(s: &str) -> CargoResult<Platform> {
+        if s.starts_with("cfg(") && s.ends_with(')') {
+            let s = &s[4..s.len()-1];
+            s.parse().map(Platform::Cfg).chain_err(|| {
+                format!("failed to parse `{}` as a cfg expression", s)
+            })
+        } else {
+            Ok(Platform::Name(s.to_string()))
+        }
+    }
+}
+
+impl fmt::Display for Platform {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            Platform::Name(ref n) => n.fmt(f),
+            Platform::Cfg(ref e) => write!(f, "cfg({})", e),
+        }
+    }
+}
diff --git a/collector/compile-benchmarks/cargo/src/cargo/core/features.rs b/collector/compile-benchmarks/cargo/src/cargo/core/features.rs
new file mode 100644
index 000000000..4bfafc174
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/core/features.rs
@@ -0,0 +1,279 @@
+//! Support for nightly features in Cargo itself
+//!
+//! This file is the version of `feature_gate.rs` in upstream Rust for Cargo
+//! itself and is intended to be the avenue by which new features in Cargo are
+//! gated by default and then eventually stabilized. All known stable and
+//! unstable features are tracked in this file.
+//!
+//! If you're reading this then you're likely interested in adding a feature to
+//! Cargo, and the good news is that it shouldn't be too hard! To do this you'll
+//! want to follow these steps:
+//!
+//! 1. Add your feature. Do this by searching for "look here" in this file and
+//!    expanding the macro invocation that lists all features with your new
+//!    feature.
+//!
+//! 2. Find the appropriate place to place the feature gate in Cargo itself. If
+//!    you're extending the manifest format you'll likely just want to modify
+//!    the `Manifest::feature_gate` function, but otherwise you may wish to
+//!    place the feature gate elsewhere in Cargo.
+//!
+//! 3. To actually perform the feature gate, you'll want to have code that looks
+//!    like:
+//!
+//! ```rust,ignore
+//! use core::{Feature, Features};
+//!
+//! let feature = Feature::launch_into_space();
+//! package.manifest().features().require(feature).chain_err(|| {
+//!     "launching Cargo into space right now is unstable and may result in \
+//!      unintended damage to your codebase, use with caution"
+//! })?;
+//! ```
+//!
+//! Notably you'll notice the `require` function called with your `Feature`, and
+//! then you use `chain_err` to tack on more context for why the feature was
+//! required when the feature isn't activated.
+//!
+//! And hopefully that's it! Bear with us though that this is, at the time of
+//! this writing, a very new feature in Cargo. If the process differs from this
+//! we'll be sure to update this documentation!
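+//!
+//! As a concrete sketch of step 1 (the `launch_into_space` feature named in
+//! the example above is hypothetical), the `features!` macro invocation below
+//! would simply grow a new line:
+//!
+//! ```rust,ignore
+//! features! {
+//!     pub struct Features {
+//!         [stable] test_dummy_stable: bool,
+//!         [unstable] test_dummy_unstable: bool,
+//!         [unstable] launch_into_space: bool,
+//!     }
+//! }
+//! ```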
+ +use std::env; + +use util::errors::CargoResult; + +enum Status { + Stable, + Unstable, +} + +macro_rules! features { + ( + pub struct Features { + $([$stab:ident] $feature:ident: bool,)* + } + ) => ( + #[derive(Default, Clone, Debug)] + pub struct Features { + $($feature: bool,)* + activated: Vec, + } + + impl Feature { + $( + pub fn $feature() -> &'static Feature { + fn get(features: &Features) -> bool { + features.$feature + } + static FEAT: Feature = Feature { + name: stringify!($feature), + get: get, + }; + &FEAT + } + )* + + fn is_enabled(&self, features: &Features) -> bool { + (self.get)(features) + } + } + + impl Features { + fn status(&mut self, feature: &str) -> Option<(&mut bool, Status)> { + if feature.contains("_") { + return None + } + let feature = feature.replace("-", "_"); + $( + if feature == stringify!($feature) { + return Some((&mut self.$feature, stab!($stab))) + } + )* + None + } + } + ) +} + +macro_rules! stab { + (stable) => (Status::Stable); + (unstable) => (Status::Unstable); +} + +/// A listing of all features in Cargo +/// +/// "look here" +/// +/// This is the macro that lists all stable and unstable features in Cargo. +/// You'll want to add to this macro whenever you add a feature to Cargo, also +/// following the directions above. +/// +/// Note that all feature names here are valid Rust identifiers, but the `_` +/// character is translated to `-` when specified in the `cargo-features` +/// manifest entry in `Cargo.toml`. +features! { + pub struct Features { + + // A dummy feature that doesn't actually gate anything, but it's used in + // testing to ensure that we can enable stable features. + [stable] test_dummy_stable: bool, + + // A dummy feature that gates the usage of the `im-a-teapot` manifest + // entry. This is basically just intended for tests. 
+ [unstable] test_dummy_unstable: bool, + } +} + +pub struct Feature { + name: &'static str, + get: fn(&Features) -> bool, +} + +impl Features { + pub fn new(features: &[String], + warnings: &mut Vec) -> CargoResult { + let mut ret = Features::default(); + for feature in features { + ret.add(feature, warnings)?; + ret.activated.push(feature.to_string()); + } + Ok(ret) + } + + fn add(&mut self, feature: &str, warnings: &mut Vec) -> CargoResult<()> { + let (slot, status) = match self.status(feature) { + Some(p) => p, + None => bail!("unknown cargo feature `{}`", feature), + }; + + if *slot { + bail!("the cargo feature `{}` has already been activated", feature); + } + + match status { + Status::Stable => { + let warning = format!("the cargo feature `{}` is now stable \ + and is no longer necessary to be listed \ + in the manifest", feature); + warnings.push(warning); + } + Status::Unstable if !nightly_features_allowed() => { + bail!("the cargo feature `{}` requires a nightly version of \ + Cargo, but this is the `{}` channel", + feature, + channel()) + } + Status::Unstable => {} + } + + *slot = true; + + Ok(()) + } + + pub fn activated(&self) -> &[String] { + &self.activated + } + + pub fn require(&self, feature: &Feature) -> CargoResult<()> { + if feature.is_enabled(self) { + Ok(()) + } else { + let feature = feature.name.replace("_", "-"); + let mut msg = format!("feature `{}` is required", feature); + + if nightly_features_allowed() { + let s = format!("\n\nconsider adding `cargo-features = [\"{0}\"]` \ + to the manifest", feature); + msg.push_str(&s); + } else { + let s = format!("\n\n\ + this Cargo does not support nightly features, but if you\n\ + switch to nightly channel you can add\n\ + `cargo-features = [\"{}\"]` to enable this feature", + feature); + msg.push_str(&s); + } + bail!("{}", msg); + } + } +} + +/// A parsed representation of all unstable flags that Cargo accepts. +/// +/// Cargo, like `rustc`, accepts a suite of `-Z` flags which are intended for +/// gating unstable functionality to Cargo. These flags are only available on +/// the nightly channel of Cargo. +/// +/// This struct doesn't have quite the same convenience macro that the features +/// have above, but the procedure should still be relatively stable for adding a +/// new unstable flag: +/// +/// 1. First, add a field to this `CliUnstable` structure. All flags are allowed +/// to have a value as the `-Z` flags are either of the form `-Z foo` or +/// `-Z foo=bar`, and it's up to you how to parse `bar`. +/// +/// 2. Add an arm to the match statement in `CliUnstable::add` below to match on +/// your new flag. The key (`k`) is what you're matching on and the value is +/// in `v`. +/// +/// 3. (optional) Add a new parsing function to parse your datatype. As of now +/// there's an example for `bool`, but more can be added! +/// +/// 4. In Cargo use `config.cli_unstable()` to get a reference to this structure +/// and then test for your flag or your value and act accordingly. +/// +/// If you have any trouble with this, please let us know! 
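+///
+/// As a sketch of steps 1 and 2 (the `illustrative-flag` name is made up;
+/// `print-im-a-teapot` is the real flag handled below):
+///
+/// ```rust,ignore
+/// pub struct CliUnstable {
+///     pub print_im_a_teapot: bool,
+///     pub illustrative_flag: bool, // hypothetical new `-Z illustrative-flag`
+/// }
+///
+/// // ...and in `CliUnstable::add`:
+/// match k {
+///     "print-im-a-teapot" => self.print_im_a_teapot = parse_bool(v)?,
+///     "illustrative-flag" => self.illustrative_flag = parse_bool(v)?,
+///     _ => bail!("unknown `-Z` flag specified: {}", k),
+/// }
+/// ```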
+#[derive(Default, Debug)]
+pub struct CliUnstable {
+    pub print_im_a_teapot: bool,
+}
+
+impl CliUnstable {
+    pub fn parse(&mut self, flags: &[String]) -> CargoResult<()> {
+        if !flags.is_empty() && !nightly_features_allowed() {
+            bail!("the `-Z` flag is only accepted on the nightly channel of Cargo")
+        }
+        for flag in flags {
+            self.add(flag)?;
+        }
+        Ok(())
+    }
+
+    fn add(&mut self, flag: &str) -> CargoResult<()> {
+        let mut parts = flag.splitn(2, '=');
+        let k = parts.next().unwrap();
+        let v = parts.next();
+
+        fn parse_bool(value: Option<&str>) -> CargoResult<bool> {
+            match value {
+                None |
+                Some("yes") => Ok(true),
+                Some("no") => Ok(false),
+                Some(s) => bail!("expected `no` or `yes`, found: {}", s),
+            }
+        }
+
+        match k {
+            "print-im-a-teapot" => self.print_im_a_teapot = parse_bool(v)?,
+            _ => bail!("unknown `-Z` flag specified: {}", k),
+        }
+
+        Ok(())
+    }
+}
+
+fn channel() -> String {
+    env::var("__CARGO_TEST_CHANNEL_OVERRIDE_DO_NOT_USE_THIS").unwrap_or_else(|_| {
+        ::version().cfg_info.map(|c| c.release_channel)
+            .unwrap_or_else(|| String::from("dev"))
+    })
+}
+
+fn nightly_features_allowed() -> bool {
+    match &channel()[..] {
+        "nightly" | "dev" => true,
+        _ => false,
+    }
+}
diff --git a/collector/compile-benchmarks/cargo/src/cargo/core/manifest.rs b/collector/compile-benchmarks/cargo/src/cargo/core/manifest.rs
new file mode 100644
index 000000000..d8c3710ed
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/core/manifest.rs
@@ -0,0 +1,709 @@
+use std::collections::{HashMap, BTreeMap};
+use std::fmt;
+use std::path::{PathBuf, Path};
+use std::rc::Rc;
+
+use semver::Version;
+use serde::ser;
+use url::Url;
+
+use core::{Dependency, PackageId, Summary, SourceId, PackageIdSpec};
+use core::{WorkspaceConfig, Features, Feature};
+use util::Config;
+use util::toml::TomlManifest;
+use util::errors::*;
+
+pub enum EitherManifest {
+    Real(Manifest),
+    Virtual(VirtualManifest),
+}
+
+/// Contains all the information about a package, as loaded from a Cargo.toml.
+#[derive(Clone, Debug)]
+pub struct Manifest {
+    summary: Summary,
+    targets: Vec<Target>,
+    links: Option<String>,
+    warnings: Vec<DelayedWarning>,
+    exclude: Vec<String>,
+    include: Vec<String>,
+    metadata: ManifestMetadata,
+    profiles: Profiles,
+    publish: bool,
+    replace: Vec<(PackageIdSpec, Dependency)>,
+    patch: HashMap<Url, Vec<Dependency>>,
+    workspace: WorkspaceConfig,
+    original: Rc<TomlManifest>,
+    features: Features,
+    im_a_teapot: Option<bool>,
+}
+
+/// When parsing `Cargo.toml`, some warnings should be silenced
+/// if the manifest comes from a dependency. `DelayedWarning`
+/// allows this delayed emission of warnings.
+#[derive(Clone, Debug)]
+pub struct DelayedWarning {
+    pub message: String,
+    pub is_critical: bool
+}
+
+#[derive(Clone, Debug)]
+pub struct VirtualManifest {
+    replace: Vec<(PackageIdSpec, Dependency)>,
+    patch: HashMap<Url, Vec<Dependency>>,
+    workspace: WorkspaceConfig,
+    profiles: Profiles,
+}
+
+/// General metadata about a package which is just blindly uploaded to the
+/// registry.
+///
+/// Note that many of these fields can contain invalid values such as the
+/// homepage, repository, documentation, or license. These fields are not
+/// validated by cargo itself, but rather it is up to the registry when uploaded
+/// to validate these fields. Cargo will itself accept any valid TOML
+/// specification for these values.
+#[derive(PartialEq, Clone, Debug)] +pub struct ManifestMetadata { + pub authors: Vec, + pub keywords: Vec, + pub categories: Vec, + pub license: Option, + pub license_file: Option, + pub description: Option, // not markdown + pub readme: Option, // file, not contents + pub homepage: Option, // url + pub repository: Option, // url + pub documentation: Option, // url + pub badges: BTreeMap>, +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] +pub enum LibKind { + Lib, + Rlib, + Dylib, + ProcMacro, + Other(String), +} + +impl LibKind { + pub fn from_str(string: &str) -> LibKind { + match string { + "lib" => LibKind::Lib, + "rlib" => LibKind::Rlib, + "dylib" => LibKind::Dylib, + "proc-macro" => LibKind::ProcMacro, + s => LibKind::Other(s.to_string()), + } + } + + /// Returns the argument suitable for `--crate-type` to pass to rustc. + pub fn crate_type(&self) -> &str { + match *self { + LibKind::Lib => "lib", + LibKind::Rlib => "rlib", + LibKind::Dylib => "dylib", + LibKind::ProcMacro => "proc-macro", + LibKind::Other(ref s) => s, + } + } + + pub fn linkable(&self) -> bool { + match *self { + LibKind::Lib | + LibKind::Rlib | + LibKind::Dylib | + LibKind::ProcMacro => true, + LibKind::Other(..) => false, + } + } +} + +#[derive(Debug, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] +pub enum TargetKind { + Lib(Vec), + Bin, + Test, + Bench, + ExampleLib(Vec), + ExampleBin, + CustomBuild, +} + +impl ser::Serialize for TargetKind { + fn serialize(&self, s: S) -> Result + where S: ser::Serializer, + { + use self::TargetKind::*; + match *self { + Lib(ref kinds) => kinds.iter().map(LibKind::crate_type).collect(), + Bin => vec!["bin"], + ExampleBin | ExampleLib(_) => vec!["example"], + Test => vec!["test"], + CustomBuild => vec!["custom-build"], + Bench => vec!["bench"] + }.serialize(s) + } +} + + +// Note that most of the fields here are skipped when serializing because we +// don't want to export them just yet (becomes a public API of Cargo). Others +// though are definitely needed! +#[derive(Clone, PartialEq, Eq, Debug, Hash, Serialize)] +pub struct Profile { + pub opt_level: String, + #[serde(skip_serializing)] + pub lto: bool, + #[serde(skip_serializing)] + pub codegen_units: Option, // None = use rustc default + #[serde(skip_serializing)] + pub rustc_args: Option>, + #[serde(skip_serializing)] + pub rustdoc_args: Option>, + pub debuginfo: Option, + pub debug_assertions: bool, + pub overflow_checks: bool, + #[serde(skip_serializing)] + pub rpath: bool, + pub test: bool, + #[serde(skip_serializing)] + pub doc: bool, + #[serde(skip_serializing)] + pub run_custom_build: bool, + #[serde(skip_serializing)] + pub check: bool, + #[serde(skip_serializing)] + pub panic: Option, +} + +#[derive(Default, Clone, Debug, PartialEq, Eq)] +pub struct Profiles { + pub release: Profile, + pub dev: Profile, + pub test: Profile, + pub test_deps: Profile, + pub bench: Profile, + pub bench_deps: Profile, + pub doc: Profile, + pub custom_build: Profile, + pub check: Profile, + pub doctest: Profile, +} + +/// Information about a binary, a library, an example, etc. that is part of the +/// package. +#[derive(Clone, Hash, PartialEq, Eq, Debug)] +pub struct Target { + kind: TargetKind, + name: String, + src_path: PathBuf, + required_features: Option>, + tested: bool, + benched: bool, + doc: bool, + doctest: bool, + harness: bool, // whether to use the test harness (--test) + for_host: bool, +} + +#[derive(Serialize)] +struct SerializedTarget<'a> { + /// Is this a `--bin bin`, `--lib`, `--example ex`? 
+ /// Serialized as a list of strings for historical reasons. + kind: &'a TargetKind, + /// Corresponds to `--crate-type` compiler attribute. + /// See https://doc.rust-lang.org/reference.html#linkage + crate_types: Vec<&'a str>, + name: &'a str, + src_path: &'a PathBuf, +} + +impl ser::Serialize for Target { + fn serialize(&self, s: S) -> Result { + SerializedTarget { + kind: &self.kind, + crate_types: self.rustc_crate_types(), + name: &self.name, + src_path: &self.src_path, + }.serialize(s) + } +} + +impl Manifest { + pub fn new(summary: Summary, + targets: Vec, + exclude: Vec, + include: Vec, + links: Option, + metadata: ManifestMetadata, + profiles: Profiles, + publish: bool, + replace: Vec<(PackageIdSpec, Dependency)>, + patch: HashMap>, + workspace: WorkspaceConfig, + features: Features, + im_a_teapot: Option, + original: Rc) -> Manifest { + Manifest { + summary: summary, + targets: targets, + warnings: Vec::new(), + exclude: exclude, + include: include, + links: links, + metadata: metadata, + profiles: profiles, + publish: publish, + replace: replace, + patch: patch, + workspace: workspace, + features: features, + original: original, + im_a_teapot: im_a_teapot, + } + } + + pub fn dependencies(&self) -> &[Dependency] { self.summary.dependencies() } + pub fn exclude(&self) -> &[String] { &self.exclude } + pub fn include(&self) -> &[String] { &self.include } + pub fn metadata(&self) -> &ManifestMetadata { &self.metadata } + pub fn name(&self) -> &str { self.package_id().name() } + pub fn package_id(&self) -> &PackageId { self.summary.package_id() } + pub fn summary(&self) -> &Summary { &self.summary } + pub fn targets(&self) -> &[Target] { &self.targets } + pub fn version(&self) -> &Version { self.package_id().version() } + pub fn warnings(&self) -> &[DelayedWarning] { &self.warnings } + pub fn profiles(&self) -> &Profiles { &self.profiles } + pub fn publish(&self) -> bool { self.publish } + pub fn replace(&self) -> &[(PackageIdSpec, Dependency)] { &self.replace } + pub fn original(&self) -> &TomlManifest { &self.original } + pub fn patch(&self) -> &HashMap> { &self.patch } + pub fn links(&self) -> Option<&str> { + self.links.as_ref().map(|s| &s[..]) + } + + pub fn workspace_config(&self) -> &WorkspaceConfig { + &self.workspace + } + + pub fn features(&self) -> &Features { + &self.features + } + + pub fn add_warning(&mut self, s: String) { + self.warnings.push(DelayedWarning { message: s, is_critical: false }) + } + + pub fn add_critical_warning(&mut self, s: String) { + self.warnings.push(DelayedWarning { message: s, is_critical: true }) + } + + pub fn set_summary(&mut self, summary: Summary) { + self.summary = summary; + } + + pub fn map_source(self, to_replace: &SourceId, replace_with: &SourceId) + -> Manifest { + Manifest { + summary: self.summary.map_source(to_replace, replace_with), + ..self + } + } + + pub fn feature_gate(&self) -> CargoResult<()> { + if self.im_a_teapot.is_some() { + self.features.require(Feature::test_dummy_unstable()).chain_err(|| { + "the `im-a-teapot` manifest key is unstable and may not work \ + properly in England" + })?; + } + + Ok(()) + } + + // Just a helper function to test out `-Z` flags on Cargo + pub fn print_teapot(&self, config: &Config) { + if let Some(teapot) = self.im_a_teapot { + if config.cli_unstable().print_im_a_teapot { + println!("im-a-teapot = {}", teapot); + } + } + } +} + +impl VirtualManifest { + pub fn new(replace: Vec<(PackageIdSpec, Dependency)>, + patch: HashMap>, + workspace: WorkspaceConfig, + profiles: Profiles) -> 
VirtualManifest { + VirtualManifest { + replace: replace, + patch: patch, + workspace: workspace, + profiles: profiles, + } + } + + pub fn replace(&self) -> &[(PackageIdSpec, Dependency)] { + &self.replace + } + + pub fn patch(&self) -> &HashMap> { + &self.patch + } + + pub fn workspace_config(&self) -> &WorkspaceConfig { + &self.workspace + } + + pub fn profiles(&self) -> &Profiles { + &self.profiles + } +} + +impl Target { + fn with_path(src_path: PathBuf) -> Target { + assert!(src_path.is_absolute()); + Target { + kind: TargetKind::Bin, + name: String::new(), + src_path: src_path, + required_features: None, + doc: false, + doctest: false, + harness: true, + for_host: false, + tested: true, + benched: true, + } + } + + pub fn lib_target(name: &str, + crate_targets: Vec, + src_path: PathBuf) -> Target { + Target { + kind: TargetKind::Lib(crate_targets), + name: name.to_string(), + doctest: true, + doc: true, + ..Target::with_path(src_path) + } + } + + pub fn bin_target(name: &str, src_path: PathBuf, + required_features: Option>) -> Target { + Target { + kind: TargetKind::Bin, + name: name.to_string(), + required_features: required_features, + doc: true, + ..Target::with_path(src_path) + } + } + + /// Builds a `Target` corresponding to the `build = "build.rs"` entry. + pub fn custom_build_target(name: &str, src_path: PathBuf) -> Target { + Target { + kind: TargetKind::CustomBuild, + name: name.to_string(), + for_host: true, + benched: false, + tested: false, + ..Target::with_path(src_path) + } + } + + pub fn example_target(name: &str, + crate_targets: Vec, + src_path: PathBuf, + required_features: Option>) -> Target { + let kind = if crate_targets.is_empty() { + TargetKind::ExampleBin + } else { + TargetKind::ExampleLib(crate_targets) + }; + + Target { + kind: kind, + name: name.to_string(), + required_features: required_features, + benched: false, + ..Target::with_path(src_path) + } + } + + pub fn test_target(name: &str, src_path: PathBuf, + required_features: Option>) -> Target { + Target { + kind: TargetKind::Test, + name: name.to_string(), + required_features: required_features, + benched: false, + ..Target::with_path(src_path) + } + } + + pub fn bench_target(name: &str, src_path: PathBuf, + required_features: Option>) -> Target { + Target { + kind: TargetKind::Bench, + name: name.to_string(), + required_features: required_features, + tested: false, + ..Target::with_path(src_path) + } + } + + pub fn name(&self) -> &str { &self.name } + pub fn crate_name(&self) -> String { self.name.replace("-", "_") } + pub fn src_path(&self) -> &Path { &self.src_path } + pub fn required_features(&self) -> Option<&Vec> { self.required_features.as_ref() } + pub fn kind(&self) -> &TargetKind { &self.kind } + pub fn tested(&self) -> bool { self.tested } + pub fn harness(&self) -> bool { self.harness } + pub fn documented(&self) -> bool { self.doc } + pub fn for_host(&self) -> bool { self.for_host } + pub fn benched(&self) -> bool { self.benched } + + pub fn doctested(&self) -> bool { + self.doctest && match self.kind { + TargetKind::Lib(ref kinds) => { + kinds.iter().any(|k| { + *k == LibKind::Rlib || + *k == LibKind::Lib || + *k == LibKind::ProcMacro + }) + } + _ => false, + } + } + + pub fn allows_underscores(&self) -> bool { + self.is_bin() || self.is_example() || self.is_custom_build() + } + + pub fn is_lib(&self) -> bool { + match self.kind { + TargetKind::Lib(_) => true, + _ => false + } + } + + pub fn is_dylib(&self) -> bool { + match self.kind { + TargetKind::Lib(ref libs) => libs.iter().any(|l| 
*l == LibKind::Dylib), + _ => false + } + } + + pub fn is_cdylib(&self) -> bool { + let libs = match self.kind { + TargetKind::Lib(ref libs) => libs, + _ => return false + }; + libs.iter().any(|l| { + match *l { + LibKind::Other(ref s) => s == "cdylib", + _ => false, + } + }) + } + + pub fn linkable(&self) -> bool { + match self.kind { + TargetKind::Lib(ref kinds) => { + kinds.iter().any(|k| k.linkable()) + } + _ => false + } + } + + pub fn is_bin(&self) -> bool { self.kind == TargetKind::Bin } + + pub fn is_example(&self) -> bool { + match self.kind { + TargetKind::ExampleBin | + TargetKind::ExampleLib(..) => true, + _ => false + } + } + + pub fn is_bin_example(&self) -> bool { + // Needed for --all-examples in contexts where only runnable examples make sense + match self.kind { + TargetKind::ExampleBin => true, + _ => false + } + } + + pub fn is_test(&self) -> bool { self.kind == TargetKind::Test } + pub fn is_bench(&self) -> bool { self.kind == TargetKind::Bench } + pub fn is_custom_build(&self) -> bool { self.kind == TargetKind::CustomBuild } + + /// Returns the arguments suitable for `--crate-type` to pass to rustc. + pub fn rustc_crate_types(&self) -> Vec<&str> { + match self.kind { + TargetKind::Lib(ref kinds) | + TargetKind::ExampleLib(ref kinds) => { + kinds.iter().map(LibKind::crate_type).collect() + } + TargetKind::CustomBuild | + TargetKind::Bench | + TargetKind::Test | + TargetKind::ExampleBin | + TargetKind::Bin => vec!["bin"], + } + } + + pub fn can_lto(&self) -> bool { + match self.kind { + TargetKind::Lib(ref v) => { + !v.contains(&LibKind::Rlib) && + !v.contains(&LibKind::Dylib) && + !v.contains(&LibKind::Lib) + } + _ => true, + } + } + + pub fn set_tested(&mut self, tested: bool) -> &mut Target { + self.tested = tested; + self + } + pub fn set_benched(&mut self, benched: bool) -> &mut Target { + self.benched = benched; + self + } + pub fn set_doctest(&mut self, doctest: bool) -> &mut Target { + self.doctest = doctest; + self + } + pub fn set_for_host(&mut self, for_host: bool) -> &mut Target { + self.for_host = for_host; + self + } + pub fn set_harness(&mut self, harness: bool) -> &mut Target { + self.harness = harness; + self + } + pub fn set_doc(&mut self, doc: bool) -> &mut Target { + self.doc = doc; + self + } +} + +impl fmt::Display for Target { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self.kind { + TargetKind::Lib(..) => write!(f, "Target(lib)"), + TargetKind::Bin => write!(f, "Target(bin: {})", self.name), + TargetKind::Test => write!(f, "Target(test: {})", self.name), + TargetKind::Bench => write!(f, "Target(bench: {})", self.name), + TargetKind::ExampleBin | + TargetKind::ExampleLib(..) 
=> write!(f, "Target(example: {})", self.name), + TargetKind::CustomBuild => write!(f, "Target(script)"), + } + } +} + +impl Profile { + pub fn default_dev() -> Profile { + Profile { + debuginfo: Some(2), + debug_assertions: true, + overflow_checks: true, + ..Profile::default() + } + } + + pub fn default_release() -> Profile { + Profile { + opt_level: "3".to_string(), + debuginfo: None, + ..Profile::default() + } + } + + pub fn default_test() -> Profile { + Profile { + test: true, + ..Profile::default_dev() + } + } + + pub fn default_bench() -> Profile { + Profile { + test: true, + ..Profile::default_release() + } + } + + pub fn default_doc() -> Profile { + Profile { + doc: true, + ..Profile::default_dev() + } + } + + pub fn default_custom_build() -> Profile { + Profile { + run_custom_build: true, + ..Profile::default_dev() + } + } + + pub fn default_check() -> Profile { + Profile { + check: true, + ..Profile::default_dev() + } + } + + pub fn default_doctest() -> Profile { + Profile { + doc: true, + test: true, + ..Profile::default_dev() + } + } +} + +impl Default for Profile { + fn default() -> Profile { + Profile { + opt_level: "0".to_string(), + lto: false, + codegen_units: None, + rustc_args: None, + rustdoc_args: None, + debuginfo: None, + debug_assertions: false, + overflow_checks: false, + rpath: false, + test: false, + doc: false, + run_custom_build: false, + check: false, + panic: None, + } + } +} + +impl fmt::Display for Profile { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + if self.test { + write!(f, "Profile(test)") + } else if self.doc { + write!(f, "Profile(doc)") + } else if self.run_custom_build { + write!(f, "Profile(run)") + } else if self.check { + write!(f, "Profile(check)") + } else { + write!(f, "Profile(build)") + } + + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/core/mod.rs b/collector/compile-benchmarks/cargo/src/cargo/core/mod.rs new file mode 100644 index 000000000..6b4e3906f --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/core/mod.rs @@ -0,0 +1,26 @@ +pub use self::dependency::Dependency; +pub use self::features::{Features, Feature, CliUnstable}; +pub use self::manifest::{EitherManifest, VirtualManifest}; +pub use self::manifest::{Manifest, Target, TargetKind, Profile, LibKind, Profiles}; +pub use self::package::{Package, PackageSet}; +pub use self::package_id::PackageId; +pub use self::package_id_spec::PackageIdSpec; +pub use self::registry::Registry; +pub use self::resolver::Resolve; +pub use self::shell::{Shell, Verbosity}; +pub use self::source::{Source, SourceId, SourceMap, GitReference}; +pub use self::summary::Summary; +pub use self::workspace::{Members, Workspace, WorkspaceConfig, WorkspaceRootConfig}; + +pub mod source; +pub mod package; +pub mod package_id; +pub mod dependency; +pub mod manifest; +pub mod resolver; +pub mod summary; +pub mod shell; +pub mod registry; +mod package_id_spec; +mod workspace; +mod features; diff --git a/collector/compile-benchmarks/cargo/src/cargo/core/package.rs b/collector/compile-benchmarks/cargo/src/cargo/core/package.rs new file mode 100644 index 000000000..885dad937 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/core/package.rs @@ -0,0 +1,219 @@ +use std::cell::{Ref, RefCell}; +use std::collections::{HashMap, BTreeMap}; +use std::fmt; +use std::hash; +use std::path::{Path, PathBuf}; + +use semver::Version; +use serde::ser; +use toml; + +use core::{Dependency, Manifest, PackageId, SourceId, Target}; +use core::{Summary, SourceMap}; +use ops; +use 
util::{Config, LazyCell, internal, lev_distance}; +use util::errors::{CargoResult, CargoResultExt}; + +/// Information about a package that is available somewhere in the file system. +/// +/// A package is a `Cargo.toml` file plus all the files that are part of it. +// TODO: Is manifest_path a relic? +#[derive(Clone, Debug)] +pub struct Package { + /// The package's manifest + manifest: Manifest, + /// The root of the package + manifest_path: PathBuf, +} + +/// A Package in a form where `Serialize` can be derived. +#[derive(Serialize)] +struct SerializedPackage<'a> { + name: &'a str, + version: &'a str, + id: &'a PackageId, + license: Option<&'a str>, + license_file: Option<&'a str>, + description: Option<&'a str>, + source: &'a SourceId, + dependencies: &'a [Dependency], + targets: &'a [Target], + features: &'a BTreeMap>, + manifest_path: &'a str, +} + +impl ser::Serialize for Package { + fn serialize(&self, s: S) -> Result + where S: ser::Serializer, + { + let summary = self.manifest.summary(); + let package_id = summary.package_id(); + let manmeta = self.manifest.metadata(); + let license = manmeta.license.as_ref().map(String::as_ref); + let license_file = manmeta.license_file.as_ref().map(String::as_ref); + let description = manmeta.description.as_ref().map(String::as_ref); + + SerializedPackage { + name: package_id.name(), + version: &package_id.version().to_string(), + id: package_id, + license: license, + license_file: license_file, + description: description, + source: summary.source_id(), + dependencies: summary.dependencies(), + targets: self.manifest.targets(), + features: summary.features(), + manifest_path: &self.manifest_path.display().to_string(), + }.serialize(s) + } +} + +impl Package { + /// Create a package from a manifest and its location + pub fn new(manifest: Manifest, + manifest_path: &Path) -> Package { + Package { + manifest: manifest, + manifest_path: manifest_path.to_path_buf(), + } + } + + /// Calculate the Package from the manifest path (and cargo configuration). 
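+    ///
+    /// A minimal usage sketch (the path is illustrative):
+    ///
+    /// ```rust,ignore
+    /// let pkg = Package::for_path(Path::new("/path/to/Cargo.toml"), &config)?;
+    /// println!("loaded {} v{}", pkg.name(), pkg.version());
+    /// ```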
+    pub fn for_path(manifest_path: &Path, config: &Config) -> CargoResult<Package> {
+        let path = manifest_path.parent().unwrap();
+        let source_id = SourceId::for_path(path)?;
+        let (pkg, _) = ops::read_package(manifest_path, &source_id, config)?;
+        Ok(pkg)
+    }
+
+    /// Get the manifest dependencies
+    pub fn dependencies(&self) -> &[Dependency] { self.manifest.dependencies() }
+    /// Get the manifest
+    pub fn manifest(&self) -> &Manifest { &self.manifest }
+    /// Get the path to the manifest
+    pub fn manifest_path(&self) -> &Path { &self.manifest_path }
+    /// Get the name of the package
+    pub fn name(&self) -> &str { self.package_id().name() }
+    /// Get the PackageId object for the package (fully defines a package)
+    pub fn package_id(&self) -> &PackageId { self.manifest.package_id() }
+    /// Get the root folder of the package
+    pub fn root(&self) -> &Path { self.manifest_path.parent().unwrap() }
+    /// Get the summary for the package
+    pub fn summary(&self) -> &Summary { self.manifest.summary() }
+    /// Get the targets specified in the manifest
+    pub fn targets(&self) -> &[Target] { self.manifest.targets() }
+    /// Get the current package version
+    pub fn version(&self) -> &Version { self.package_id().version() }
+    /// Get the package authors
+    pub fn authors(&self) -> &Vec<String> { &self.manifest.metadata().authors }
+    /// Whether the package is set to publish
+    pub fn publish(&self) -> bool { self.manifest.publish() }
+
+    /// Whether the package uses a custom build script for any target
+    pub fn has_custom_build(&self) -> bool {
+        self.targets().iter().any(|t| t.is_custom_build())
+    }
+
+    pub fn find_closest_target(&self,
+                               target: &str,
+                               is_expected_kind: fn(&Target) -> bool) -> Option<&Target> {
+        let targets = self.targets();
+
+        let matches = targets.iter().filter(|t| is_expected_kind(t))
+                                    .map(|t| (lev_distance(target, t.name()), t))
+                                    .filter(|&(d, _)| d < 4);
+        matches.min_by_key(|t| t.0).map(|t| t.1)
+    }
+
+    pub fn map_source(self, to_replace: &SourceId, replace_with: &SourceId)
+                      -> Package {
+        Package {
+            manifest: self.manifest.map_source(to_replace, replace_with),
+            manifest_path: self.manifest_path,
+        }
+    }
+
+    pub fn to_registry_toml(&self) -> String {
+        let manifest = self.manifest().original().prepare_for_publish();
+        let toml = toml::to_string(&manifest).unwrap();
+        format!("\
+        # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO\n\
+        #\n\
+        # When uploading crates to the registry Cargo will automatically\n\
+        # \"normalize\" Cargo.toml files for maximal compatibility\n\
+        # with all versions of Cargo and also rewrite `path` dependencies\n\
+        # to registry (e.g. crates.io) dependencies\n\
+        #\n\
+        # If you believe there's an error in this file please file an\n\
+        # issue against the rust-lang/cargo repository.
If you're\n\ + # editing this file be aware that the upstream Cargo.toml\n\ + # will likely look very different (and much more reasonable)\n\ + \n\ + {}\ + ", toml) + } +} + +impl fmt::Display for Package { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.summary().package_id()) + } +} + +impl PartialEq for Package { + fn eq(&self, other: &Package) -> bool { + self.package_id() == other.package_id() + } +} + +impl Eq for Package {} + +impl hash::Hash for Package { + fn hash(&self, into: &mut H) { + self.package_id().hash(into) + } +} + +pub struct PackageSet<'cfg> { + packages: HashMap>, + sources: RefCell>, +} + +impl<'cfg> PackageSet<'cfg> { + pub fn new(package_ids: &[PackageId], + sources: SourceMap<'cfg>) -> PackageSet<'cfg> { + PackageSet { + packages: package_ids.iter().map(|id| { + (id.clone(), LazyCell::new()) + }).collect(), + sources: RefCell::new(sources), + } + } + + pub fn package_ids<'a>(&'a self) -> Box + 'a> { + Box::new(self.packages.keys()) + } + + pub fn get(&self, id: &PackageId) -> CargoResult<&Package> { + let slot = self.packages.get(id).ok_or_else(|| { + internal(format!("couldn't find `{}` in package set", id)) + })?; + if let Some(pkg) = slot.borrow() { + return Ok(pkg) + } + let mut sources = self.sources.borrow_mut(); + let source = sources.get_mut(id.source_id()).ok_or_else(|| { + internal(format!("couldn't find source for `{}`", id)) + })?; + let pkg = source.download(id).chain_err(|| { + "unable to get packages from source" + })?; + assert!(slot.fill(pkg).is_ok()); + Ok(slot.borrow().unwrap()) + } + + pub fn sources(&self) -> Ref> { + self.sources.borrow() + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/core/package_id.rs b/collector/compile-benchmarks/cargo/src/cargo/core/package_id.rs new file mode 100644 index 000000000..908798555 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/core/package_id.rs @@ -0,0 +1,190 @@ +use std::cmp::Ordering; +use std::fmt::{self, Formatter}; +use std::hash::Hash; +use std::hash; +use std::path::Path; +use std::sync::Arc; + +use semver; +use serde::de; +use serde::ser; + +use util::{CargoResult, ToSemver}; +use core::source::SourceId; + +/// Identifier for a specific version of a package in a specific source. 
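+///
+/// Serialized as the string form `name version (source-url)` (see the
+/// `Serialize`/`Deserialize` impls below), e.g., with an illustrative path
+/// source: `foo 1.2.3 (path+file:///path/to/foo)`.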
+#[derive(Clone)] +pub struct PackageId { + inner: Arc, +} + +#[derive(PartialEq, PartialOrd, Eq, Ord)] +struct PackageIdInner { + name: String, + version: semver::Version, + source_id: SourceId, +} + +impl ser::Serialize for PackageId { + fn serialize(&self, s: S) -> Result + where S: ser::Serializer + { + s.collect_str(&format_args!("{} {} ({})", + self.inner.name, + self.inner.version, + self.inner.source_id.to_url())) + } +} + +impl<'de> de::Deserialize<'de> for PackageId { + fn deserialize(d: D) -> Result + where D: de::Deserializer<'de> + { + let string = String::deserialize(d)?; + let mut s = string.splitn(3, ' '); + let name = s.next().unwrap(); + let version = match s.next() { + Some(s) => s, + None => return Err(de::Error::custom("invalid serialized PackageId")), + }; + let version = semver::Version::parse(version) + .map_err(de::Error::custom)?; + let url = match s.next() { + Some(s) => s, + None => return Err(de::Error::custom("invalid serialized PackageId")), + }; + let url = if url.starts_with('(') && url.ends_with(')') { + &url[1..url.len() - 1] + } else { + return Err(de::Error::custom("invalid serialized PackageId")) + + }; + let source_id = SourceId::from_url(url).map_err(de::Error::custom)?; + + Ok(PackageId { + inner: Arc::new(PackageIdInner { + name: name.to_string(), + version: version, + source_id: source_id, + }), + }) + } +} + +impl Hash for PackageId { + fn hash(&self, state: &mut S) { + self.inner.name.hash(state); + self.inner.version.hash(state); + self.inner.source_id.hash(state); + } +} + +impl PartialEq for PackageId { + fn eq(&self, other: &PackageId) -> bool { + (*self.inner).eq(&*other.inner) + } +} +impl PartialOrd for PackageId { + fn partial_cmp(&self, other: &PackageId) -> Option { + (*self.inner).partial_cmp(&*other.inner) + } +} +impl Eq for PackageId {} +impl Ord for PackageId { + fn cmp(&self, other: &PackageId) -> Ordering { + (*self.inner).cmp(&*other.inner) + } +} + +impl PackageId { + pub fn new(name: &str, version: T, + sid: &SourceId) -> CargoResult { + let v = version.to_semver()?; + Ok(PackageId { + inner: Arc::new(PackageIdInner { + name: name.to_string(), + version: v, + source_id: sid.clone(), + }), + }) + } + + pub fn name(&self) -> &str { &self.inner.name } + pub fn version(&self) -> &semver::Version { &self.inner.version } + pub fn source_id(&self) -> &SourceId { &self.inner.source_id } + + pub fn with_precise(&self, precise: Option) -> PackageId { + PackageId { + inner: Arc::new(PackageIdInner { + name: self.inner.name.to_string(), + version: self.inner.version.clone(), + source_id: self.inner.source_id.with_precise(precise), + }), + } + } + + pub fn with_source_id(&self, source: &SourceId) -> PackageId { + PackageId { + inner: Arc::new(PackageIdInner { + name: self.inner.name.to_string(), + version: self.inner.version.clone(), + source_id: source.clone(), + }), + } + } + + pub fn stable_hash<'a>(&'a self, workspace: &'a Path) -> PackageIdStableHash<'a> { + PackageIdStableHash(self, workspace) + } +} + +pub struct PackageIdStableHash<'a>(&'a PackageId, &'a Path); + +impl<'a> Hash for PackageIdStableHash<'a> { + fn hash(&self, state: &mut S) { + self.0.inner.name.hash(state); + self.0.inner.version.hash(state); + self.0.inner.source_id.stable_hash(self.1, state); + } +} + +impl fmt::Display for PackageId { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "{} v{}", self.inner.name, self.inner.version)?; + + if !self.inner.source_id.is_default_registry() { + write!(f, " ({})", self.inner.source_id)?; + } + + Ok(()) + } 
+}
+
+impl fmt::Debug for PackageId {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        f.debug_struct("PackageId")
+         .field("name", &self.inner.name)
+         .field("version", &self.inner.version.to_string())
+         .field("source", &self.inner.source_id.to_string())
+         .finish()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::PackageId;
+    use core::source::SourceId;
+    use sources::CRATES_IO;
+    use util::ToUrl;
+
+    #[test]
+    fn invalid_version_handled_nicely() {
+        let loc = CRATES_IO.to_url().unwrap();
+        let repo = SourceId::for_registry(&loc).unwrap();
+
+        assert!(PackageId::new("foo", "1.0", &repo).is_err());
+        assert!(PackageId::new("foo", "1", &repo).is_err());
+        assert!(PackageId::new("foo", "bar", &repo).is_err());
+        assert!(PackageId::new("foo", "", &repo).is_err());
+    }
+}
diff --git a/collector/compile-benchmarks/cargo/src/cargo/core/package_id_spec.rs b/collector/compile-benchmarks/cargo/src/cargo/core/package_id_spec.rs
new file mode 100644
index 000000000..d271f2f66
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/core/package_id_spec.rs
@@ -0,0 +1,280 @@
+use std::collections::HashMap;
+use std::fmt;
+
+use semver::Version;
+use url::Url;
+
+use core::PackageId;
+use util::{ToUrl, ToSemver};
+use util::errors::{CargoError, CargoResult, CargoResultExt};
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct PackageIdSpec {
+    name: String,
+    version: Option<Version>,
+    url: Option<Url>,
+}
+
+impl PackageIdSpec {
+    pub fn parse(spec: &str) -> CargoResult<PackageIdSpec> {
+        if spec.contains('/') {
+            if let Ok(url) = spec.to_url() {
+                return PackageIdSpec::from_url(url);
+            }
+            if !spec.contains("://") {
+                if let Ok(url) = Url::parse(&format!("cargo://{}", spec)) {
+                    return PackageIdSpec::from_url(url);
+                }
+            }
+        }
+        let mut parts = spec.splitn(2, ':');
+        let name = parts.next().unwrap();
+        let version = match parts.next() {
+            Some(version) => Some(Version::parse(version)?),
+            None => None,
+        };
+        for ch in name.chars() {
+            if !ch.is_alphanumeric() && ch != '_' && ch != '-' {
+                bail!("invalid character in pkgid `{}`: `{}`", spec, ch)
+            }
+        }
+        Ok(PackageIdSpec {
+            name: name.to_string(),
+            version: version,
+            url: None,
+        })
+    }
+
+    pub fn query_str<'a, I>(spec: &str, i: I) -> CargoResult<&'a PackageId>
+        where I: IntoIterator<Item = &'a PackageId>
+    {
+        let spec = PackageIdSpec::parse(spec).chain_err(|| {
+            format!("invalid package id specification: `{}`", spec)
+        })?;
+        spec.query(i)
+    }
+
+    pub fn from_package_id(package_id: &PackageId) -> PackageIdSpec {
+        PackageIdSpec {
+            name: package_id.name().to_string(),
+            version: Some(package_id.version().clone()),
+            url: Some(package_id.source_id().url().clone()),
+        }
+    }
+
+    fn from_url(mut url: Url) -> CargoResult<PackageIdSpec> {
+        if url.query().is_some() {
+            bail!("cannot have a query string in a pkgid: {}", url)
+        }
+        let frag = url.fragment().map(|s| s.to_owned());
+        url.set_fragment(None);
+        let (name, version) = {
+            let mut path = url.path_segments().ok_or_else(|| {
+                CargoError::from(format!("pkgid urls must have a path: {}", url))
+            })?;
+            let path_name = path.next_back().ok_or_else(|| {
+                CargoError::from(format!("pkgid urls must have at least one path \
+                                          component: {}", url))
+            })?;
+            match frag {
+                Some(fragment) => {
+                    let mut parts = fragment.splitn(2, ':');
+                    let name_or_version = parts.next().unwrap();
+                    match parts.next() {
+                        Some(part) => {
+                            let version = part.to_semver()?;
+                            (name_or_version.to_string(), Some(version))
+                        }
+                        None => {
+                            if name_or_version.chars().next().unwrap()
+                                              .is_alphabetic() {
+                                (name_or_version.to_string(), None)
+                            } else {
+                                let version = name_or_version.to_semver()?;
+                                (path_name.to_string(), Some(version))
+                            }
+                        }
+                    }
+                }
+                None => (path_name.to_string(), None),
+            }
+        };
+        Ok(PackageIdSpec {
+            name: name,
+            version: version,
+            url: Some(url),
+        })
+    }
+
+    pub fn name(&self) -> &str { &self.name }
+    pub fn version(&self) -> Option<&Version> { self.version.as_ref() }
+    pub fn url(&self) -> Option<&Url> { self.url.as_ref() }
+
+    pub fn set_url(&mut self, url: Url) {
+        self.url = Some(url);
+    }
+
+    pub fn matches(&self, package_id: &PackageId) -> bool {
+        if self.name() != package_id.name() { return false }
+
+        if let Some(ref v) = self.version {
+            if v != package_id.version() {
+                return false;
+            }
+        }
+
+        match self.url {
+            Some(ref u) => u == package_id.source_id().url(),
+            None => true
+        }
+    }
+
+    pub fn query<'a, I>(&self, i: I) -> CargoResult<&'a PackageId>
+        where I: IntoIterator<Item = &'a PackageId>
+    {
+        let mut ids = i.into_iter().filter(|p| self.matches(*p));
+        let ret = match ids.next() {
+            Some(id) => id,
+            None => bail!("package id specification `{}` \
+                           matched no packages", self),
+        };
+        return match ids.next() {
+            Some(other) => {
+                let mut msg = format!("There are multiple `{}` packages in \
+                                       your project, and the specification \
+                                       `{}` is ambiguous.\n\
+                                       Please re-run this command \
+                                       with `-p <spec>` where `<spec>` is one \
+                                       of the following:",
+                                      self.name(), self);
+                let mut vec = vec![ret, other];
+                vec.extend(ids);
+                minimize(&mut msg, &vec, self);
+                Err(msg.into())
+            }
+            None => Ok(ret)
+        };
+
+        fn minimize(msg: &mut String,
+                    ids: &[&PackageId],
+                    spec: &PackageIdSpec) {
+            let mut version_cnt = HashMap::new();
+            for id in ids {
+                *version_cnt.entry(id.version()).or_insert(0) += 1;
+            }
+            for id in ids {
+                if version_cnt[id.version()] == 1 {
+                    msg.push_str(&format!("\n  {}:{}", spec.name(),
+                                          id.version()));
+                } else {
+                    msg.push_str(&format!("\n  {}",
+                                          PackageIdSpec::from_package_id(*id)));
+                }
+            }
+        }
+    }
+}
+
+impl fmt::Display for PackageIdSpec {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let mut printed_name = false;
+        match self.url {
+            Some(ref url) => {
+                if url.scheme() == "cargo" {
+                    write!(f, "{}{}", url.host().unwrap(), url.path())?;
+                } else {
+                    write!(f, "{}", url)?;
+                }
+                if url.path_segments().unwrap().next_back().unwrap() != self.name {
+                    printed_name = true;
+                    write!(f, "#{}", self.name)?;
+                }
+            }
+            None => { printed_name = true; write!(f, "{}", self.name)?
} + } + if let Some(ref v) = self.version { + write!(f, "{}{}", if printed_name {":"} else {"#"}, v)?; + } + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use core::{PackageId, SourceId}; + use super::PackageIdSpec; + use url::Url; + use semver::Version; + + #[test] + fn good_parsing() { + fn ok(spec: &str, expected: PackageIdSpec) { + let parsed = PackageIdSpec::parse(spec).unwrap(); + assert_eq!(parsed, expected); + assert_eq!(parsed.to_string(), spec); + } + + ok("http://crates.io/foo#1.2.3", PackageIdSpec { + name: "foo".to_string(), + version: Some(Version::parse("1.2.3").unwrap()), + url: Some(Url::parse("http://crates.io/foo").unwrap()), + }); + ok("http://crates.io/foo#bar:1.2.3", PackageIdSpec { + name: "bar".to_string(), + version: Some(Version::parse("1.2.3").unwrap()), + url: Some(Url::parse("http://crates.io/foo").unwrap()), + }); + ok("crates.io/foo", PackageIdSpec { + name: "foo".to_string(), + version: None, + url: Some(Url::parse("cargo://crates.io/foo").unwrap()), + }); + ok("crates.io/foo#1.2.3", PackageIdSpec { + name: "foo".to_string(), + version: Some(Version::parse("1.2.3").unwrap()), + url: Some(Url::parse("cargo://crates.io/foo").unwrap()), + }); + ok("crates.io/foo#bar", PackageIdSpec { + name: "bar".to_string(), + version: None, + url: Some(Url::parse("cargo://crates.io/foo").unwrap()), + }); + ok("crates.io/foo#bar:1.2.3", PackageIdSpec { + name: "bar".to_string(), + version: Some(Version::parse("1.2.3").unwrap()), + url: Some(Url::parse("cargo://crates.io/foo").unwrap()), + }); + ok("foo", PackageIdSpec { + name: "foo".to_string(), + version: None, + url: None, + }); + ok("foo:1.2.3", PackageIdSpec { + name: "foo".to_string(), + version: Some(Version::parse("1.2.3").unwrap()), + url: None, + }); + } + + #[test] + fn bad_parsing() { + assert!(PackageIdSpec::parse("baz:").is_err()); + assert!(PackageIdSpec::parse("baz:*").is_err()); + assert!(PackageIdSpec::parse("baz:1.0").is_err()); + assert!(PackageIdSpec::parse("http://baz:1.0").is_err()); + assert!(PackageIdSpec::parse("http://#baz:1.0").is_err()); + } + + #[test] + fn matching() { + let url = Url::parse("http://example.com").unwrap(); + let sid = SourceId::for_registry(&url).unwrap(); + let foo = PackageId::new("foo", "1.2.3", &sid).unwrap(); + let bar = PackageId::new("bar", "1.2.3", &sid).unwrap(); + + assert!( PackageIdSpec::parse("foo").unwrap().matches(&foo)); + assert!(!PackageIdSpec::parse("foo").unwrap().matches(&bar)); + assert!( PackageIdSpec::parse("foo:1.2.3").unwrap().matches(&foo)); + assert!(!PackageIdSpec::parse("foo:1.2.2").unwrap().matches(&foo)); + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/core/registry.rs b/collector/compile-benchmarks/cargo/src/cargo/core/registry.rs new file mode 100644 index 000000000..517486801 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/core/registry.rs @@ -0,0 +1,607 @@ +use std::collections::HashMap; + +use semver::VersionReq; +use url::Url; + +use core::{Source, SourceId, SourceMap, Summary, Dependency, PackageId}; +use core::PackageSet; +use util::{Config, profile}; +use util::errors::{CargoResult, CargoResultExt}; +use sources::config::SourceConfigMap; + +/// Source of information about a group of packages. +/// +/// See also `core::Source`. +pub trait Registry { + /// Attempt to find the packages that match a dependency request. 
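+    ///
+    /// A sketch of typical usage via the `query_vec` helper below
+    /// (`registry` and `dep` are assumed to be in scope):
+    ///
+    /// ```ignore
+    /// let mut matches = Vec::new();
+    /// registry.query(&dep, &mut |summary| matches.push(summary))?;
+    /// // ...or, equivalently:
+    /// let matches = registry.query_vec(&dep)?;
+    /// ```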
+    fn query(&mut self,
+             dep: &Dependency,
+             f: &mut FnMut(Summary)) -> CargoResult<()>;
+
+    fn query_vec(&mut self, dep: &Dependency) -> CargoResult<Vec<Summary>> {
+        let mut ret = Vec::new();
+        self.query(dep, &mut |s| ret.push(s))?;
+        Ok(ret)
+    }
+
+    /// Returns whether or not this registry will return summaries with
+    /// checksums listed.
+    fn supports_checksums(&self) -> bool;
+
+    /// Returns whether or not this registry will return summaries with
+    /// the `precise` field in the source id listed.
+    fn requires_precise(&self) -> bool;
+}
+
+impl<'a, T: ?Sized + Registry + 'a> Registry for Box<T> {
+    fn query(&mut self,
+             dep: &Dependency,
+             f: &mut FnMut(Summary)) -> CargoResult<()> {
+        (**self).query(dep, f)
+    }
+
+    fn supports_checksums(&self) -> bool {
+        (**self).supports_checksums()
+    }
+
+    fn requires_precise(&self) -> bool {
+        (**self).requires_precise()
+    }
+}
+
+/// This structure represents a registry of known packages. It internally
+/// contains a number of `Box<Source>` instances which are used to load a
+/// `Package` from.
+///
+/// The resolution phase of Cargo uses this to drive knowledge about new
+/// packages as well as querying for lists of new packages. It is here that
+/// sources are updated (e.g. network operations) and overrides are
+/// handled.
+///
+/// The general idea behind this registry is that it is centered around the
+/// `SourceMap` structure, contained within which is a mapping of a `SourceId` to
+/// a `Source`. Each `Source` in the map has been updated (using network
+/// operations if necessary) and is ready to be queried for packages.
+pub struct PackageRegistry<'cfg> {
+    sources: SourceMap<'cfg>,
+
+    // A list of sources which are considered "overrides" which take precedence
+    // when querying for packages.
+    overrides: Vec<SourceId>,
+
+    // Note that each SourceId does not take into account its `precise` field
+    // when hashing or testing for equality. When adding a new `SourceId`, we
+    // want to avoid duplicates in the `SourceMap` (to prevent re-updating the
+    // same git repo twice for example), but we also want to ensure that the
+    // loaded source is always updated.
+    //
+    // Sources with a `precise` field normally don't need to be updated because
+    // their contents are already on disk, but sources without a `precise` field
+    // almost always need to be updated. If we have a cached `Source` for a
+    // precise `SourceId`, then when we add a new `SourceId` that is not precise
+    // we want to ensure that the underlying source is updated.
+    //
+    // This is basically a long-winded way of saying that we want to know
+    // precisely what the keys of `sources` are, so this is a mapping of key to
+    // what exactly the key is.
+    source_ids: HashMap<SourceId, (SourceId, Kind)>,
+
+    locked: LockedMap,
+    source_config: SourceConfigMap<'cfg>,
+    patches: HashMap<Url, Vec<Summary>>,
+}
+
+type LockedMap = HashMap<SourceId, HashMap<String, Vec<(PackageId, Vec<PackageId>)>>>;
+
+#[derive(PartialEq, Eq, Clone, Copy)]
+enum Kind {
+    Override,
+    Locked,
+    Normal,
+}
+
+impl<'cfg> PackageRegistry<'cfg> {
+    pub fn new(config: &'cfg Config) -> CargoResult<PackageRegistry<'cfg>> {
+        let source_config = SourceConfigMap::new(config)?;
+        Ok(PackageRegistry {
+            sources: SourceMap::new(),
+            source_ids: HashMap::new(),
+            overrides: Vec::new(),
+            source_config: source_config,
+            locked: HashMap::new(),
+            patches: HashMap::new(),
+        })
+    }
+
+    pub fn get(self, package_ids: &[PackageId]) -> PackageSet<'cfg> {
+        trace!("getting packages; sources={}", self.sources.len());
+        PackageSet::new(package_ids, self.sources)
+    }
+
+    fn ensure_loaded(&mut self, namespace: &SourceId, kind: Kind) -> CargoResult<()> {
+        match self.source_ids.get(namespace) {
+            // We've previously loaded this source, and we've already locked it,
+            // so we're not allowed to change it even if `namespace` has a
+            // slightly different precise version listed.
+            Some(&(_, Kind::Locked)) => {
+                debug!("load/locked   {}", namespace);
+                return Ok(())
+            }
+
+            // If the previous source was not a precise source, then we can be
+            // sure that it's already been updated if we've already loaded it.
+            Some(&(ref previous, _)) if previous.precise().is_none() => {
+                debug!("load/precise  {}", namespace);
+                return Ok(())
+            }
+
+            // If the previous source has the same precise version as we do,
+            // then we're done, otherwise we need to move forward updating
+            // this source.
+            Some(&(ref previous, _)) => {
+                if previous.precise() == namespace.precise() {
+                    debug!("load/match    {}", namespace);
+                    return Ok(())
+                }
+                debug!("load/mismatch {}", namespace);
+            }
+            None => {
+                debug!("load/missing  {}", namespace);
+            }
+        }
+
+        self.load(namespace, kind)?;
+        Ok(())
+    }
+
+    pub fn add_sources(&mut self, ids: &[SourceId]) -> CargoResult<()> {
+        for id in ids.iter() {
+            self.ensure_loaded(id, Kind::Locked)?;
+        }
+        Ok(())
+    }
+
+    pub fn add_preloaded(&mut self, source: Box<Source + 'cfg>) {
+        self.add_source(source, Kind::Locked);
+    }
+
+    fn add_source(&mut self, source: Box<Source + 'cfg>, kind: Kind) {
+        let id = source.source_id().clone();
+        self.sources.insert(source);
+        self.source_ids.insert(id.clone(), (id, kind));
+    }
+
+    pub fn add_override(&mut self, source: Box<Source + 'cfg>) {
+        self.overrides.push(source.source_id().clone());
+        self.add_source(source, Kind::Override);
+    }
+
+    pub fn register_lock(&mut self, id: PackageId, deps: Vec<PackageId>) {
+        trace!("register_lock: {}", id);
+        for dep in deps.iter() {
+            trace!("\t-> {}", dep);
+        }
+        let sub_map = self.locked.entry(id.source_id().clone())
+                                 .or_insert(HashMap::new());
+        let sub_vec = sub_map.entry(id.name().to_string())
+                             .or_insert(Vec::new());
+        sub_vec.push((id, deps));
+    }
+
+    pub fn patch(&mut self, url: &Url, deps: &[Dependency]) -> CargoResult<()> {
+        let deps = deps.iter().map(|dep| {
+            let mut summaries = self.query_vec(dep)?.into_iter();
+            let summary = match summaries.next() {
+                Some(summary) => summary,
+                None => {
+                    bail!("patch for `{}` in `{}` did not resolve to any crates",
+                          dep.name(), url)
+                }
+            };
+            if summaries.next().is_some() {
+                bail!("patch for `{}` in `{}` resolved to more than one candidate",
+                      dep.name(), url)
+            }
+            if summary.package_id().source_id().url() == url {
+                bail!("patch for `{}` in `{}` points to the same source, but \
+                       patches must point to different sources",
+                      dep.name(), url);
+            }
+            Ok(summary)
+        }).collect::<CargoResult<Vec<_>>>().chain_err(|| {
+            format!("failed to resolve patches for `{}`", url)
+        })?;
+
+        self.patches.insert(url.clone(), deps);
+
+        Ok(())
+    }
+
+    pub fn patches(&self) -> &HashMap<Url, Vec<Summary>> {
+        &self.patches
+    }
+
+    fn load(&mut self, source_id: &SourceId, kind: Kind) -> CargoResult<()> {
+        (|| {
+            let source = self.source_config.load(source_id)?;
+            assert_eq!(source.source_id(), source_id);
+
+            if kind == Kind::Override {
+                self.overrides.push(source_id.clone());
+            }
+            self.add_source(source, kind);
+
+            // Ensure the source has fetched all necessary remote data.
+            let _p = profile::start(format!("updating: {}", source_id));
+            self.sources.get_mut(source_id).unwrap().update()
+        })().chain_err(|| format!("Unable to update {}", source_id))
+    }
+
+    fn query_overrides(&mut self, dep: &Dependency)
+                       -> CargoResult<Option<Summary>> {
+        for s in self.overrides.iter() {
+            let src = self.sources.get_mut(s).unwrap();
+            let dep = Dependency::new_override(dep.name(), s);
+            let mut results = src.query_vec(&dep)?;
+            if !results.is_empty() {
+                return Ok(Some(results.remove(0)))
+            }
+        }
+        Ok(None)
+    }
+
+    /// This function is used to transform a summary to another locked summary
+    /// if possible. This is where the concept of a lockfile comes into play.
+    ///
+    /// If a summary points at a package id which was previously locked, then we
+    /// override the summary's id itself, as well as all dependencies, to be
+    /// rewritten to the locked versions. This will transform the summary's
+    /// source to a precise source (listed in the locked version) as well as
+    /// transforming all of the dependencies from range requirements on
+    /// imprecise sources to exact requirements on precise sources.
+    ///
+    /// If a summary does not point at a package id which was previously locked,
+    /// or if any dependencies were added and don't have a previously listed
+    /// version, we still want to avoid updating as many dependencies as
+    /// possible to keep the graph stable. In this case we map all of the
+    /// summary's dependencies to be rewritten to a locked version wherever
+    /// possible. If we're unable to map a dependency though, we just pass it on
+    /// through.
+    pub fn lock(&self, summary: Summary) -> Summary {
+        lock(&self.locked, &self.patches, summary)
+    }
+
+    fn warn_bad_override(&self,
+                         override_summary: &Summary,
+                         real_summary: &Summary) -> CargoResult<()> {
+        let mut real_deps = real_summary.dependencies().iter().collect::<Vec<_>>();
+
+        let boilerplate = "\
+This is currently allowed but is known to produce buggy behavior with spurious
+recompiles and changes to the crate graph. Path overrides unfortunately were
+never intended to support this feature, so for now this message is just a
+warning. In the future, however, this message will become a hard error.
+
+To change the dependency graph via an override it's recommended to use the
+`[replace]` feature of Cargo instead of the path override feature. This is
+documented online at the url below for more information.
+ +http://doc.crates.io/specifying-dependencies.html#overriding-dependencies +"; + + for dep in override_summary.dependencies() { + if let Some(i) = real_deps.iter().position(|d| dep == *d) { + real_deps.remove(i); + continue + } + let msg = format!("\ + path override for crate `{}` has altered the original list of\n\ + dependencies; the dependency on `{}` was either added or\n\ + modified to not match the previously resolved version\n\n\ + {}", override_summary.package_id().name(), dep.name(), boilerplate); + self.source_config.config().shell().warn(&msg)?; + return Ok(()) + } + + if let Some(id) = real_deps.get(0) { + let msg = format!("\ + path override for crate `{}` has altered the original list of + dependencies; the dependency on `{}` was removed\n\n + {}", override_summary.package_id().name(), id.name(), boilerplate); + self.source_config.config().shell().warn(&msg)?; + return Ok(()) + } + + Ok(()) + } +} + +impl<'cfg> Registry for PackageRegistry<'cfg> { + fn query(&mut self, + dep: &Dependency, + f: &mut FnMut(Summary)) -> CargoResult<()> { + let (override_summary, n, to_warn) = { + // Look for an override and get ready to query the real source. + let override_summary = self.query_overrides(dep)?; + + // Next up on our list of candidates is to check the `[patch]` + // section of the manifest. Here we look through all patches + // relevant to the source that `dep` points to, and then we match + // name/version. Note that we don't use `dep.matches(..)` because + // the patches, by definition, come from a different source. + // This means that `dep.matches(..)` will always return false, when + // what we really care about is the name/version match. + let mut patches = Vec::

::new(); + if let Some(extra) = self.patches.get(dep.source_id().url()) { + patches.extend(extra.iter().filter(|s| { + dep.matches_ignoring_source(s) + }).cloned()); + } + + // A crucial feature of the `[patch]` feature is that we *don't* + // query the actual registry if we have a "locked" dependency. A + // locked dep basically just means a version constraint of `=a.b.c`, + // and because patches take priority over the actual source then if + // we have a candidate we're done. + if patches.len() == 1 && dep.is_locked() { + let patch = patches.remove(0); + match override_summary { + Some(summary) => (summary, 1, Some(patch)), + None => { + f(patch); + return Ok(()) + } + } + } else { + if !patches.is_empty() { + debug!("found {} patches with an unlocked dep, \ + looking at sources", patches.len()); + } + + // Ensure the requested source_id is loaded + self.ensure_loaded(dep.source_id(), Kind::Normal).chain_err(|| { + format!("failed to load source for a dependency \ + on `{}`", dep.name()) + })?; + + let source = self.sources.get_mut(dep.source_id()); + match (override_summary, source) { + (Some(_), None) => bail!("override found but no real ones"), + (None, None) => return Ok(()), + + // If we don't have an override then we just ship + // everything upstairs after locking the summary + (None, Some(source)) => { + for patch in patches.iter() { + f(patch.clone()); + } + + // Our sources shouldn't ever come back to us with two + // summaries that have the same version. We could, + // however, have an `[patch]` section which is in use + // to override a version in the registry. This means + // that if our `summary` in this loop has the same + // version as something in `patches` that we've + // already selected, then we skip this `summary`. + let locked = &self.locked; + let all_patches = &self.patches; + return source.query(dep, &mut |summary| { + for patch in patches.iter() { + let patch = patch.package_id().version(); + if summary.package_id().version() == patch { + return + } + } + f(lock(locked, all_patches, summary)) + }) + } + + // If we have an override summary then we query the source + // to sanity check its results. We don't actually use any of + // the summaries it gives us though. 
+ (Some(override_summary), Some(source)) => { + if !patches.is_empty() { + bail!("found patches and a path override") + } + let mut n = 0; + let mut to_warn = None; + source.query(dep, &mut |summary| { + n += 1; + to_warn = Some(summary); + })?; + (override_summary, n, to_warn) + } + } + } + }; + + if n > 1 { + bail!("found an override with a non-locked list"); + } else if let Some(summary) = to_warn { + self.warn_bad_override(&override_summary, &summary)?; + } + f(self.lock(override_summary)); + Ok(()) + } + + fn supports_checksums(&self) -> bool { + false + } + + fn requires_precise(&self) -> bool { + false + } +} + +fn lock(locked: &LockedMap, + patches: &HashMap>, + summary: Summary) -> Summary { + let pair = locked.get(summary.source_id()).and_then(|map| { + map.get(summary.name()) + }).and_then(|vec| { + vec.iter().find(|&&(ref id, _)| id == summary.package_id()) + }); + + trace!("locking summary of {}", summary.package_id()); + + // Lock the summary's id if possible + let summary = match pair { + Some(&(ref precise, _)) => summary.override_id(precise.clone()), + None => summary, + }; + summary.map_dependencies(|dep| { + trace!("\t{}/{}/{}", dep.name(), dep.version_req(), + dep.source_id()); + + // If we've got a known set of overrides for this summary, then + // one of a few cases can arise: + // + // 1. We have a lock entry for this dependency from the same + // source as it's listed as coming from. In this case we make + // sure to lock to precisely the given package id. + // + // 2. We have a lock entry for this dependency, but it's from a + // different source than what's listed, or the version + // requirement has changed. In this case we must discard the + // locked version because the dependency needs to be + // re-resolved. + // + // 3. We don't have a lock entry for this dependency, in which + // case it was likely an optional dependency which wasn't + // included previously so we just pass it through anyway. + // + // Cases 1/2 are handled by `matches_id` and case 3 is handled by + // falling through to the logic below. + if let Some(&(_, ref locked_deps)) = pair { + let locked = locked_deps.iter().find(|id| dep.matches_id(id)); + if let Some(locked) = locked { + trace!("\tfirst hit on {}", locked); + let mut dep = dep.clone(); + dep.lock_to(locked); + return dep + } + } + + // If this dependency did not have a locked version, then we query + // all known locked packages to see if they match this dependency. + // If anything does then we lock it to that and move on. + let v = locked.get(dep.source_id()).and_then(|map| { + map.get(dep.name()) + }).and_then(|vec| { + vec.iter().find(|&&(ref id, _)| dep.matches_id(id)) + }); + if let Some(&(ref id, _)) = v { + trace!("\tsecond hit on {}", id); + let mut dep = dep.clone(); + dep.lock_to(id); + return dep + } + + // Finally we check to see if any registered patches correspond to + // this dependency. 
+ let v = patches.get(dep.source_id().url()).map(|vec| { + let dep2 = dep.clone(); + let mut iter = vec.iter().filter(move |s| { + dep2.name() == s.package_id().name() && + dep2.version_req().matches(s.package_id().version()) + }); + (iter.next(), iter) + }); + if let Some((Some(summary), mut remaining)) = v { + assert!(remaining.next().is_none()); + let patch_source = summary.package_id().source_id(); + let patch_locked = locked.get(patch_source).and_then(|m| { + m.get(summary.package_id().name()) + }).map(|list| { + list.iter().any(|&(ref id, _)| id == summary.package_id()) + }).unwrap_or(false); + + if patch_locked { + trace!("\tthird hit on {}", summary.package_id()); + let req = VersionReq::exact(summary.package_id().version()); + let mut dep = dep.clone(); + dep.set_version_req(req); + return dep + } + } + + trace!("\tnope, unlocked"); + dep + }) +} + +#[cfg(test)] +pub mod test { + use core::{Summary, Registry, Dependency}; + use util::CargoResult; + + pub struct RegistryBuilder { + summaries: Vec, + overrides: Vec + } + + impl RegistryBuilder { + pub fn new() -> RegistryBuilder { + RegistryBuilder { summaries: vec![], overrides: vec![] } + } + + pub fn summary(mut self, summary: Summary) -> RegistryBuilder { + self.summaries.push(summary); + self + } + + pub fn summaries(mut self, summaries: Vec) -> RegistryBuilder { + self.summaries.extend(summaries.into_iter()); + self + } + + pub fn add_override(mut self, summary: Summary) -> RegistryBuilder { + self.overrides.push(summary); + self + } + + pub fn overrides(mut self, summaries: Vec) -> RegistryBuilder { + self.overrides.extend(summaries.into_iter()); + self + } + + fn query_overrides(&self, dep: &Dependency) -> Vec { + self.overrides.iter() + .filter(|s| s.name() == dep.name()) + .map(|s| s.clone()) + .collect() + } + } + + impl Registry for RegistryBuilder { + fn query(&mut self, + dep: &Dependency, + f: &mut FnMut(Summary)) -> CargoResult<()> { + debug!("querying; dep={:?}", dep); + + let overrides = self.query_overrides(dep); + + if overrides.is_empty() { + for s in self.summaries.iter() { + if dep.matches(s) { + f(s.clone()); + } + } + Ok(()) + } else { + for s in overrides { + f(s); + } + Ok(()) + } + } + + fn supports_checksums(&self) -> bool { + false + } + + fn requires_precise(&self) -> bool { + false + } + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/core/resolver/encode.rs b/collector/compile-benchmarks/cargo/src/cargo/core/resolver/encode.rs new file mode 100644 index 000000000..a0cf0bac0 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/core/resolver/encode.rs @@ -0,0 +1,420 @@ +use std::collections::{HashMap, HashSet, BTreeMap}; +use std::fmt; +use std::str::FromStr; + +use serde::ser; +use serde::de; + +use core::{Package, PackageId, SourceId, Workspace, Dependency}; +use util::{Graph, Config, internal}; +use util::errors::{CargoResult, CargoResultExt, CargoError}; + +use super::Resolve; + +#[derive(Serialize, Deserialize, Debug)] +pub struct EncodableResolve { + package: Option>, + /// `root` is optional to allow backward compatibility. 
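+    ///
+    /// A sketch of the legacy layout this accepts (old lock files wrote the
+    /// workspace root as its own section instead of a `[[package]]` entry;
+    /// values hypothetical):
+    ///
+    /// ```toml
+    /// [root]
+    /// name = "foo"
+    /// version = "0.1.0"
+    /// ```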
+ root: Option, + metadata: Option, + + #[serde(default, skip_serializing_if = "Patch::is_empty")] + patch: Patch, +} + +#[derive(Serialize, Deserialize, Debug, Default)] +struct Patch { + unused: Vec, +} + +pub type Metadata = BTreeMap; + +impl EncodableResolve { + pub fn into_resolve(self, ws: &Workspace) -> CargoResult { + let path_deps = build_path_deps(ws); + + let packages = { + let mut packages = self.package.unwrap_or_default(); + if let Some(root) = self.root { + packages.insert(0, root); + } + packages + }; + + // `PackageId`s in the lock file don't include the `source` part + // for workspace members, so we reconstruct proper ids. + let (live_pkgs, all_pkgs) = { + let mut live_pkgs = HashMap::new(); + let mut all_pkgs = HashSet::new(); + for pkg in packages.iter() { + let enc_id = EncodablePackageId { + name: pkg.name.clone(), + version: pkg.version.clone(), + source: pkg.source.clone(), + }; + + if !all_pkgs.insert(enc_id.clone()) { + return Err(internal(format!("package `{}` is specified twice in the lockfile", + pkg.name))); + } + let id = match pkg.source.as_ref().or_else(|| path_deps.get(&pkg.name)) { + // We failed to find a local package in the workspace. + // It must have been removed and should be ignored. + None => { + debug!("path dependency now missing {} v{}", + pkg.name, + pkg.version); + continue + } + Some(source) => PackageId::new(&pkg.name, &pkg.version, source)? + }; + + assert!(live_pkgs.insert(enc_id, (id, pkg)).is_none()) + } + (live_pkgs, all_pkgs) + }; + + let lookup_id = |enc_id: &EncodablePackageId| -> CargoResult> { + match live_pkgs.get(enc_id) { + Some(&(ref id, _)) => Ok(Some(id.clone())), + None => if all_pkgs.contains(enc_id) { + // Package is found in the lockfile, but it is + // no longer a member of the workspace. + Ok(None) + } else { + Err(internal(format!("package `{}` is specified as a dependency, \ + but is missing from the package list", enc_id))) + } + } + }; + + let g = { + let mut g = Graph::new(); + + for &(ref id, _) in live_pkgs.values() { + g.add(id.clone(), &[]); + } + + for &(ref id, pkg) in live_pkgs.values() { + let deps = match pkg.dependencies { + Some(ref deps) => deps, + None => continue + }; + + for edge in deps.iter() { + if let Some(to_depend_on) = lookup_id(edge)? { + g.link(id.clone(), to_depend_on); + } + } + } + g + }; + + let replacements = { + let mut replacements = HashMap::new(); + for &(ref id, pkg) in live_pkgs.values() { + if let Some(ref replace) = pkg.replace { + assert!(pkg.dependencies.is_none()); + if let Some(replace_id) = lookup_id(replace)? { + replacements.insert(id.clone(), replace_id); + } + } + } + replacements + }; + + let mut metadata = self.metadata.unwrap_or_default(); + + // Parse out all package checksums. After we do this we can be in a few + // situations: + // + // * We parsed no checksums. In this situation we're dealing with an old + // lock file and we're gonna fill them all in. + // * We parsed some checksums, but not one for all packages listed. It + // could have been the case that some were listed, then an older Cargo + // client added more dependencies, and now we're going to fill in the + // missing ones. + // * There are too many checksums listed, indicative of an older Cargo + // client removing a package but not updating the checksums listed. + // + // In all of these situations they're part of normal usage, so we don't + // really worry about it. We just try to slurp up as many checksums as + // possible. 
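+        //
+        // For instance (hypothetical entry), the metadata table can carry:
+        //
+        //     "checksum foo 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)"
+        //         => "e3b0c44298fc1c149afbf4c8996fb924..."
+        //
+        // The loop below peels off the `checksum ` prefix, parses the rest as
+        // an `EncodablePackageId`, and files the value away by package id.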
+ let mut checksums = HashMap::new(); + let prefix = "checksum "; + let mut to_remove = Vec::new(); + for (k, v) in metadata.iter().filter(|p| p.0.starts_with(prefix)) { + to_remove.push(k.to_string()); + let k = &k[prefix.len()..]; + let enc_id: EncodablePackageId = k.parse().chain_err(|| { + internal("invalid encoding of checksum in lockfile") + })?; + let id = match lookup_id(&enc_id) { + Ok(Some(id)) => id, + _ => continue, + }; + + let v = if v == "" { + None + } else { + Some(v.to_string()) + }; + checksums.insert(id, v); + } + + for k in to_remove { + metadata.remove(&k); + } + + let mut unused_patches = Vec::new(); + for pkg in self.patch.unused { + let id = match pkg.source.as_ref().or_else(|| path_deps.get(&pkg.name)) { + Some(src) => PackageId::new(&pkg.name, &pkg.version, src)?, + None => continue, + }; + unused_patches.push(id); + } + + Ok(Resolve { + graph: g, + empty_features: HashSet::new(), + features: HashMap::new(), + replacements: replacements, + checksums: checksums, + metadata: metadata, + unused_patches: unused_patches, + }) + } +} + +fn build_path_deps(ws: &Workspace) -> HashMap { + // If a crate is *not* a path source, then we're probably in a situation + // such as `cargo install` with a lock file from a remote dependency. In + // that case we don't need to fixup any path dependencies (as they're not + // actually path dependencies any more), so we ignore them. + let members = ws.members().filter(|p| { + p.package_id().source_id().is_path() + }).collect::>(); + + let mut ret = HashMap::new(); + let mut visited = HashSet::new(); + for member in members.iter() { + ret.insert(member.package_id().name().to_string(), + member.package_id().source_id().clone()); + visited.insert(member.package_id().source_id().clone()); + } + for member in members.iter() { + build_pkg(member, ws.config(), &mut ret, &mut visited); + } + for (_, deps) in ws.root_patch() { + for dep in deps { + build_dep(dep, ws.config(), &mut ret, &mut visited); + } + } + for &(_, ref dep) in ws.root_replace() { + build_dep(dep, ws.config(), &mut ret, &mut visited); + } + + return ret; + + fn build_pkg(pkg: &Package, + config: &Config, + ret: &mut HashMap, + visited: &mut HashSet) { + for dep in pkg.dependencies() { + build_dep(dep, config, ret, visited); + } + } + + fn build_dep(dep: &Dependency, + config: &Config, + ret: &mut HashMap, + visited: &mut HashSet) { + let id = dep.source_id(); + if visited.contains(id) || !id.is_path() { + return + } + let path = match id.url().to_file_path() { + Ok(p) => p.join("Cargo.toml"), + Err(_) => return, + }; + let pkg = match Package::for_path(&path, config) { + Ok(p) => p, + Err(_) => return, + }; + ret.insert(pkg.name().to_string(), + pkg.package_id().source_id().clone()); + visited.insert(pkg.package_id().source_id().clone()); + build_pkg(&pkg, config, ret, visited); + } +} + +impl Patch { + fn is_empty(&self) -> bool { + self.unused.is_empty() + } +} + +#[derive(Serialize, Deserialize, Debug, PartialOrd, Ord, PartialEq, Eq)] +pub struct EncodableDependency { + name: String, + version: String, + source: Option, + dependencies: Option>, + replace: Option, +} + +#[derive(Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Clone)] +pub struct EncodablePackageId { + name: String, + version: String, + source: Option +} + +impl fmt::Display for EncodablePackageId { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{} {}", self.name, self.version)?; + if let Some(ref s) = self.source { + write!(f, " ({})", s.to_url())?; + } + Ok(()) + } +} + +impl FromStr 
for EncodablePackageId { + type Err = CargoError; + + fn from_str(s: &str) -> CargoResult { + let mut s = s.splitn(3, ' '); + let name = s.next().unwrap(); + let version = s.next().ok_or_else(|| { + internal("invalid serialized PackageId") + })?; + let source_id = match s.next() { + Some(s) => { + if s.starts_with('(') && s.ends_with(')') { + Some(SourceId::from_url(&s[1..s.len() - 1])?) + } else { + bail!("invalid serialized PackageId") + } + } + None => None, + }; + + Ok(EncodablePackageId { + name: name.to_string(), + version: version.to_string(), + source: source_id + }) + } +} + +impl ser::Serialize for EncodablePackageId { + fn serialize(&self, s: S) -> Result + where S: ser::Serializer, + { + s.collect_str(self) + } +} + +impl<'de> de::Deserialize<'de> for EncodablePackageId { + fn deserialize(d: D) -> Result + where D: de::Deserializer<'de>, + { + String::deserialize(d).and_then(|string| { + string.parse::() + .map_err(de::Error::custom) + }) + } +} + +pub struct WorkspaceResolve<'a, 'cfg: 'a> { + pub ws: &'a Workspace<'cfg>, + pub resolve: &'a Resolve, +} + +impl<'a, 'cfg> ser::Serialize for WorkspaceResolve<'a, 'cfg> { + fn serialize(&self, s: S) -> Result + where S: ser::Serializer, + { + let mut ids: Vec<&PackageId> = self.resolve.graph.iter().collect(); + ids.sort(); + + let encodable = ids.iter().filter_map(|&id| { + Some(encodable_resolve_node(id, self.resolve)) + }).collect::>(); + + let mut metadata = self.resolve.metadata.clone(); + + for id in ids.iter().filter(|id| !id.source_id().is_path()) { + let checksum = match self.resolve.checksums[*id] { + Some(ref s) => &s[..], + None => "", + }; + let id = encodable_package_id(id); + metadata.insert(format!("checksum {}", id.to_string()), + checksum.to_string()); + } + + let metadata = if metadata.is_empty() { None } else { Some(metadata) }; + + let patch = Patch { + unused: self.resolve.unused_patches().iter().map(|id| { + EncodableDependency { + name: id.name().to_string(), + version: id.version().to_string(), + source: encode_source(id.source_id()), + dependencies: None, + replace: None, + } + }).collect(), + }; + EncodableResolve { + package: Some(encodable), + root: None, + metadata: metadata, + patch: patch, + }.serialize(s) + } +} + +fn encodable_resolve_node(id: &PackageId, resolve: &Resolve) + -> EncodableDependency { + let (replace, deps) = match resolve.replacement(id) { + Some(id) => { + (Some(encodable_package_id(id)), None) + } + None => { + let mut deps = resolve.graph.edges(id) + .into_iter().flat_map(|a| a) + .map(encodable_package_id) + .collect::>(); + deps.sort(); + (None, Some(deps)) + } + }; + + EncodableDependency { + name: id.name().to_string(), + version: id.version().to_string(), + source: encode_source(id.source_id()), + dependencies: deps, + replace: replace, + } +} + +fn encodable_package_id(id: &PackageId) -> EncodablePackageId { + EncodablePackageId { + name: id.name().to_string(), + version: id.version().to_string(), + source: encode_source(id.source_id()).map(|s| s.with_precise(None)), + } +} + +fn encode_source(id: &SourceId) -> Option { + if id.is_path() { + None + } else { + Some(id.clone()) + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/core/resolver/mod.rs b/collector/compile-benchmarks/cargo/src/cargo/core/resolver/mod.rs new file mode 100644 index 000000000..4c938f87e --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/core/resolver/mod.rs @@ -0,0 +1,1265 @@ +//! Resolution of the entire dependency graph for a crate +//! +//! 
This module implements the core logic in taking the world of crates and +//! constraints and creating a resolved graph with locked versions for all +//! crates and their dependencies. This is separate from the registry module +//! which is more worried about discovering crates from various sources, this +//! module just uses the Registry trait as a source to learn about crates from. +//! +//! Actually solving a constraint graph is an NP-hard problem. This algorithm +//! is basically a nice heuristic to make sure we get roughly the best answer +//! most of the time. The constraints that we're working with are: +//! +//! 1. Each crate can have any number of dependencies. Each dependency can +//! declare a version range that it is compatible with. +//! 2. Crates can be activated with multiple version (e.g. show up in the +//! dependency graph twice) so long as each pairwise instance have +//! semver-incompatible versions. +//! +//! The algorithm employed here is fairly simple, we simply do a DFS, activating +//! the "newest crate" (highest version) first and then going to the next +//! option. The heuristics we employ are: +//! +//! * Never try to activate a crate version which is incompatible. This means we +//! only try crates which will actually satisfy a dependency and we won't ever +//! try to activate a crate that's semver compatible with something else +//! activated (as we're only allowed to have one). +//! * Always try to activate the highest version crate first. The default +//! dependency in Cargo (e.g. when you write `foo = "0.1.2"`) is +//! semver-compatible, so selecting the highest version possible will allow us +//! to hopefully satisfy as many dependencies at once. +//! +//! Beyond that, what's implemented below is just a naive backtracking version +//! which should in theory try all possible combinations of dependencies and +//! versions to see if one works. The first resolution that works causes +//! everything to bail out immediately and return success, and only if *nothing* +//! works do we actually return an error up the stack. +//! +//! ## Performance +//! +//! Note that this is a relatively performance-critical portion of Cargo. The +//! data that we're processing is proportional to the size of the dependency +//! graph, which can often be quite large (e.g. take a look at Servo). To make +//! matters worse the DFS algorithm we're implemented is inherently quite +//! inefficient. When we add the requirement of backtracking on top it means +//! that we're implementing something that probably shouldn't be allocating all +//! over the place. + +use std::cmp::Ordering; +use std::collections::{HashSet, HashMap, BinaryHeap, BTreeMap}; +use std::iter::FromIterator; +use std::fmt; +use std::ops::Range; +use std::rc::Rc; + +use semver; +use url::Url; + +use core::{PackageId, Registry, SourceId, Summary, Dependency}; +use core::PackageIdSpec; +use util::config::Config; +use util::Graph; +use util::errors::{CargoResult, CargoError}; +use util::profile; +use util::graph::{Nodes, Edges}; + +pub use self::encode::{EncodableResolve, EncodableDependency, EncodablePackageId}; +pub use self::encode::{Metadata, WorkspaceResolve}; + +mod encode; + +/// Represents a fully resolved package dependency graph. Each node in the graph +/// is a package and edges represent dependencies between packages. +/// +/// Each instance of `Resolve` also understands the full set of features used +/// for each package. 
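+///
+/// A sketch of read-only use, with a `resolve` value assumed in scope:
+///
+/// ```ignore
+/// let id = resolve.query("foo")?;   // look up a package id by spec string
+/// for dep in resolve.deps(id) {     // replacement-aware dependency edges
+///     println!("{} -> {}", id, dep);
+/// }
+/// ```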
+#[derive(PartialEq)]
+pub struct Resolve {
+    graph: Graph<PackageId>,
+    replacements: HashMap<PackageId, PackageId>,
+    empty_features: HashSet<String>,
+    features: HashMap<PackageId, HashSet<String>>,
+    checksums: HashMap<PackageId, Option<String>>,
+    metadata: Metadata,
+    unused_patches: Vec<PackageId>,
+}
+
+pub struct Deps<'a> {
+    edges: Option<Edges<'a, PackageId>>,
+    resolve: &'a Resolve,
+}
+
+pub struct DepsNotReplaced<'a> {
+    edges: Option<Edges<'a, PackageId>>,
+}
+
+#[derive(Clone, Copy)]
+pub enum Method<'a> {
+    Everything,
+    Required {
+        dev_deps: bool,
+        features: &'a [String],
+        uses_default_features: bool,
+    },
+}
+
+// Information about the dependencies for a crate, a tuple of:
+//
+// (dependency info, candidates, features activated)
+type DepInfo = (Dependency, Rc<Vec<Candidate>>, Rc<Vec<String>>);
+
+#[derive(Clone)]
+struct Candidate {
+    summary: Summary,
+    replace: Option<Summary>,
+}
+
+impl Resolve {
+    /// Resolves one of the paths from the given dependent package up to
+    /// the root.
+    pub fn path_to_top(&self, pkg: &PackageId) -> Vec<&PackageId> {
+        let mut result = Vec::new();
+        let mut pkg = pkg;
+        while let Some(pulling) = self.graph
+                                      .get_nodes()
+                                      .iter()
+                                      .filter_map(|(pulling, pulled)|
+                                          if pulled.contains(pkg) {
+                                              Some(pulling)
+                                          } else {
+                                              None
+                                          })
+                                      .nth(0) {
+            result.push(pulling);
+            pkg = pulling;
+        }
+        result
+    }
+
+    pub fn register_used_patches(&mut self,
+                                 patches: &HashMap<Url, Vec<Summary>>) {
+        for summary in patches.values().flat_map(|v| v) {
+            if self.iter().any(|id| id == summary.package_id()) {
+                continue
+            }
+            self.unused_patches.push(summary.package_id().clone());
+        }
+    }
+
+    pub fn merge_from(&mut self, previous: &Resolve) -> CargoResult<()> {
+        // Given a previous instance of resolve, it should be forbidden to ever
+        // have checksums which *differ*. If the same package id has differing
+        // checksums, then something has gone wrong such as:
+        //
+        // * Something got seriously corrupted
+        // * A "mirror" isn't actually a mirror as some changes were made
+        // * A replacement source wasn't actually a replacement, some changes
+        //   were made
+        //
+        // In all of these cases, we want to report an error to indicate that
+        // something is awry. Normal execution (especially just using crates.io)
+        // should never run into this.
+        for (id, cksum) in previous.checksums.iter() {
+            if let Some(mine) = self.checksums.get(id) {
+                if mine == cksum {
+                    continue
+                }
+
+                // If the previous checksum wasn't calculated, the current
+                // checksum is `Some`. This may indicate that a source was
+                // erroneously replaced or was replaced with something that
+                // desires stronger checksum guarantees than can be afforded
+                // elsewhere.
+                if cksum.is_none() {
+                    bail!("\
+checksum for `{}` was not previously calculated, but a checksum could now \
+be calculated
+
+this could be indicative of a few possible situations:
+
+    * the source `{}` did not previously support checksums,
+      but was replaced with one that does
+    * newer Cargo implementations know how to checksum this source, but this
+      older implementation does not
+    * the lock file is corrupt
+", id, id.source_id())
+
+                // If our checksum hasn't been calculated, then it could mean
+                // that future Cargo figured out how to checksum something or
+                // more realistically we were overridden with a source that does
+                // not have checksums.
+ } else if mine.is_none() { + bail!("\ +checksum for `{}` could not be calculated, but a checksum is listed in \ +the existing lock file + +this could be indicative of a few possible situations: + + * the source `{}` supports checksums, + but was replaced with one that doesn't + * the lock file is corrupt + +unable to verify that `{0}` is the same as when the lockfile was generated +", id, id.source_id()) + + // If the checksums aren't equal, and neither is None, then they + // must both be Some, in which case the checksum now differs. + // That's quite bad! + } else { + bail!("\ +checksum for `{}` changed between lock files + +this could be indicative of a few possible errors: + + * the lock file is corrupt + * a replacement source in use (e.g. a mirror) returned a different checksum + * the source itself may be corrupt in one way or another + +unable to verify that `{0}` is the same as when the lockfile was generated +", id); + } + } + } + + // Be sure to just copy over any unknown metadata. + self.metadata = previous.metadata.clone(); + Ok(()) + } + + pub fn iter(&self) -> Nodes { + self.graph.iter() + } + + pub fn deps(&self, pkg: &PackageId) -> Deps { + Deps { edges: self.graph.edges(pkg), resolve: self } + } + + pub fn deps_not_replaced(&self, pkg: &PackageId) -> DepsNotReplaced { + DepsNotReplaced { edges: self.graph.edges(pkg) } + } + + pub fn replacement(&self, pkg: &PackageId) -> Option<&PackageId> { + self.replacements.get(pkg) + } + + pub fn replacements(&self) -> &HashMap { + &self.replacements + } + + pub fn features(&self, pkg: &PackageId) -> &HashSet { + self.features.get(pkg).unwrap_or(&self.empty_features) + } + + pub fn features_sorted(&self, pkg: &PackageId) -> Vec<&str> { + let mut v = Vec::from_iter(self.features(pkg).iter().map(|s| s.as_ref())); + v.sort(); + v + } + + pub fn query(&self, spec: &str) -> CargoResult<&PackageId> { + PackageIdSpec::query_str(spec, self.iter()) + } + + pub fn unused_patches(&self) -> &[PackageId] { + &self.unused_patches + } +} + +impl fmt::Debug for Resolve { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "graph: {:?}\n", self.graph)?; + write!(fmt, "\nfeatures: {{\n")?; + for (pkg, features) in &self.features { + write!(fmt, " {}: {:?}\n", pkg, features)?; + } + write!(fmt, "}}") + } +} + +impl<'a> Iterator for Deps<'a> { + type Item = &'a PackageId; + + fn next(&mut self) -> Option<&'a PackageId> { + self.edges.as_mut() + .and_then(|e| e.next()) + .map(|id| self.resolve.replacement(id).unwrap_or(id)) + } +} + +impl<'a> Iterator for DepsNotReplaced<'a> { + type Item = &'a PackageId; + + fn next(&mut self) -> Option<&'a PackageId> { + self.edges.as_mut().and_then(|e| e.next()) + } +} + +struct RcList { + head: Option)>> +} + +impl RcList { + fn new() -> RcList { + RcList { head: None } + } + + fn push(&mut self, data: T) { + let node = Rc::new((data, RcList { head: self.head.take() })); + self.head = Some(node); + } +} + +// Not derived to avoid `T: Clone` +impl Clone for RcList { + fn clone(&self) -> RcList { + RcList { head: self.head.clone() } + } +} + +// Avoid stack overflows on drop by turning recursion into a loop +impl Drop for RcList { + fn drop(&mut self) { + let mut cur = self.head.take(); + while let Some(head) = cur { + match Rc::try_unwrap(head) { + Ok((_data, mut next)) => cur = next.head.take(), + Err(_) => break, + } + } + } +} + +enum GraphNode { + Add(PackageId), + Link(PackageId, PackageId), +} + +// A `Context` is basically a bunch of local resolution information which is +// kept around 
for all `BacktrackFrame` instances. As a result, this runs the
+// risk of being cloned *a lot* so we want to make this as cheap to clone as
+// possible.
+#[derive(Clone)]
+struct Context<'a> {
+    // TODO: Both this and the map below are super expensive to clone. We should
+    // switch to persistent hash maps if we can at some point or otherwise
+    // make these much cheaper to clone in general.
+    activations: Activations,
+    resolve_features: HashMap<PackageId, HashSet<String>>,
+
+    // These are two cheaply-cloneable lists (O(1) clone) which are effectively
+    // hash maps but are built up as "construction lists". We'll iterate these
+    // at the very end and actually construct the map that we're making.
+    resolve_graph: RcList<GraphNode>,
+    resolve_replacements: RcList<(PackageId, PackageId)>,
+
+    replacements: &'a [(PackageIdSpec, Dependency)],
+
+    // These warnings are printed after resolution.
+    warnings: RcList<String>,
+}
+
+type Activations = HashMap<String, HashMap<SourceId, Vec<Summary>>>;
+
+/// Builds the list of all packages required to build the first argument.
+pub fn resolve(summaries: &[(Summary, Method)],
+               replacements: &[(PackageIdSpec, Dependency)],
+               registry: &mut Registry,
+               config: Option<&Config>) -> CargoResult<Resolve> {
+    let cx = Context {
+        resolve_graph: RcList::new(),
+        resolve_features: HashMap::new(),
+        resolve_replacements: RcList::new(),
+        activations: HashMap::new(),
+        replacements: replacements,
+        warnings: RcList::new(),
+    };
+    let _p = profile::start("resolving");
+    let cx = activate_deps_loop(cx, registry, summaries)?;
+
+    let mut resolve = Resolve {
+        graph: cx.graph(),
+        empty_features: HashSet::new(),
+        checksums: HashMap::new(),
+        metadata: BTreeMap::new(),
+        replacements: cx.resolve_replacements(),
+        features: cx.resolve_features.iter().map(|(k, v)| {
+            (k.clone(), v.clone())
+        }).collect(),
+        unused_patches: Vec::new(),
+    };
+
+    for summary in cx.activations.values()
+                                 .flat_map(|v| v.values())
+                                 .flat_map(|v| v.iter()) {
+        let cksum = summary.checksum().map(|s| s.to_string());
+        resolve.checksums.insert(summary.package_id().clone(), cksum);
+    }
+
+    check_cycles(&resolve, &cx.activations)?;
+    trace!("resolved: {:?}", resolve);
+
+    // If we have a shell, emit warnings about required deps used as feature.
+    if let Some(config) = config {
+        let mut shell = config.shell();
+        let mut warnings = &cx.warnings;
+        while let Some(ref head) = warnings.head {
+            shell.warn(&head.0)?;
+            warnings = &head.1;
+        }
+    }
+
+    Ok(resolve)
+}
+
+/// Attempts to activate the summary `candidate` in the context `cx`.
+///
+/// This function will pull dependency summaries from the registry provided, and
+/// the dependencies of the package will be determined by the `method` provided.
+/// If `candidate` was activated, this function returns the dependency frame to
+/// iterate through next.
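+///
+/// A sketch of the call pattern used by `activate_deps_loop` below (the
+/// `cx`, `registry`, `summary`, and `method` values are assumed in scope):
+///
+/// ```ignore
+/// let candidate = Candidate { summary: summary.clone(), replace: None };
+/// if let Some(frame) = activate(&mut cx, registry, None, candidate, &method)? {
+///     remaining_deps.push(frame);
+/// }
+/// ```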
+fn activate(cx: &mut Context,
+            registry: &mut Registry,
+            parent: Option<&Summary>,
+            candidate: Candidate,
+            method: &Method)
+            -> CargoResult<Option<DepsFrame>> {
+    if let Some(parent) = parent {
+        cx.resolve_graph.push(GraphNode::Link(parent.package_id().clone(),
+                                              candidate.summary.package_id().clone()));
+    }
+
+    let activated = cx.flag_activated(&candidate.summary, method);
+
+    let candidate = match candidate.replace {
+        Some(replace) => {
+            cx.resolve_replacements.push((candidate.summary.package_id().clone(),
+                                          replace.package_id().clone()));
+            if cx.flag_activated(&replace, method) && activated {
+                return Ok(None);
+            }
+            trace!("activating {} (replacing {})", replace.package_id(),
+                   candidate.summary.package_id());
+            replace
+        }
+        None => {
+            if activated {
+                return Ok(None)
+            }
+            trace!("activating {}", candidate.summary.package_id());
+            candidate.summary
+        }
+    };
+
+    let deps = cx.build_deps(registry, &candidate, method)?;
+
+    Ok(Some(DepsFrame {
+        parent: candidate,
+        remaining_siblings: RcVecIter::new(Rc::new(deps)),
+    }))
+}
+
+struct RcVecIter<T> {
+    vec: Rc<Vec<T>>,
+    rest: Range<usize>,
+}
+
+impl<T> RcVecIter<T> {
+    fn new(vec: Rc<Vec<T>>) -> RcVecIter<T> {
+        RcVecIter {
+            rest: 0..vec.len(),
+            vec: vec,
+        }
+    }
+
+    fn cur_index(&self) -> usize {
+        self.rest.start - 1
+    }
+}
+
+// Not derived to avoid `T: Clone`
+impl<T> Clone for RcVecIter<T> {
+    fn clone(&self) -> RcVecIter<T> {
+        RcVecIter {
+            vec: self.vec.clone(),
+            rest: self.rest.clone(),
+        }
+    }
+}
+
+impl<T> Iterator for RcVecIter<T> where T: Clone {
+    type Item = (usize, T);
+
+    fn next(&mut self) -> Option<(usize, T)> {
+        self.rest.next().and_then(|i| {
+            self.vec.get(i).map(|val| (i, val.clone()))
+        })
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.rest.size_hint()
+    }
+}
+
+#[derive(Clone)]
+struct DepsFrame {
+    parent: Summary,
+    remaining_siblings: RcVecIter<DepInfo>,
+}
+
+impl DepsFrame {
+    /// Returns the least number of candidates that any of this frame's siblings
+    /// has.
+    ///
+    /// The `remaining_siblings` array is already sorted with the smallest
+    /// number of candidates at the front, so we just return the number of
+    /// candidates in that entry.
+    fn min_candidates(&self) -> usize {
+        self.remaining_siblings.clone().next().map(|(_, (_, candidates, _))| {
+            candidates.len()
+        }).unwrap_or(0)
+    }
+}
+
+impl PartialEq for DepsFrame {
+    fn eq(&self, other: &DepsFrame) -> bool {
+        self.min_candidates() == other.min_candidates()
+    }
+}
+
+impl Eq for DepsFrame {}
+
+impl PartialOrd for DepsFrame {
+    fn partial_cmp(&self, other: &DepsFrame) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for DepsFrame {
+    fn cmp(&self, other: &DepsFrame) -> Ordering {
+        // the frame with the sibling that has the least number of candidates
+        // needs to get bubbled up to the top of the heap we use below, so
+        // reverse the order of the comparison here.
+        other.min_candidates().cmp(&self.min_candidates())
+    }
+}
+
+struct BacktrackFrame<'a> {
+    context_backup: Context<'a>,
+    deps_backup: BinaryHeap<DepsFrame>,
+    remaining_candidates: RemainingCandidates,
+    parent: Summary,
+    dep: Dependency,
+    features: Rc<Vec<String>>,
+}
+
+#[derive(Clone)]
+struct RemainingCandidates {
+    remaining: RcVecIter<Candidate>,
+}
+
+impl RemainingCandidates {
+    fn next(&mut self, prev_active: &[Summary]) -> Option<Candidate> {
+        // Filter the set of candidates based on the previously activated
+        // versions for this dependency. We can actually use a version if it
+        // precisely matches an activated version or if it is otherwise
+        // incompatible with all other activated versions.
Note that we + // define "compatible" here in terms of the semver sense where if + // the left-most nonzero digit is the same they're considered + // compatible. + self.remaining.by_ref().map(|p| p.1).find(|b| { + prev_active.iter().any(|a| *a == b.summary) || + prev_active.iter().all(|a| { + !compatible(a.version(), b.summary.version()) + }) + }) + } +} + +/// Recursively activates the dependencies for `top`, in depth-first order, +/// backtracking across possible candidates for each dependency as necessary. +/// +/// If all dependencies can be activated and resolved to a version in the +/// dependency graph, cx.resolve is returned. +fn activate_deps_loop<'a>(mut cx: Context<'a>, + registry: &mut Registry, + summaries: &[(Summary, Method)]) + -> CargoResult> { + // Note that a `BinaryHeap` is used for the remaining dependencies that need + // activation. This heap is sorted such that the "largest value" is the most + // constrained dependency, or the one with the least candidates. + // + // This helps us get through super constrained portions of the dependency + // graph quickly and hopefully lock down what later larger dependencies can + // use (those with more candidates). + let mut backtrack_stack = Vec::new(); + let mut remaining_deps = BinaryHeap::new(); + for &(ref summary, ref method) in summaries { + debug!("initial activation: {}", summary.package_id()); + let candidate = Candidate { summary: summary.clone(), replace: None }; + remaining_deps.extend(activate(&mut cx, registry, None, candidate, + method)?); + } + + // Main resolution loop, this is the workhorse of the resolution algorithm. + // + // You'll note that a few stacks are maintained on the side, which might + // seem odd when this algorithm looks like it could be implemented + // recursively. While correct, this is implemented iteratively to avoid + // blowing the stack (the recusion depth is proportional to the size of the + // input). + // + // The general sketch of this loop is to run until there are no dependencies + // left to activate, and for each dependency to attempt to activate all of + // its own dependencies in turn. The `backtrack_stack` is a side table of + // backtracking states where if we hit an error we can return to in order to + // attempt to continue resolving. + while let Some(mut deps_frame) = remaining_deps.pop() { + let frame = match deps_frame.remaining_siblings.next() { + Some(sibling) => { + let parent = Summary::clone(&deps_frame.parent); + remaining_deps.push(deps_frame); + (parent, sibling) + } + None => continue, + }; + let (mut parent, (mut cur, (mut dep, candidates, mut features))) = frame; + assert!(!remaining_deps.is_empty()); + + let (next, has_another, remaining_candidates) = { + let prev_active = cx.prev_active(&dep); + trace!("{}[{}]>{} {} candidates", parent.name(), cur, dep.name(), + candidates.len()); + trace!("{}[{}]>{} {} prev activations", parent.name(), cur, + dep.name(), prev_active.len()); + let mut candidates = RemainingCandidates { + remaining: RcVecIter::new(Rc::clone(&candidates)), + }; + (candidates.next(prev_active), + candidates.clone().next(prev_active).is_some(), + candidates) + }; + + // Alright, for each candidate that's gotten this far, it meets the + // following requirements: + // + // 1. The version matches the dependency requirement listed for this + // package + // 2. There are no activated versions for this package which are + // semver-compatible, or there's an activated version which is + // precisely equal to `candidate`. 
+ // + // This means that we're going to attempt to activate each candidate in + // turn. We could possibly fail to activate each candidate, so we try + // each one in turn. + let candidate = match next { + Some(candidate) => { + // We have a candidate. Add an entry to the `backtrack_stack` so + // we can try the next one if this one fails. + if has_another { + backtrack_stack.push(BacktrackFrame { + context_backup: Context::clone(&cx), + deps_backup: >::clone(&remaining_deps), + remaining_candidates: remaining_candidates, + parent: Summary::clone(&parent), + dep: Dependency::clone(&dep), + features: Rc::clone(&features), + }); + } + candidate + } + None => { + // This dependency has no valid candidate. Backtrack until we + // find a dependency that does have a candidate to try, and try + // to activate that one. This resets the `remaining_deps` to + // their state at the found level of the `backtrack_stack`. + trace!("{}[{}]>{} -- no candidates", parent.name(), cur, + dep.name()); + match find_candidate(&mut backtrack_stack, + &mut cx, + &mut remaining_deps, + &mut parent, + &mut cur, + &mut dep, + &mut features) { + None => return Err(activation_error(&cx, registry, &parent, + &dep, + cx.prev_active(&dep), + &candidates)), + Some(candidate) => candidate, + } + } + }; + + let method = Method::Required { + dev_deps: false, + features: &features, + uses_default_features: dep.uses_default_features(), + }; + trace!("{}[{}]>{} trying {}", parent.name(), cur, dep.name(), + candidate.summary.version()); + remaining_deps.extend(activate(&mut cx, registry, Some(&parent), + candidate, &method)?); + } + + Ok(cx) +} + +// Searches up `backtrack_stack` until it finds a dependency with remaining +// candidates. Resets `cx` and `remaining_deps` to that level and returns the +// next candidate. If all candidates have been exhausted, returns None. 
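+//
+// A sketch of its call site in `activate_deps_loop` above, simplified:
+//
+//     match find_candidate(&mut backtrack_stack, &mut cx, &mut remaining_deps,
+//                          &mut parent, &mut cur, &mut dep, &mut features) {
+//         Some(candidate) => candidate,            // retry with this candidate
+//         None => return Err(/* activation_error(...) */),
+//     }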
+fn find_candidate<'a>(backtrack_stack: &mut Vec>, + cx: &mut Context<'a>, + remaining_deps: &mut BinaryHeap, + parent: &mut Summary, + cur: &mut usize, + dep: &mut Dependency, + features: &mut Rc>) -> Option { + while let Some(mut frame) = backtrack_stack.pop() { + let (next, has_another) = { + let prev_active = frame.context_backup.prev_active(&frame.dep); + (frame.remaining_candidates.next(prev_active), + frame.remaining_candidates.clone().next(prev_active).is_some()) + }; + if let Some(candidate) = next { + if has_another { + *cx = frame.context_backup.clone(); + *remaining_deps = frame.deps_backup.clone(); + *parent = frame.parent.clone(); + *dep = frame.dep.clone(); + *features = Rc::clone(&frame.features); + backtrack_stack.push(frame); + } else { + *cx = frame.context_backup; + *remaining_deps = frame.deps_backup; + *parent = frame.parent; + *dep = frame.dep; + *features = frame.features; + } + *cur = remaining_deps.peek().unwrap().remaining_siblings.cur_index(); + return Some(candidate) + } + } + None +} + +fn activation_error(cx: &Context, + registry: &mut Registry, + parent: &Summary, + dep: &Dependency, + prev_active: &[Summary], + candidates: &[Candidate]) -> CargoError { + if !candidates.is_empty() { + let mut msg = format!("failed to select a version for `{}` \ + (required by `{}`):\n\ + all possible versions conflict with \ + previously selected versions of `{}`", + dep.name(), parent.name(), + dep.name()); + let graph = cx.graph(); + 'outer: for v in prev_active.iter() { + for node in graph.iter() { + let edges = match graph.edges(node) { + Some(edges) => edges, + None => continue, + }; + for edge in edges { + if edge != v.package_id() { continue } + + msg.push_str(&format!("\n version {} in use by {}", + v.version(), edge)); + continue 'outer; + } + } + msg.push_str(&format!("\n version {} in use by ??", + v.version())); + } + + msg.push_str(&format!("\n possible versions to select: {}", + candidates.iter() + .map(|v| v.summary.version()) + .map(|v| v.to_string()) + .collect::>() + .join(", "))); + + return msg.into() + } + + // Once we're all the way down here, we're definitely lost in the + // weeds! We didn't actually use any candidates above, so we need to + // give an error message that nothing was found. + // + // Note that we re-query the registry with a new dependency that + // allows any version so we can give some nicer error reporting + // which indicates a few versions that were actually found. + let all_req = semver::VersionReq::parse("*").unwrap(); + let mut new_dep = dep.clone(); + new_dep.set_version_req(all_req); + let mut candidates = match registry.query_vec(&new_dep) { + Ok(candidates) => candidates, + Err(e) => return e, + }; + candidates.sort_by(|a, b| { + b.version().cmp(a.version()) + }); + + let msg = if !candidates.is_empty() { + let versions = { + let mut versions = candidates.iter().take(3).map(|cand| { + cand.version().to_string() + }).collect::>(); + + if candidates.len() > 3 { + versions.push("...".into()); + } + + versions.join(", ") + }; + + let mut msg = format!("no matching version `{}` found for package `{}` \ + (required by `{}`)\n\ + location searched: {}\n\ + versions found: {}", + dep.version_req(), + dep.name(), + parent.name(), + dep.source_id(), + versions); + + // If we have a path dependency with a locked version, then this may + // indicate that we updated a sub-package and forgot to run `cargo + // update`. In this case try to print a helpful error! 
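// [editor's note: illustrative sketch, not part of this patch] The
// `starts_with('=')` test just below keys off the exact version
// requirements that lockfiles pin path dependencies to. With the same
// `semver` crate already used above:
fn locked_requirement_sketch() {
    let req = semver::VersionReq::parse("=0.2.1").unwrap();
    assert!(req.matches(&semver::Version::parse("0.2.1").unwrap()));
    assert!(!req.matches(&semver::Version::parse("0.2.2").unwrap()));
    // A stale exact pin like this one is what the `cargo update` hint
    // below is aimed at.
    assert!(req.to_string().starts_with('='));
}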
+ if dep.source_id().is_path() + && dep.version_req().to_string().starts_with('=') { + msg.push_str("\nconsider running `cargo update` to update \ + a path dependency's locked version"); + } + + msg + } else { + format!("no matching package named `{}` found \ + (required by `{}`)\n\ + location searched: {}\n\ + version required: {}", + dep.name(), parent.name(), + dep.source_id(), + dep.version_req()) + }; + + msg.into() +} + +// Returns if `a` and `b` are compatible in the semver sense. This is a +// commutative operation. +// +// Versions `a` and `b` are compatible if their left-most nonzero digit is the +// same. +fn compatible(a: &semver::Version, b: &semver::Version) -> bool { + if a.major != b.major { return false } + if a.major != 0 { return true } + if a.minor != b.minor { return false } + if a.minor != 0 { return true } + a.patch == b.patch +} + +// Returns a pair of (feature dependencies, all used features) +// +// The feature dependencies map is a mapping of package name to list of features +// enabled. Each package should be enabled, and each package should have the +// specified set of features enabled. The boolean indicates whether this +// package was specifically requested (rather than just requesting features +// *within* this package). +// +// The all used features set is the set of features which this local package had +// enabled, which is later used when compiling to instruct the code what +// features were enabled. +fn build_features<'a>(s: &'a Summary, method: &'a Method) + -> CargoResult<(HashMap<&'a str, (bool, Vec)>, HashSet<&'a str>)> { + let mut deps = HashMap::new(); + let mut used = HashSet::new(); + let mut visited = HashSet::new(); + match *method { + Method::Everything => { + for key in s.features().keys() { + add_feature(s, key, &mut deps, &mut used, &mut visited)?; + } + for dep in s.dependencies().iter().filter(|d| d.is_optional()) { + add_feature(s, dep.name(), &mut deps, &mut used, + &mut visited)?; + } + } + Method::Required { features: requested_features, .. } => { + for feat in requested_features.iter() { + add_feature(s, feat, &mut deps, &mut used, &mut visited)?; + } + } + } + match *method { + Method::Everything | + Method::Required { uses_default_features: true, .. } => { + if s.features().get("default").is_some() { + add_feature(s, "default", &mut deps, &mut used, + &mut visited)?; + } + } + Method::Required { uses_default_features: false, .. } => {} + } + return Ok((deps, used)); + + fn add_feature<'a>(s: &'a Summary, + feat: &'a str, + deps: &mut HashMap<&'a str, (bool, Vec)>, + used: &mut HashSet<&'a str>, + visited: &mut HashSet<&'a str>) -> CargoResult<()> { + if feat.is_empty() { return Ok(()) } + + // If this feature is of the form `foo/bar`, then we just lookup package + // `foo` and enable its feature `bar`. Otherwise this feature is of the + // form `foo` and we need to recurse to enable the feature `foo` for our + // own package, which may end up enabling more features or just enabling + // a dependency. 
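// [editor's note: illustrative sketch, not part of this patch] The
// `splitn(2, '/')` parse that follows, in isolation: "serde/derive"
// names the `derive` feature of the `serde` dependency, while a bare
// "derive" names a feature (or optional dependency) of this package.
fn split_feature_sketch(feat: &str) -> (&str, Option<&str>) {
    let mut parts = feat.splitn(2, '/');
    (parts.next().unwrap(), parts.next())
}
// split_feature_sketch("serde/derive") == ("serde", Some("derive"))
// split_feature_sketch("derive")       == ("derive", None)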
+ let mut parts = feat.splitn(2, '/'); + let feat_or_package = parts.next().unwrap(); + match parts.next() { + Some(feat) => { + let package = feat_or_package; + used.insert(package); + deps.entry(package) + .or_insert((false, Vec::new())) + .1.push(feat.to_string()); + } + None => { + let feat = feat_or_package; + + //if this feature has already been added, then just return Ok + if !visited.insert(feat) { + return Ok(()); + } + + used.insert(feat); + match s.features().get(feat) { + Some(recursive) => { + // This is a feature, add it recursively. + for f in recursive { + if f == feat { + bail!("Cyclic feature dependency: feature `{}` depends \ + on itself", feat); + } + + add_feature(s, f, deps, used, visited)?; + } + } + None => { + // This is a dependency, mark it as explicitly requested. + deps.entry(feat).or_insert((false, Vec::new())).0 = true; + } + } + } + } + Ok(()) + } +} + +impl<'a> Context<'a> { + // Activate this summary by inserting it into our list of known activations. + // + // Returns if this summary with the given method is already activated. + fn flag_activated(&mut self, + summary: &Summary, + method: &Method) -> bool { + let id = summary.package_id(); + let prev = self.activations + .entry(id.name().to_string()) + .or_insert_with(HashMap::new) + .entry(id.source_id().clone()) + .or_insert(Vec::new()); + if !prev.iter().any(|c| c == summary) { + self.resolve_graph.push(GraphNode::Add(id.clone())); + prev.push(summary.clone()); + return false + } + debug!("checking if {} is already activated", summary.package_id()); + let (features, use_default) = match *method { + Method::Required { features, uses_default_features, .. } => { + (features, uses_default_features) + } + Method::Everything => return false, + }; + + let has_default_feature = summary.features().contains_key("default"); + match self.resolve_features.get(id) { + Some(prev) => { + features.iter().all(|f| prev.contains(f)) && + (!use_default || prev.contains("default") || + !has_default_feature) + } + None => features.is_empty() && (!use_default || !has_default_feature) + } + } + + fn build_deps(&mut self, + registry: &mut Registry, + candidate: &Summary, + method: &Method) -> CargoResult> { + // First, figure out our set of dependencies based on the requsted set + // of features. This also calculates what features we're going to enable + // for our own dependencies. + let deps = self.resolve_features(candidate, method)?; + + // Next, transform all dependencies into a list of possible candidates + // which can satisfy that dependency. + let mut deps = deps.into_iter().map(|(dep, features)| { + let mut candidates = self.query(registry, &dep)?; + // When we attempt versions for a package, we'll want to start at + // the maximum version and work our way down. + candidates.sort_by(|a, b| { + b.summary.version().cmp(a.summary.version()) + }); + Ok((dep, Rc::new(candidates), Rc::new(features))) + }).collect::>>()?; + + // Attempt to resolve dependencies with fewer candidates before trying + // dependencies with more candidates. This way if the dependency with + // only one candidate can't be resolved we don't have to do a bunch of + // work before we figure that out. + deps.sort_by_key(|&(_, ref a, _)| a.len()); + + Ok(deps) + } + + /// Queries the `registry` to return a list of candidates for `dep`. + /// + /// This method is the location where overrides are taken into account. 
If + /// any candidates are returned which match an override then the override is + /// applied by performing a second query for what the override should + /// return. + fn query(&self, + registry: &mut Registry, + dep: &Dependency) -> CargoResult> { + let mut ret = Vec::new(); + registry.query(dep, &mut |s| { + ret.push(Candidate { summary: s, replace: None }); + })?; + for candidate in ret.iter_mut() { + let summary = &candidate.summary; + + let mut potential_matches = self.replacements.iter() + .filter(|&&(ref spec, _)| spec.matches(summary.package_id())); + + let &(ref spec, ref dep) = match potential_matches.next() { + None => continue, + Some(replacement) => replacement, + }; + debug!("found an override for {} {}", dep.name(), dep.version_req()); + + let mut summaries = registry.query_vec(dep)?.into_iter(); + let s = summaries.next().ok_or_else(|| { + format!("no matching package for override `{}` found\n\ + location searched: {}\n\ + version required: {}", + spec, dep.source_id(), dep.version_req()) + })?; + let summaries = summaries.collect::>(); + if !summaries.is_empty() { + let bullets = summaries.iter().map(|s| { + format!(" * {}", s.package_id()) + }).collect::>(); + bail!("the replacement specification `{}` matched \ + multiple packages:\n * {}\n{}", spec, s.package_id(), + bullets.join("\n")); + } + + // The dependency should be hard-coded to have the same name and an + // exact version requirement, so both of these assertions should + // never fail. + assert_eq!(s.version(), summary.version()); + assert_eq!(s.name(), summary.name()); + + let replace = if s.source_id() == summary.source_id() { + debug!("Preventing\n{:?}\nfrom replacing\n{:?}", summary, s); + None + } else { + Some(s) + }; + let matched_spec = spec.clone(); + + // Make sure no duplicates + if let Some(&(ref spec, _)) = potential_matches.next() { + bail!("overlapping replacement specifications found:\n\n \ + * {}\n * {}\n\nboth specifications match: {}", + matched_spec, spec, summary.package_id()); + } + + for dep in summary.dependencies() { + debug!("\t{} => {}", dep.name(), dep.version_req()); + } + + candidate.replace = replace; + } + Ok(ret) + } + + fn prev_active(&self, dep: &Dependency) -> &[Summary] { + self.activations.get(dep.name()) + .and_then(|v| v.get(dep.source_id())) + .map(|v| &v[..]) + .unwrap_or(&[]) + } + + /// Return all dependencies and the features we want from them. + fn resolve_features<'b>(&mut self, + s: &'b Summary, + method: &'b Method) + -> CargoResult)>> { + let dev_deps = match *method { + Method::Everything => true, + Method::Required { dev_deps, .. } => dev_deps, + }; + + // First, filter by dev-dependencies + let deps = s.dependencies(); + let deps = deps.iter().filter(|d| d.is_transitive() || dev_deps); + + let (mut feature_deps, used_features) = build_features(s, method)?; + let mut ret = Vec::new(); + + // Next, collect all actually enabled dependencies and their features. + for dep in deps { + // Skip optional dependencies, but not those enabled through a feature + if dep.is_optional() && !feature_deps.contains_key(dep.name()) { + continue + } + // So we want this dependency. Move the features we want from `feature_deps` + // to `ret`. + let base = feature_deps.remove(dep.name()).unwrap_or((false, vec![])); + if !dep.is_optional() && base.0 { + self.warnings.push( + format!("Package `{}` does not have feature `{}`. It has a required dependency \ + with that name, but only optional dependencies can be used as features. 
\ + This is currently a warning to ease the transition, but it will become an \ + error in the future.", + s.package_id(), dep.name()) + ); + } + let mut base = base.1; + base.extend(dep.features().iter().cloned()); + for feature in base.iter() { + if feature.contains("/") { + bail!("feature names may not contain slashes: `{}`", feature); + } + } + ret.push((dep.clone(), base)); + } + + // Any remaining entries in feature_deps are bugs in that the package does not actually + // have those dependencies. We classified them as dependencies in the first place + // because there is no such feature, either. + if !feature_deps.is_empty() { + let unknown = feature_deps.keys().map(|s| &s[..]) + .collect::>(); + let features = unknown.join(", "); + bail!("Package `{}` does not have these features: `{}`", + s.package_id(), features) + } + + // Record what list of features is active for this package. + if !used_features.is_empty() { + let pkgid = s.package_id(); + + let set = self.resolve_features.entry(pkgid.clone()) + .or_insert_with(HashSet::new); + for feature in used_features { + if !set.contains(feature) { + set.insert(feature.to_string()); + } + } + } + + Ok(ret) + } + + fn resolve_replacements(&self) -> HashMap { + let mut replacements = HashMap::new(); + let mut cur = &self.resolve_replacements; + while let Some(ref node) = cur.head { + let (k, v) = node.0.clone(); + replacements.insert(k, v); + cur = &node.1; + } + replacements + } + + fn graph(&self) -> Graph { + let mut graph = Graph::new(); + let mut cur = &self.resolve_graph; + while let Some(ref node) = cur.head { + match node.0 { + GraphNode::Add(ref p) => graph.add(p.clone(), &[]), + GraphNode::Link(ref a, ref b) => graph.link(a.clone(), b.clone()), + } + cur = &node.1; + } + graph + } +} + +fn check_cycles(resolve: &Resolve, activations: &Activations) + -> CargoResult<()> { + let summaries: HashMap<&PackageId, &Summary> = activations.values() + .flat_map(|v| v.values()) + .flat_map(|v| v) + .map(|s| (s.package_id(), s)) + .collect(); + + // Sort packages to produce user friendly deterministic errors. + let all_packages = resolve.iter().collect::>().into_sorted_vec(); + let mut checked = HashSet::new(); + for pkg in all_packages { + if !checked.contains(pkg) { + visit(resolve, + pkg, + &summaries, + &mut HashSet::new(), + &mut checked)? + } + } + return Ok(()); + + fn visit<'a>(resolve: &'a Resolve, + id: &'a PackageId, + summaries: &HashMap<&'a PackageId, &Summary>, + visited: &mut HashSet<&'a PackageId>, + checked: &mut HashSet<&'a PackageId>) + -> CargoResult<()> { + // See if we visited ourselves + if !visited.insert(id) { + bail!("cyclic package dependency: package `{}` depends on itself", + id); + } + + // If we've already checked this node no need to recurse again as we'll + // just conclude the same thing as last time, so we only execute the + // recursive step if we successfully insert into `checked`. + // + // Note that if we hit an intransitive dependency then we clear out the + // visitation list as we can't induce a cycle through transitive + // dependencies. 
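// [editor's note: illustrative sketch, not part of this patch] The
// two-set depth-first search that `visit` performs, reduced to a toy
// graph keyed by `&str`: `visited` tracks the current path (so a
// revisit means a cycle), while `checked` memoizes nodes already
// proven cycle-free.
use std::collections::{HashMap, HashSet};
fn toy_visit<'a>(graph: &HashMap<&'a str, Vec<&'a str>>,
                 node: &'a str,
                 visited: &mut HashSet<&'a str>,
                 checked: &mut HashSet<&'a str>) -> Result<(), String> {
    if !visited.insert(node) {
        return Err(format!("cyclic dependency through `{}`", node));
    }
    if checked.insert(node) {
        if let Some(deps) = graph.get(node) {
            for &dep in deps {
                toy_visit(graph, dep, visited, checked)?;
            }
        }
    }
    visited.remove(node);
    Ok(())
}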
+ if checked.insert(id) { + let summary = summaries[id]; + for dep in resolve.deps_not_replaced(id) { + let is_transitive = summary.dependencies().iter().any(|d| { + d.matches_id(dep) && d.is_transitive() + }); + let mut empty = HashSet::new(); + let visited = if is_transitive {&mut *visited} else {&mut empty}; + visit(resolve, dep, summaries, visited, checked)?; + + if let Some(id) = resolve.replacement(dep) { + visit(resolve, id, summaries, visited, checked)?; + } + } + } + + // Ok, we're done, no longer visiting our node any more + visited.remove(id); + Ok(()) + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/core/shell.rs b/collector/compile-benchmarks/cargo/src/cargo/core/shell.rs new file mode 100644 index 000000000..6911339d8 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/core/shell.rs @@ -0,0 +1,251 @@ +use std::fmt; +use std::io::prelude::*; + +use atty; +use termcolor::Color::{Green, Red, Yellow}; +use termcolor::{self, StandardStream, Color, ColorSpec, WriteColor}; + +use util::errors::CargoResult; + +/// The requested verbosity of output +#[derive(Debug, Clone, Copy, PartialEq)] +pub enum Verbosity { + Verbose, + Normal, + Quiet +} + +/// An abstraction around a `Write`able object that remembers preferences for output verbosity and +/// color. +pub struct Shell { + /// the `Write`able object, either with or without color support (represented by different enum + /// variants) + err: ShellOut, + /// How verbose messages should be + verbosity: Verbosity, +} + +impl fmt::Debug for Shell { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match &self.err { + &ShellOut::Write(_) => f.debug_struct("Shell") + .field("verbosity", &self.verbosity) + .finish(), + &ShellOut::Stream(_, color_choice) => f.debug_struct("Shell") + .field("verbosity", &self.verbosity) + .field("color_choice", &color_choice) + .finish() + } + } +} + +/// A `Write`able object, either with or without color support +enum ShellOut { + /// A plain write object without color support + Write(Box), + /// Color-enabled stdio, with information on whether color should be used + Stream(StandardStream, ColorChoice), +} + +/// Whether messages should use color output +#[derive(Debug, PartialEq, Clone, Copy)] +pub enum ColorChoice { + /// Force color output + Always, + /// Force disable color output + Never, + /// Intelligently guess whether to use color output + CargoAuto, +} + +impl Shell { + /// Create a new shell (color choice and verbosity), defaulting to 'auto' color and verbose + /// output. + pub fn new() -> Shell { + Shell { + err: ShellOut::Stream( + StandardStream::stderr(ColorChoice::CargoAuto.to_termcolor_color_choice()), + ColorChoice::CargoAuto, + ), + verbosity: Verbosity::Verbose, + } + } + + /// Create a shell from a plain writable object, with no color, and max verbosity. + pub fn from_write(out: Box) -> Shell { + Shell { + err: ShellOut::Write(out), + verbosity: Verbosity::Verbose, + } + } + + /// Print a message, where the status will have `color` color, and can be justified. The + /// messages follows without color. + fn print(&mut self, + status: &fmt::Display, + message: &fmt::Display, + color: Color, + justified: bool) -> CargoResult<()> { + match self.verbosity { + Verbosity::Quiet => Ok(()), + _ => { + self.err.print(status, message, color, justified) + } + } + } + + /// Get a reference to the underlying writer + pub fn err(&mut self) -> &mut Write { + self.err.as_write() + } + + /// Shortcut to right-align and color green a status message. 
+ pub fn status(&mut self, status: T, message: U) -> CargoResult<()> + where T: fmt::Display, U: fmt::Display + { + self.print(&status, &message, Green, true) + } + + /// Shortcut to right-align a status message. + pub fn status_with_color(&mut self, + status: T, + message: U, + color: Color) -> CargoResult<()> + where T: fmt::Display, U: fmt::Display + { + self.print(&status, &message, color, true) + } + + /// Run the callback only if we are in verbose mode + pub fn verbose(&mut self, mut callback: F) -> CargoResult<()> + where F: FnMut(&mut Shell) -> CargoResult<()> + { + match self.verbosity { + Verbosity::Verbose => callback(self), + _ => Ok(()) + } + } + + /// Run the callback if we are not in verbose mode. + pub fn concise(&mut self, mut callback: F) -> CargoResult<()> + where F: FnMut(&mut Shell) -> CargoResult<()> + { + match self.verbosity { + Verbosity::Verbose => Ok(()), + _ => callback(self) + } + } + + /// Print a red 'error' message + pub fn error(&mut self, message: T) -> CargoResult<()> { + self.print(&"error:", &message, Red, false) + } + + /// Print an amber 'warning' message + pub fn warn(&mut self, message: T) -> CargoResult<()> { + match self.verbosity { + Verbosity::Quiet => Ok(()), + _ => self.print(&"warning:", &message, Yellow, false), + } + } + + /// Update the verbosity of the shell + pub fn set_verbosity(&mut self, verbosity: Verbosity) { + self.verbosity = verbosity; + } + + /// Get the verbosity of the shell + pub fn verbosity(&self) -> Verbosity { + self.verbosity + } + + /// Update the color choice (always, never, or auto) from a string. + pub fn set_color_choice(&mut self, color: Option<&str>) -> CargoResult<()> { + if let ShellOut::Stream(ref mut err, ref mut cc) = self.err { + let cfg = match color { + Some("always") => ColorChoice::Always, + Some("never") => ColorChoice::Never, + + Some("auto") | + None => ColorChoice::CargoAuto, + + Some(arg) => bail!("argument for --color must be auto, always, or \ + never, but found `{}`", arg), + }; + *cc = cfg; + *err = StandardStream::stderr(cfg.to_termcolor_color_choice()); + } + Ok(()) + } + + /// Get the current color choice + /// + /// If we are not using a color stream, this will always return Never, even if the color choice + /// has been set to something else. + pub fn color_choice(&self) -> ColorChoice { + match self.err { + ShellOut::Stream(_, cc) => cc, + ShellOut::Write(_) => ColorChoice::Never, + } + } +} + +impl ShellOut { + /// Print out a message with a status. The status comes first and is bold + the given color. + /// The status can be justified, in which case the max width that will right align is 12 chars. + fn print(&mut self, + status: &fmt::Display, + message: &fmt::Display, + color: Color, + justified: bool) -> CargoResult<()> { + match *self { + ShellOut::Stream(ref mut err, _) => { + err.reset()?; + err.set_color(ColorSpec::new() + .set_bold(true) + .set_fg(Some(color)))?; + if justified { + write!(err, "{:>12}", status)?; + } else { + write!(err, "{}", status)?; + } + err.reset()?; + write!(err, " {}\n", message)?; + } + ShellOut::Write(ref mut w) => { + if justified { + write!(w, "{:>12}", status)?; + } else { + write!(w, "{}", status)?; + } + write!(w, " {}\n", message)?; + } + } + Ok(()) + } + + /// Get this object as a `io::Write`. 
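// [editor's note: illustrative sketch, not part of this patch] The same
// bold, colored, right-justified status rendering that `ShellOut::print`
// performs above, written directly against the `termcolor` crate (note
// this uses termcolor's own `ColorChoice`, not the shell's):
use std::io::Write;
use termcolor::{Color, ColorSpec, StandardStream, WriteColor};
fn print_status_sketch(status: &str, message: &str) -> std::io::Result<()> {
    let mut err = StandardStream::stderr(termcolor::ColorChoice::Auto);
    err.set_color(ColorSpec::new().set_bold(true).set_fg(Some(Color::Green)))?;
    write!(err, "{:>12}", status)?; // right-align within 12 chars, as Cargo does
    err.reset()?;
    writeln!(err, " {}", message)
}
// e.g. print_status_sketch("Compiling", "cargo v0.20.0")?;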
+ fn as_write(&mut self) -> &mut Write { + match *self { + ShellOut::Stream(ref mut err, _) => err, + ShellOut::Write(ref mut w) => w, + } + } +} + +impl ColorChoice { + /// Convert our color choice to termcolor's version + fn to_termcolor_color_choice(&self) -> termcolor::ColorChoice { + match *self { + ColorChoice::Always => termcolor::ColorChoice::Always, + ColorChoice::Never => termcolor::ColorChoice::Never, + ColorChoice::CargoAuto => { + if atty::is(atty::Stream::Stderr) { + termcolor::ColorChoice::Auto + } else { + termcolor::ColorChoice::Never + } + } + } + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/core/source.rs b/collector/compile-benchmarks/cargo/src/cargo/core/source.rs new file mode 100644 index 000000000..01d659919 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/core/source.rs @@ -0,0 +1,652 @@ +use std::cmp::{self, Ordering}; +use std::collections::hash_map::{HashMap, Values, IterMut}; +use std::fmt::{self, Formatter}; +use std::hash::{self, Hash}; +use std::path::Path; +use std::sync::Arc; +use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT}; +use std::sync::atomic::Ordering::SeqCst; + +use serde::ser; +use serde::de; +use url::Url; + +use core::{Package, PackageId, Registry}; +use ops; +use sources::git; +use sources::{PathSource, GitSource, RegistrySource, CRATES_IO}; +use sources::DirectorySource; +use util::{Config, CargoResult, ToUrl}; + +/// A Source finds and downloads remote packages based on names and +/// versions. +pub trait Source: Registry { + /// Returns the `SourceId` corresponding to this source + fn source_id(&self) -> &SourceId; + + /// The update method performs any network operations required to + /// get the entire list of all names, versions and dependencies of + /// packages managed by the Source. + fn update(&mut self) -> CargoResult<()>; + + /// The download method fetches the full package for each name and + /// version specified. + fn download(&mut self, package: &PackageId) -> CargoResult; + + /// Generates a unique string which represents the fingerprint of the + /// current state of the source. + /// + /// This fingerprint is used to determine the "fresheness" of the source + /// later on. It must be guaranteed that the fingerprint of a source is + /// constant if and only if the output product will remain constant. + /// + /// The `pkg` argument is the package which this fingerprint should only be + /// interested in for when this source may contain multiple packages. + fn fingerprint(&self, pkg: &Package) -> CargoResult; + + /// If this source supports it, verifies the source of the package + /// specified. + /// + /// Note that the source may also have performed other checksum-based + /// verification during the `download` step, but this is intended to be run + /// just before a crate is compiled so it may perform more expensive checks + /// which may not be cacheable. 
+ fn verify(&self, _pkg: &PackageId) -> CargoResult<()> { + Ok(()) + } +} + +impl<'a, T: Source + ?Sized + 'a> Source for Box { + /// Forwards to `Source::source_id` + fn source_id(&self) -> &SourceId { + (**self).source_id() + } + + /// Forwards to `Source::update` + fn update(&mut self) -> CargoResult<()> { + (**self).update() + } + + /// Forwards to `Source::download` + fn download(&mut self, id: &PackageId) -> CargoResult { + (**self).download(id) + } + + /// Forwards to `Source::fingerprint` + fn fingerprint(&self, pkg: &Package) -> CargoResult { + (**self).fingerprint(pkg) + } + + /// Forwards to `Source::verify` + fn verify(&self, pkg: &PackageId) -> CargoResult<()> { + (**self).verify(pkg) + } +} + +/// The possible kinds of code source. Along with a URL, this fully defines the +/// source +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +enum Kind { + /// Kind::Git() represents a git repository + Git(GitReference), + /// represents a local path + Path, + /// represents the central registry + Registry, + /// represents a local filesystem-based registry + LocalRegistry, + /// represents a directory-based registry + Directory, +} + +/// Information to find a specific commit in a git repository +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum GitReference { + /// from a tag + Tag(String), + /// from the HEAD of a branch + Branch(String), + /// from a specific revision + Rev(String), +} + +/// Unique identifier for a source of packages. +#[derive(Clone, Eq, Debug)] +pub struct SourceId { + inner: Arc, +} + +/// Unique identifier for a source of packages. +#[derive(Eq, Clone, Debug)] +struct SourceIdInner { + /// The source URL + url: Url, + /// `git::canonicalize_url(url)` for the url field + canonical_url: Url, + /// The source kind + kind: Kind, + // e.g. the exact git revision of the specified branch for a Git Source + precise: Option, +} + +impl SourceId { + /// Create a SourceId object from the kind and url. + /// + /// The canonical url will be calculated, but the precise field will not + fn new(kind: Kind, url: Url) -> CargoResult { + let source_id = SourceId { + inner: Arc::new(SourceIdInner { + kind: kind, + canonical_url: git::canonicalize_url(&url)?, + url: url, + precise: None, + }), + }; + Ok(source_id) + } + + /// Parses a source URL and returns the corresponding ID. + /// + /// ## Example + /// + /// ``` + /// use cargo::core::SourceId; + /// SourceId::from_url("git+https://github.com/alexcrichton/\ + /// libssh2-static-sys#80e71a3021618eb05\ + /// 656c58fb7c5ef5f12bc747f"); + /// ``` + pub fn from_url(string: &str) -> CargoResult { + let mut parts = string.splitn(2, '+'); + let kind = parts.next().unwrap(); + let url = parts.next().ok_or_else(|| format!("invalid source `{}`", string))?; + + match kind { + "git" => { + let mut url = url.to_url()?; + let mut reference = GitReference::Branch("master".to_string()); + for (k, v) in url.query_pairs() { + match &k[..] { + // map older 'ref' to branch + "branch" | + "ref" => reference = GitReference::Branch(v.into_owned()), + + "rev" => reference = GitReference::Rev(v.into_owned()), + "tag" => reference = GitReference::Tag(v.into_owned()), + _ => {} + } + } + let precise = url.fragment().map(|s| s.to_owned()); + url.set_fragment(None); + url.set_query(None); + Ok(SourceId::for_git(&url, reference)?.with_precise(precise)) + }, + "registry" => { + let url = url.to_url()?; + Ok(SourceId::new(Kind::Registry, url)? 
+ .with_precise(Some("locked".to_string()))) + } + "path" => { + let url = url.to_url()?; + SourceId::new(Kind::Path, url) + } + kind => Err(format!("unsupported source protocol: {}", kind).into()) + } + } + + /// A view of the `SourceId` that can be `Display`ed as a URL + pub fn to_url(&self) -> SourceIdToUrl { + SourceIdToUrl { inner: &*self.inner } + } + + /// Create a SourceId from a filesystem path. + /// + /// Pass absolute path + pub fn for_path(path: &Path) -> CargoResult { + let url = path.to_url()?; + SourceId::new(Kind::Path, url) + } + + /// Crate a SourceId from a git reference + pub fn for_git(url: &Url, reference: GitReference) -> CargoResult { + SourceId::new(Kind::Git(reference), url.clone()) + } + + /// Create a SourceId from a registry url + pub fn for_registry(url: &Url) -> CargoResult { + SourceId::new(Kind::Registry, url.clone()) + } + + /// Create a SourceId from a local registry path + pub fn for_local_registry(path: &Path) -> CargoResult { + let url = path.to_url()?; + SourceId::new(Kind::LocalRegistry, url) + } + + /// Create a SourceId from a directory path + pub fn for_directory(path: &Path) -> CargoResult { + let url = path.to_url()?; + SourceId::new(Kind::Directory, url) + } + + /// Returns the `SourceId` corresponding to the main repository. + /// + /// This is the main cargo registry by default, but it can be overridden in + /// a `.cargo/config`. + pub fn crates_io(config: &Config) -> CargoResult { + let cfg = ops::registry_configuration(config)?; + let url = if let Some(ref index) = cfg.index { + static WARNED: AtomicBool = ATOMIC_BOOL_INIT; + if !WARNED.swap(true, SeqCst) { + config.shell().warn("custom registry support via \ + the `registry.index` configuration is \ + being removed, this functionality \ + will not work in the future")?; + } + &index[..] + } else { + CRATES_IO + }; + let url = url.to_url()?; + SourceId::for_registry(&url) + } + + /// Get this source URL + pub fn url(&self) -> &Url { + &self.inner.url + } + + /// Is this source from a filesystem path + pub fn is_path(&self) -> bool { + self.inner.kind == Kind::Path + } + + /// Is this source from a registry (either local or not) + pub fn is_registry(&self) -> bool { + self.inner.kind == Kind::Registry || self.inner.kind == Kind::LocalRegistry + } + + /// Is this source from a git repository + pub fn is_git(&self) -> bool { + match self.inner.kind { + Kind::Git(_) => true, + _ => false, + } + } + + /// Creates an implementation of `Source` corresponding to this ID. + pub fn load<'a>(&self, config: &'a Config) -> CargoResult> { + trace!("loading SourceId; {}", self); + match self.inner.kind { + Kind::Git(..) 
=> Ok(Box::new(GitSource::new(self, config)?)), + Kind::Path => { + let path = match self.inner.url.to_file_path() { + Ok(p) => p, + Err(()) => panic!("path sources cannot be remote"), + }; + Ok(Box::new(PathSource::new(&path, self, config))) + } + Kind::Registry => Ok(Box::new(RegistrySource::remote(self, config))), + Kind::LocalRegistry => { + let path = match self.inner.url.to_file_path() { + Ok(p) => p, + Err(()) => panic!("path sources cannot be remote"), + }; + Ok(Box::new(RegistrySource::local(self, &path, config))) + } + Kind::Directory => { + let path = match self.inner.url.to_file_path() { + Ok(p) => p, + Err(()) => panic!("path sources cannot be remote"), + }; + Ok(Box::new(DirectorySource::new(&path, self, config))) + } + } + } + + /// Get the value of the precise field + pub fn precise(&self) -> Option<&str> { + self.inner.precise.as_ref().map(|s| &s[..]) + } + + /// Get the git reference if this is a git source, otherwise None. + pub fn git_reference(&self) -> Option<&GitReference> { + match self.inner.kind { + Kind::Git(ref s) => Some(s), + _ => None, + } + } + + /// Create a new SourceId from this source with the given `precise` + pub fn with_precise(&self, v: Option) -> SourceId { + SourceId { + inner: Arc::new(SourceIdInner { + precise: v, + ..(*self.inner).clone() + }) + } + } + + /// Whether the remote registry is the standard https://crates.io + pub fn is_default_registry(&self) -> bool { + match self.inner.kind { + Kind::Registry => {} + _ => return false, + } + self.inner.url.to_string() == CRATES_IO + } + + /// Hash `self` + /// + /// For paths, remove the workspace prefix so the same source will give the + /// same hash in different locations. + pub fn stable_hash(&self, workspace: &Path, into: &mut S) { + if self.is_path() { + if let Ok(p) = self.inner.url.to_file_path().unwrap().strip_prefix(workspace) { + self.inner.kind.hash(into); + p.to_str().unwrap().hash(into); + return + } + } + self.hash(into) + } +} + +impl PartialEq for SourceId { + fn eq(&self, other: &SourceId) -> bool { + (*self.inner).eq(&*other.inner) + } +} + +impl PartialOrd for SourceId { + fn partial_cmp(&self, other: &SourceId) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for SourceId { + fn cmp(&self, other: &SourceId) -> Ordering { + self.inner.cmp(&other.inner) + } +} + +impl ser::Serialize for SourceId { + fn serialize(&self, s: S) -> Result + where S: ser::Serializer, + { + if self.is_path() { + None::.serialize(s) + } else { + s.collect_str(&self.to_url()) + } + } +} + +impl<'de> de::Deserialize<'de> for SourceId { + fn deserialize(d: D) -> Result + where D: de::Deserializer<'de>, + { + let string = String::deserialize(d)?; + SourceId::from_url(&string).map_err(de::Error::custom) + } +} + +impl fmt::Display for SourceId { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + match *self.inner { + SourceIdInner { kind: Kind::Path, ref url, .. } => { + fmt::Display::fmt(url, f) + } + SourceIdInner { kind: Kind::Git(ref reference), ref url, + ref precise, .. } => { + write!(f, "{}", url)?; + if let Some(pretty) = reference.pretty_ref() { + write!(f, "?{}", pretty)?; + } + + if let Some(ref s) = *precise { + let len = cmp::min(s.len(), 8); + write!(f, "#{}", &s[..len])?; + } + Ok(()) + } + SourceIdInner { kind: Kind::Registry, ref url, .. } | + SourceIdInner { kind: Kind::LocalRegistry, ref url, .. } => { + write!(f, "registry {}", url) + } + SourceIdInner { kind: Kind::Directory, ref url, .. 
} => { + write!(f, "dir {}", url) + } + } + } +} + +// This custom implementation handles situations such as when two git sources +// point at *almost* the same URL, but not quite, even when they actually point +// to the same repository. +/// This method tests for self and other values to be equal, and is used by ==. +/// +/// For git repositories, the canonical url is checked. +impl PartialEq for SourceIdInner { + fn eq(&self, other: &SourceIdInner) -> bool { + if self.kind != other.kind { + return false; + } + if self.url == other.url { + return true; + } + + match (&self.kind, &other.kind) { + (&Kind::Git(ref ref1), &Kind::Git(ref ref2)) => { + ref1 == ref2 && self.canonical_url == other.canonical_url + } + _ => false, + } + } +} + +impl PartialOrd for SourceIdInner { + fn partial_cmp(&self, other: &SourceIdInner) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for SourceIdInner { + fn cmp(&self, other: &SourceIdInner) -> Ordering { + match self.kind.cmp(&other.kind) { + Ordering::Equal => {} + ord => return ord, + } + match self.url.cmp(&other.url) { + Ordering::Equal => {} + ord => return ord, + } + match (&self.kind, &other.kind) { + (&Kind::Git(ref ref1), &Kind::Git(ref ref2)) => { + (ref1, &self.canonical_url).cmp(&(ref2, &other.canonical_url)) + } + _ => self.kind.cmp(&other.kind), + } + } +} + +// The hash of SourceId is used in the name of some Cargo folders, so shouldn't +// vary. `as_str` gives the serialisation of a url (which has a spec) and so +// insulates against possible changes in how the url crate does hashing. +impl Hash for SourceId { + fn hash(&self, into: &mut S) { + self.inner.kind.hash(into); + match *self.inner { + SourceIdInner { kind: Kind::Git(..), ref canonical_url, .. } => { + canonical_url.as_str().hash(into) + } + _ => self.inner.url.as_str().hash(into), + } + } +} + +/// A `Display`able view into a SourceId that will write it as a url +pub struct SourceIdToUrl<'a> { + inner: &'a SourceIdInner, +} + +impl<'a> fmt::Display for SourceIdToUrl<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self.inner { + SourceIdInner { kind: Kind::Path, ref url, .. } => { + write!(f, "path+{}", url) + } + SourceIdInner { + kind: Kind::Git(ref reference), ref url, ref precise, .. + } => { + write!(f, "git+{}", url)?; + if let Some(pretty) = reference.pretty_ref() { + write!(f, "?{}", pretty)?; + } + if let Some(precise) = precise.as_ref() { + write!(f, "#{}", precise)?; + } + Ok(()) + } + SourceIdInner { kind: Kind::Registry, ref url, .. } => { + write!(f, "registry+{}", url) + } + SourceIdInner { kind: Kind::LocalRegistry, ref url, .. } => { + write!(f, "local-registry+{}", url) + } + SourceIdInner { kind: Kind::Directory, ref url, .. 
} => { + write!(f, "directory+{}", url) + } + } + } +} + +impl GitReference { + /// Returns a `Display`able view of this git reference, or None if using + /// the head of the "master" branch + pub fn pretty_ref(&self) -> Option { + match *self { + GitReference::Branch(ref s) if *s == "master" => None, + _ => Some(PrettyRef { inner: self }), + } + } +} + +/// A git reference that can be `Display`ed +pub struct PrettyRef<'a> { + inner: &'a GitReference, +} + +impl<'a> fmt::Display for PrettyRef<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self.inner { + GitReference::Branch(ref b) => write!(f, "branch={}", b), + GitReference::Tag(ref s) => write!(f, "tag={}", s), + GitReference::Rev(ref s) => write!(f, "rev={}", s), + } + } +} + +/// A `HashMap` of `SourceId` -> `Box` +#[derive(Default)] +pub struct SourceMap<'src> { + map: HashMap>, +} + +/// A `std::collection::hash_map::Values` for `SourceMap` +pub type Sources<'a, 'src> = Values<'a, SourceId, Box>; + +/// A `std::collection::hash_map::IterMut` for `SourceMap` +pub struct SourcesMut<'a, 'src: 'a> { + inner: IterMut<'a, SourceId, Box>, +} + +impl<'src> SourceMap<'src> { + /// Create an empty map + pub fn new() -> SourceMap<'src> { + SourceMap { map: HashMap::new() } + } + + /// Like `HashMap::contains_key` + pub fn contains(&self, id: &SourceId) -> bool { + self.map.contains_key(id) + } + + /// Like `HashMap::get` + pub fn get(&self, id: &SourceId) -> Option<&(Source + 'src)> { + let source = self.map.get(id); + + source.map(|s| { + let s: &(Source + 'src) = &**s; + s + }) + } + + /// Like `HashMap::get_mut` + pub fn get_mut(&mut self, id: &SourceId) -> Option<&mut (Source + 'src)> { + self.map.get_mut(id).map(|s| { + let s: &mut (Source + 'src) = &mut **s; + s + }) + } + + /// Like `HashMap::get`, but first calculates the `SourceId` from a + /// `PackageId` + pub fn get_by_package_id(&self, pkg_id: &PackageId) -> Option<&(Source + 'src)> { + self.get(pkg_id.source_id()) + } + + /// Like `HashMap::insert`, but derives the SourceId key from the Source + pub fn insert(&mut self, source: Box) { + let id = source.source_id().clone(); + self.map.insert(id, source); + } + + /// Like `HashMap::is_empty` + pub fn is_empty(&self) -> bool { + self.map.is_empty() + } + + /// Like `HashMap::len` + pub fn len(&self) -> usize { + self.map.len() + } + + /// Like `HashMap::values` + pub fn sources<'a>(&'a self) -> Sources<'a, 'src> { + self.map.values() + } + + /// Like `HashMap::iter_mut` + pub fn sources_mut<'a>(&'a mut self) -> SourcesMut<'a, 'src> { + SourcesMut { inner: self.map.iter_mut() } + } +} + +impl<'a, 'src> Iterator for SourcesMut<'a, 'src> { + type Item = (&'a SourceId, &'a mut (Source + 'src)); + fn next(&mut self) -> Option<(&'a SourceId, &'a mut (Source + 'src))> { + self.inner.next().map(|(a, b)| (a, &mut **b)) + } +} + +#[cfg(test)] +mod tests { + use super::{SourceId, Kind, GitReference}; + use util::ToUrl; + + #[test] + fn github_sources_equal() { + let loc = "https://github.com/foo/bar".to_url().unwrap(); + let master = Kind::Git(GitReference::Branch("master".to_string())); + let s1 = SourceId::new(master.clone(), loc).unwrap(); + + let loc = "git://github.com/foo/bar".to_url().unwrap(); + let s2 = SourceId::new(master, loc.clone()).unwrap(); + + assert_eq!(s1, s2); + + let foo = Kind::Git(GitReference::Branch("foo".to_string())); + let s3 = SourceId::new(foo, loc).unwrap(); + assert!(s1 != s3); + } +} diff --git 
a/collector/compile-benchmarks/cargo/src/cargo/core/summary.rs b/collector/compile-benchmarks/cargo/src/cargo/core/summary.rs new file mode 100644 index 000000000..734f73bd6 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/core/summary.rs @@ -0,0 +1,124 @@ +use std::collections::BTreeMap; +use std::mem; +use std::rc::Rc; + +use semver::Version; +use core::{Dependency, PackageId, SourceId}; + +use util::CargoResult; + +/// Subset of a `Manifest`. Contains only the most important informations about +/// a package. +/// +/// Summaries are cloned, and should not be mutated after creation +#[derive(Debug, Clone)] +pub struct Summary { + inner: Rc, +} + +#[derive(Debug, Clone)] +struct Inner { + package_id: PackageId, + dependencies: Vec, + features: BTreeMap>, + checksum: Option, +} + +impl Summary { + pub fn new(pkg_id: PackageId, + dependencies: Vec, + features: BTreeMap>) -> CargoResult { + for dep in dependencies.iter() { + if features.get(dep.name()).is_some() { + bail!("Features and dependencies cannot have the \ + same name: `{}`", dep.name()) + } + if dep.is_optional() && !dep.is_transitive() { + bail!("Dev-dependencies are not allowed to be optional: `{}`", + dep.name()) + } + } + for (feature, list) in features.iter() { + for dep in list.iter() { + let mut parts = dep.splitn(2, '/'); + let dep = parts.next().unwrap(); + let is_reexport = parts.next().is_some(); + if !is_reexport && features.get(dep).is_some() { continue } + match dependencies.iter().find(|d| d.name() == dep) { + Some(d) => { + if d.is_optional() || is_reexport { continue } + bail!("Feature `{}` depends on `{}` which is not an \ + optional dependency.\nConsider adding \ + `optional = true` to the dependency", + feature, dep) + } + None if is_reexport => { + bail!("Feature `{}` requires a feature of `{}` which is not a \ + dependency", feature, dep) + } + None => { + bail!("Feature `{}` includes `{}` which is neither \ + a dependency nor another feature", feature, dep) + } + } + } + } + Ok(Summary { + inner: Rc::new(Inner { + package_id: pkg_id, + dependencies: dependencies, + features: features, + checksum: None, + }), + }) + } + + pub fn package_id(&self) -> &PackageId { &self.inner.package_id } + pub fn name(&self) -> &str { self.package_id().name() } + pub fn version(&self) -> &Version { self.package_id().version() } + pub fn source_id(&self) -> &SourceId { self.package_id().source_id() } + pub fn dependencies(&self) -> &[Dependency] { &self.inner.dependencies } + pub fn features(&self) -> &BTreeMap> { &self.inner.features } + pub fn checksum(&self) -> Option<&str> { + self.inner.checksum.as_ref().map(|s| &s[..]) + } + + pub fn override_id(mut self, id: PackageId) -> Summary { + Rc::make_mut(&mut self.inner).package_id = id; + self + } + + pub fn set_checksum(mut self, cksum: String) -> Summary { + Rc::make_mut(&mut self.inner).checksum = Some(cksum); + self + } + + pub fn map_dependencies(mut self, f: F) -> Summary + where F: FnMut(Dependency) -> Dependency { + { + let slot = &mut Rc::make_mut(&mut self.inner).dependencies; + let deps = mem::replace(slot, Vec::new()); + *slot = deps.into_iter().map(f).collect(); + } + self + } + + pub fn map_source(self, to_replace: &SourceId, replace_with: &SourceId) + -> Summary { + let me = if self.package_id().source_id() == to_replace { + let new_id = self.package_id().with_source_id(replace_with); + self.override_id(new_id) + } else { + self + }; + me.map_dependencies(|dep| { + dep.map_source(to_replace, replace_with) + }) + } +} + +impl PartialEq for Summary 
{ + fn eq(&self, other: &Summary) -> bool { + self.inner.package_id == other.inner.package_id + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/core/workspace.rs b/collector/compile-benchmarks/cargo/src/cargo/core/workspace.rs new file mode 100644 index 000000000..58b141269 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/core/workspace.rs @@ -0,0 +1,702 @@ +use std::collections::hash_map::{HashMap, Entry}; +use std::collections::BTreeMap; +use std::path::{Path, PathBuf}; +use std::slice; + +use glob::glob; +use url::Url; + +use core::{Package, VirtualManifest, EitherManifest, SourceId}; +use core::{PackageIdSpec, Dependency, Profile, Profiles}; +use util::{Config, Filesystem}; +use util::errors::{CargoResult, CargoResultExt}; +use util::paths; +use util::toml::read_manifest; + +/// The core abstraction in Cargo for working with a workspace of crates. +/// +/// A workspace is often created very early on and then threaded through all +/// other functions. It's typically through this object that the current +/// package is loaded and/or learned about. +#[derive(Debug)] +pub struct Workspace<'cfg> { + config: &'cfg Config, + + // This path is a path to where the current cargo subcommand was invoked + // from. That is, this is the `--manifest-path` argument to Cargo, and + // points to the "main crate" that we're going to worry about. + current_manifest: PathBuf, + + // A list of packages found in this workspace. Always includes at least the + // package mentioned by `current_manifest`. + packages: Packages<'cfg>, + + // If this workspace includes more than one crate, this points to the root + // of the workspace. This is `None` in the case that `[workspace]` is + // missing, `package.workspace` is missing, and no `Cargo.toml` above + // `current_manifest` was found on the filesystem with `[workspace]`. + root_manifest: Option, + + // Shared target directory for all the packages of this workspace. + // `None` if the default path of `root/target` should be used. + target_dir: Option, + + // List of members in this workspace with a listing of all their manifest + // paths. The packages themselves can be looked up through the `packages` + // set above. + members: Vec, + + // True, if this is a temporary workspace created for the purposes of + // cargo install or cargo package. + is_ephemeral: bool, + + // True if this workspace should enforce optional dependencies even when + // not needed; false if this workspace should only enforce dependencies + // needed by the current configuration (such as in cargo install). + require_optional_deps: bool, +} + +// Separate structure for tracking loaded packages (to avoid loading anything +// twice), and this is separate to help appease the borrow checker. +#[derive(Debug)] +struct Packages<'cfg> { + config: &'cfg Config, + packages: HashMap, +} + +#[derive(Debug)] +enum MaybePackage { + Package(Package), + Virtual(VirtualManifest), +} + +/// Configuration of a workspace in a manifest. +#[derive(Debug, Clone)] +pub enum WorkspaceConfig { + /// Indicates that `[workspace]` was present and the members were + /// optionally specified as well. + Root(WorkspaceRootConfig), + + /// Indicates that `[workspace]` was present and the `root` field is the + /// optional value of `package.workspace`, if present. + Member { root: Option }, +} + +/// Intermediate configuration of a workspace root in a manifest. 
+/// +/// Knows the Workspace Root path, as well as `members` and `exclude` lists of path patterns, which +/// together tell if some path is recognized as a member by this root or not. +#[derive(Debug, Clone)] +pub struct WorkspaceRootConfig { + root_dir: PathBuf, + members: Option>, + exclude: Vec, +} + +/// An iterator over the member packages of a workspace, returned by +/// `Workspace::members` +pub struct Members<'a, 'cfg: 'a> { + ws: &'a Workspace<'cfg>, + iter: slice::Iter<'a, PathBuf>, +} + +impl<'cfg> Workspace<'cfg> { + /// Creates a new workspace given the target manifest pointed to by + /// `manifest_path`. + /// + /// This function will construct the entire workspace by determining the + /// root and all member packages. It will then validate the workspace + /// before returning it, so `Ok` is only returned for valid workspaces. + pub fn new(manifest_path: &Path, config: &'cfg Config) + -> CargoResult> { + let target_dir = config.target_dir()?; + + let mut ws = Workspace { + config: config, + current_manifest: manifest_path.to_path_buf(), + packages: Packages { + config: config, + packages: HashMap::new(), + }, + root_manifest: None, + target_dir: target_dir, + members: Vec::new(), + is_ephemeral: false, + require_optional_deps: true, + }; + ws.root_manifest = ws.find_root(manifest_path)?; + ws.find_members()?; + ws.validate()?; + Ok(ws) + } + + pub fn current_manifest(&self) -> &Path { + &self.current_manifest + } + + /// Creates a "temporary workspace" from one package which only contains + /// that package. + /// + /// This constructor will not touch the filesystem and only creates an + /// in-memory workspace. That is, all configuration is ignored, it's just + /// intended for that one package. + /// + /// This is currently only used in niche situations like `cargo install` or + /// `cargo package`. + pub fn ephemeral(package: Package, + config: &'cfg Config, + target_dir: Option, + require_optional_deps: bool) -> CargoResult> { + let mut ws = Workspace { + config: config, + current_manifest: package.manifest_path().to_path_buf(), + packages: Packages { + config: config, + packages: HashMap::new(), + }, + root_manifest: None, + target_dir: None, + members: Vec::new(), + is_ephemeral: true, + require_optional_deps: require_optional_deps, + }; + { + let key = ws.current_manifest.parent().unwrap(); + let package = MaybePackage::Package(package); + ws.packages.packages.insert(key.to_path_buf(), package); + ws.target_dir = if let Some(dir) = target_dir { + Some(dir) + } else { + ws.config.target_dir()? + }; + ws.members.push(ws.current_manifest.clone()); + } + Ok(ws) + } + + /// Returns the current package of this workspace. + /// + /// Note that this can return an error if it the current manifest is + /// actually a "virtual Cargo.toml", in which case an error is returned + /// indicating that something else should be passed. + pub fn current(&self) -> CargoResult<&Package> { + self.current_opt().ok_or_else(|| + format!("manifest path `{}` is a virtual manifest, but this \ + command requires running against an actual package in \ + this workspace", self.current_manifest.display()).into() + ) + } + + pub fn current_opt(&self) -> Option<&Package> { + match *self.packages.get(&self.current_manifest) { + MaybePackage::Package(ref p) => Some(p), + MaybePackage::Virtual(..) => None + } + } + + pub fn is_virtual(&self) -> bool { + match *self.packages.get(&self.current_manifest) { + MaybePackage::Package(..) => false, + MaybePackage::Virtual(..) 
=> true + } + } + + /// Returns the `Config` this workspace is associated with. + pub fn config(&self) -> &'cfg Config { + self.config + } + + pub fn profiles(&self) -> &Profiles { + let root = self.root_manifest.as_ref().unwrap_or(&self.current_manifest); + match *self.packages.get(root) { + MaybePackage::Package(ref p) => p.manifest().profiles(), + MaybePackage::Virtual(ref vm) => vm.profiles(), + } + } + + /// Returns the root path of this workspace. + /// + /// That is, this returns the path of the directory containing the + /// `Cargo.toml` which is the root of this workspace. + pub fn root(&self) -> &Path { + match self.root_manifest { + Some(ref p) => p, + None => &self.current_manifest + }.parent().unwrap() + } + + pub fn target_dir(&self) -> Filesystem { + self.target_dir.clone().unwrap_or_else(|| { + Filesystem::new(self.root().join("target")) + }) + } + + /// Returns the root [replace] section of this workspace. + /// + /// This may be from a virtual crate or an actual crate. + pub fn root_replace(&self) -> &[(PackageIdSpec, Dependency)] { + let path = match self.root_manifest { + Some(ref p) => p, + None => &self.current_manifest, + }; + match *self.packages.get(path) { + MaybePackage::Package(ref p) => p.manifest().replace(), + MaybePackage::Virtual(ref vm) => vm.replace(), + } + } + + /// Returns the root [patch] section of this workspace. + /// + /// This may be from a virtual crate or an actual crate. + pub fn root_patch(&self) -> &HashMap> { + let path = match self.root_manifest { + Some(ref p) => p, + None => &self.current_manifest, + }; + match *self.packages.get(path) { + MaybePackage::Package(ref p) => p.manifest().patch(), + MaybePackage::Virtual(ref vm) => vm.patch(), + } + } + + /// Returns an iterator over all packages in this workspace + pub fn members<'a>(&'a self) -> Members<'a, 'cfg> { + Members { + ws: self, + iter: self.members.iter(), + } + } + + pub fn is_ephemeral(&self) -> bool { + self.is_ephemeral + } + + pub fn require_optional_deps(&self) -> bool { + self.require_optional_deps + } + + /// Finds the root of a workspace for the crate whose manifest is located + /// at `manifest_path`. + /// + /// This will parse the `Cargo.toml` at `manifest_path` and then interpret + /// the workspace configuration, optionally walking up the filesystem + /// looking for other workspace roots. + /// + /// Returns an error if `manifest_path` isn't actually a valid manifest or + /// if some other transient error happens. 
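// [editor's note: illustrative sketch, not part of this patch] The
// ancestor walk that `find_root` below performs, reduced to plain
// `std::path` (assuming cargo's `paths::ancestors` behaves like
// `Path::ancestors`, yielding the path itself and then each parent):
use std::path::{Path, PathBuf};
fn find_root_sketch(manifest_path: &Path) -> Option<PathBuf> {
    manifest_path.ancestors()
        .skip(2) // skip `.../Cargo.toml` and the crate's own directory
        .map(|dir| dir.join("Cargo.toml"))
        .find(|candidate| candidate.exists())
}
// The real code additionally parses each candidate manifest and only
// accepts it when it carries a `[workspace]` table that doesn't
// exclude us, or a `package.workspace` pointer.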
+ fn find_root(&mut self, manifest_path: &Path) + -> CargoResult> { + fn read_root_pointer(member_manifest: &Path, root_link: &str) -> CargoResult { + let path = member_manifest.parent().unwrap() + .join(root_link) + .join("Cargo.toml"); + debug!("find_root - pointer {}", path.display()); + Ok(paths::normalize_path(&path)) + }; + + { + let current = self.packages.load(manifest_path)?; + match *current.workspace_config() { + WorkspaceConfig::Root(_) => { + debug!("find_root - is root {}", manifest_path.display()); + return Ok(Some(manifest_path.to_path_buf())) + } + WorkspaceConfig::Member { root: Some(ref path_to_root) } => { + return Ok(Some(read_root_pointer(manifest_path, path_to_root)?)) + } + WorkspaceConfig::Member { root: None } => {} + } + } + + for path in paths::ancestors(manifest_path).skip(2) { + let ances_manifest_path = path.join("Cargo.toml"); + debug!("find_root - trying {}", ances_manifest_path.display()); + if ances_manifest_path.exists() { + match *self.packages.load(&ances_manifest_path)?.workspace_config() { + WorkspaceConfig::Root(ref ances_root_config) => { + debug!("find_root - found a root checking exclusion"); + if !ances_root_config.is_excluded(&manifest_path) { + debug!("find_root - found!"); + return Ok(Some(ances_manifest_path)) + } + } + WorkspaceConfig::Member { root: Some(ref path_to_root) } => { + debug!("find_root - found pointer"); + return Ok(Some(read_root_pointer(&ances_manifest_path, path_to_root)?)) + } + WorkspaceConfig::Member { .. } => {} + } + } + } + + Ok(None) + } + + /// After the root of a workspace has been located, probes for all members + /// of a workspace. + /// + /// If the `workspace.members` configuration is present, then this just + /// verifies that those are all valid packages to point to. Otherwise, this + /// will transitively follow all `path` dependencies looking for members of + /// the workspace. + fn find_members(&mut self) -> CargoResult<()> { + let root_manifest_path = match self.root_manifest { + Some(ref path) => path.clone(), + None => { + debug!("find_members - only me as a member"); + self.members.push(self.current_manifest.clone()); + return Ok(()) + } + }; + + let members_paths = { + let root_package = self.packages.load(&root_manifest_path)?; + match *root_package.workspace_config() { + WorkspaceConfig::Root(ref root_config) => root_config.members_paths()?, + _ => bail!("root of a workspace inferred but wasn't a root: {}", + root_manifest_path.display()), + } + }; + + for path in members_paths { + self.find_path_deps(&path.join("Cargo.toml"), &root_manifest_path, false)?; + } + + self.find_path_deps(&root_manifest_path, &root_manifest_path, false) + } + + fn find_path_deps(&mut self, + manifest_path: &Path, + root_manifest: &Path, + is_path_dep: bool) -> CargoResult<()> { + let manifest_path = paths::normalize_path(manifest_path); + if self.members.iter().any(|p| p == &manifest_path) { + return Ok(()) + } + if is_path_dep + && !manifest_path.parent().unwrap().starts_with(self.root()) + && self.find_root(&manifest_path)? != self.root_manifest { + // If `manifest_path` is a path dependency outside of the workspace, + // don't add it, or any of its dependencies, as a members. 
+            return Ok(())
+        }
+
+        match *self.packages.load(root_manifest)?.workspace_config() {
+            WorkspaceConfig::Root(ref root_config) => {
+                if root_config.is_excluded(&manifest_path) {
+                    return Ok(())
+                }
+            }
+            _ => {}
+        }
+
+        debug!("find_members - {}", manifest_path.display());
+        self.members.push(manifest_path.clone());
+
+        let candidates = {
+            let pkg = match *self.packages.load(&manifest_path)? {
+                MaybePackage::Package(ref p) => p,
+                MaybePackage::Virtual(_) => return Ok(()),
+            };
+            pkg.dependencies()
+               .iter()
+               .map(|d| d.source_id())
+               .filter(|d| d.is_path())
+               .filter_map(|d| d.url().to_file_path().ok())
+               .map(|p| p.join("Cargo.toml"))
+               .collect::<Vec<_>>()
+        };
+        for candidate in candidates {
+            self.find_path_deps(&candidate, root_manifest, true)?;
+        }
+        Ok(())
+    }
+
+    /// Validates a workspace, ensuring that a number of invariants are upheld:
+    ///
+    /// 1. A workspace only has one root.
+    /// 2. All workspace members agree on this one root as the root.
+    /// 3. The current crate is a member of this workspace.
+    fn validate(&mut self) -> CargoResult<()> {
+        if self.root_manifest.is_none() {
+            return Ok(())
+        }
+
+        let mut roots = Vec::new();
+        {
+            let mut names = BTreeMap::new();
+            for member in self.members.iter() {
+                let package = self.packages.get(member);
+                match *package.workspace_config() {
+                    WorkspaceConfig::Root(_) => {
+                        roots.push(member.parent().unwrap().to_path_buf());
+                    }
+                    WorkspaceConfig::Member { .. } => {}
+                }
+                let name = match *package {
+                    MaybePackage::Package(ref p) => p.name(),
+                    MaybePackage::Virtual(_) => continue,
+                };
+                if let Some(prev) = names.insert(name, member) {
+                    bail!("two packages named `{}` in this workspace:\n\
+                           - {}\n\
+                           - {}", name, prev.display(), member.display());
+                }
+            }
+        }
+
+        match roots.len() {
+            0 => {
+                bail!("`package.workspace` configuration points to a crate \
+                       which is not configured with [workspace]: \n\
+                       configuration at: {}\n\
+                       points to: {}",
+                      self.current_manifest.display(),
+                      self.root_manifest.as_ref().unwrap().display())
+            }
+            1 => {}
+            _ => {
+                bail!("multiple workspace roots found in the same workspace:\n{}",
+                      roots.iter()
+                           .map(|r| format!("  {}", r.display()))
+                           .collect::<Vec<String>>()
+                           .join("\n"));
+            }
+        }
+
+        for member in self.members.clone() {
+            let root = self.find_root(&member)?;
+            if root == self.root_manifest {
+                continue
+            }
+
+            match root {
+                Some(root) => {
+                    bail!("package `{}` is a member of the wrong workspace\n\
+                           expected: {}\n\
+                           actual:   {}",
+                          member.display(),
+                          self.root_manifest.as_ref().unwrap().display(),
+                          root.display());
+                }
+                None => {
+                    bail!("workspace member `{}` is not hierarchically below \
+                           the workspace root `{}`",
+                          member.display(),
+                          self.root_manifest.as_ref().unwrap().display());
+                }
+            }
+        }
+
+        if !self.members.contains(&self.current_manifest) {
+            let root = self.root_manifest.as_ref().unwrap();
+            let root_dir = root.parent().unwrap();
+            let current_dir = self.current_manifest.parent().unwrap();
+            let root_pkg = self.packages.get(root);
+
+            // FIXME: Make this more generic by using a relative path resolver between member and
+            // root.
+            let members_msg = match current_dir.strip_prefix(root_dir) {
+                Ok(rel) => {
+                    format!("this may be fixable by adding `{}` to the \
+                             `workspace.members` array of the manifest \
+                             located at: {}",
+                            rel.display(),
+                            root.display())
+                }
+                Err(_) => {
+                    format!("this may be fixable by adding a member to \
+                             the `workspace.members` array of the \
+                             manifest located at: {}", root.display())
+                }
+            };
+            let extra = match *root_pkg {
+                MaybePackage::Virtual(_) => members_msg,
+                MaybePackage::Package(ref p) => {
+                    let has_members_list = match *p.manifest().workspace_config() {
+                        WorkspaceConfig::Root(ref root_config) => root_config.has_members_list(),
+                        WorkspaceConfig::Member { .. } => unreachable!(),
+                    };
+                    if !has_members_list {
+                        format!("this may be fixable by ensuring that this \
+                                 crate is depended on by the workspace \
+                                 root: {}", root.display())
+                    } else {
+                        members_msg
+                    }
+                }
+            };
+            bail!("current package believes it's in a workspace when it's not:\n\
+                   current:   {}\n\
+                   workspace: {}\n\n{}",
+                  self.current_manifest.display(),
+                  root.display(),
+                  extra);
+        }
+
+        if let Some(ref root_manifest) = self.root_manifest {
+            let default_profiles = Profiles {
+                release: Profile::default_release(),
+                dev: Profile::default_dev(),
+                test: Profile::default_test(),
+                test_deps: Profile::default_dev(),
+                bench: Profile::default_bench(),
+                bench_deps: Profile::default_release(),
+                doc: Profile::default_doc(),
+                custom_build: Profile::default_custom_build(),
+                check: Profile::default_check(),
+                doctest: Profile::default_doctest(),
+            };
+
+            for pkg in self.members().filter(|p| p.manifest_path() != root_manifest) {
+                if pkg.manifest().profiles() != &default_profiles {
+                    let message = &format!("profiles for the non root package will be ignored, \
+                                            specify profiles at the workspace root:\n\
+                                            package:   {}\n\
+                                            workspace: {}",
+                                           pkg.manifest_path().display(),
+                                           root_manifest.display());
+
+                    //TODO: remove `Eq` bound from `Profiles` when the warning is removed.
+                    self.config.shell().warn(&message)?;
+                }
+            }
+        }
+
+        Ok(())
+    }
+}
+
+
+impl<'cfg> Packages<'cfg> {
+    fn get(&self, manifest_path: &Path) -> &MaybePackage {
+        &self.packages[manifest_path.parent().unwrap()]
+    }
+
+    fn load(&mut self, manifest_path: &Path) -> CargoResult<&MaybePackage> {
+        let key = manifest_path.parent().unwrap();
+        match self.packages.entry(key.to_path_buf()) {
+            Entry::Occupied(e) => Ok(e.into_mut()),
+            Entry::Vacant(v) => {
+                let source_id = SourceId::for_path(key)?;
+                let (manifest, _nested_paths) =
+                    read_manifest(manifest_path, &source_id, self.config)?;
+                Ok(v.insert(match manifest {
+                    EitherManifest::Real(manifest) => {
+                        MaybePackage::Package(Package::new(manifest, manifest_path))
+                    }
+                    EitherManifest::Virtual(vm) => {
+                        MaybePackage::Virtual(vm)
+                    }
+                }))
+            }
+        }
+    }
+}
+
+impl<'a, 'cfg> Members<'a, 'cfg> {
+    pub fn is_empty(self) -> bool {
+        self.count() == 0
+    }
+}
+
+impl<'a, 'cfg> Iterator for Members<'a, 'cfg> {
+    type Item = &'a Package;
+
+    fn next(&mut self) -> Option<&'a Package> {
+        loop {
+            let next = self.iter.next().map(|path| {
+                self.ws.packages.get(path)
+            });
+            match next {
+                Some(&MaybePackage::Package(ref p)) => return Some(p),
+                Some(&MaybePackage::Virtual(_)) => {}
+                None => return None,
+            }
+        }
+    }
+}
+
+impl MaybePackage {
+    fn workspace_config(&self) -> &WorkspaceConfig {
+        match *self {
+            MaybePackage::Package(ref p) => p.manifest().workspace_config(),
+            MaybePackage::Virtual(ref vm) => vm.workspace_config(),
+        }
+    }
+}
+
+impl WorkspaceRootConfig {
+    /// Create a new Intermediate Workspace Root configuration.
+    pub fn new(
+        root_dir: &Path,
+        members: &Option<Vec<String>>,
+        exclude: &Option<Vec<String>>,
+    ) -> WorkspaceRootConfig {
+        WorkspaceRootConfig {
+            root_dir: root_dir.to_path_buf(),
+            members: members.clone(),
+            exclude: exclude.clone().unwrap_or_default(),
+        }
+    }
+
+    /// Checks the path against the `excluded` list.
+    ///
+    /// This method does NOT consider the `members` list.
+    fn is_excluded(&self, manifest_path: &Path) -> bool {
+        let excluded = self.exclude.iter().any(|ex| {
+            manifest_path.starts_with(self.root_dir.join(ex))
+        });
+
+        let explicit_member = match self.members {
+            Some(ref members) => {
+                members.iter().any(|mem| {
+                    manifest_path.starts_with(self.root_dir.join(mem))
+                })
+            }
+            None => false,
+        };
+
+        !explicit_member && excluded
+    }
+
+    fn has_members_list(&self) -> bool {
+        self.members.is_some()
+    }
+
+    fn members_paths(&self) -> CargoResult<Vec<PathBuf>> {
+        let mut expanded_list = Vec::new();
+
+        if let Some(globs) = self.members.clone() {
+            for glob in globs {
+                let pathbuf = self.root_dir.join(glob);
+                let expanded_paths = Self::expand_member_path(&pathbuf)?;
+
+                // If glob does not find any valid paths, then put the original
+                // path in the expanded list to maintain backwards compatibility.
+                if expanded_paths.is_empty() {
+                    expanded_list.push(pathbuf);
+                } else {
+                    expanded_list.extend(expanded_paths);
+                }
+            }
+        }
+
+        Ok(expanded_list)
+    }
+
+    fn expand_member_path(path: &Path) -> CargoResult<Vec<PathBuf>> {
+        let path = match path.to_str() {
+            Some(p) => p,
+            None => return Ok(Vec::new()),
+        };
+        let res = glob(path).chain_err(|| {
+            format!("could not parse pattern `{}`", &path)
+        })?;
+        res.map(|p| {
+            p.chain_err(|| {
+                format!("unable to match path to pattern `{}`", &path)
+            })
+        }).collect()
+    }
+}
diff --git a/collector/compile-benchmarks/cargo/src/cargo/lib.rs b/collector/compile-benchmarks/cargo/src/cargo/lib.rs
new file mode 100755
index 000000000..f20118b80
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/lib.rs
@@ -0,0 +1,255 @@
+#![deny(unused)]
+#![cfg_attr(test, deny(warnings))]
+#![recursion_limit="128"]
+
+#[macro_use] extern crate error_chain;
+#[macro_use] extern crate log;
+#[macro_use] extern crate scoped_tls;
+#[macro_use] extern crate serde_derive;
+#[macro_use] extern crate serde_json;
+extern crate atty;
+extern crate crates_io as registry;
+extern crate crossbeam;
+extern crate curl;
+extern crate docopt;
+extern crate filetime;
+extern crate flate2;
+extern crate fs2;
+extern crate git2;
+extern crate glob;
+extern crate hex;
+extern crate home;
+extern crate ignore;
+extern crate jobserver;
+extern crate libc;
+extern crate libgit2_sys;
+extern crate num_cpus;
+extern crate same_file;
+extern crate semver;
+extern crate serde;
+extern crate serde_ignored;
+extern crate shell_escape;
+extern crate tar;
+extern crate tempdir;
+extern crate termcolor;
+extern crate toml;
+extern crate url;
+#[cfg(target_os = "macos")]
+extern crate core_foundation;
+
+use std::fmt;
+use std::error::Error;
+
+use error_chain::ChainedError;
+use serde::Deserialize;
+use serde::ser;
+use docopt::Docopt;
+
+use core::Shell;
+use core::shell::Verbosity::Verbose;
+
+pub use util::{CargoError, CargoErrorKind, CargoResult, CliError, CliResult, Config};
+
+pub const CARGO_ENV: &'static str = "CARGO";
+
+macro_rules! bail {
+    ($($fmt:tt)*) => (
+        return Err(::util::errors::CargoError::from(format_args!($($fmt)*).to_string()))
+    )
+}
+
+pub mod core;
+pub mod ops;
+pub mod sources;
+pub mod util;
+
+pub struct CommitInfo {
+    pub short_commit_hash: String,
+    pub commit_hash: String,
+    pub commit_date: String,
+}
+
+pub struct CfgInfo {
+    // Information about the git repository we may have been built from.
+    pub commit_info: Option<CommitInfo>,
+    // The release channel we were built for.
+    pub release_channel: String,
+}
+
+pub struct VersionInfo {
+    pub major: String,
+    pub minor: String,
+    pub patch: String,
+    pub pre_release: Option<String>,
+    // Information that's only available when we were built with
+    // configure/make, rather than cargo itself.
+    pub cfg_info: Option<CfgInfo>,
+}
+
+impl fmt::Display for VersionInfo {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "cargo {}.{}.{}",
+               self.major, self.minor, self.patch)?;
+        if let Some(channel) = self.cfg_info.as_ref().map(|ci| &ci.release_channel) {
+            if channel != "stable" {
+                write!(f, "-{}", channel)?;
+                let empty = String::from("");
+                write!(f, "{}", self.pre_release.as_ref().unwrap_or(&empty))?;
+            }
+        };
+
+        if let Some(ref cfg) = self.cfg_info {
+            if let Some(ref ci) = cfg.commit_info {
+                write!(f, " ({} {})",
+                       ci.short_commit_hash, ci.commit_date)?;
+            }
+        };
+        Ok(())
+    }
+}
+
+pub fn call_main_without_stdin<'de, Flags: Deserialize<'de>>(
+            exec: fn(Flags, &mut Config) -> CliResult,
+            config: &mut Config,
+            usage: &str,
+            args: &[String],
+            options_first: bool) -> CliResult
+{
+    let docopt = Docopt::new(usage).unwrap()
+        .options_first(options_first)
+        .argv(args.iter().map(|s| &s[..]))
+        .help(true);
+
+    let flags = docopt.deserialize().map_err(|e| {
+        let code = if e.fatal() {1} else {0};
+        CliError::new(e.to_string().into(), code)
+    })?;
+
+    exec(flags, config)
+}
+
+pub fn print_json<T: ser::Serialize>(obj: &T) {
+    let encoded = serde_json::to_string(&obj).unwrap();
+    println!("{}", encoded);
+}
+
+pub fn exit_with_error(err: CliError, shell: &mut Shell) -> ! {
+    debug!("exit_with_error; err={:?}", err);
+
+    let CliError { error, exit_code, unknown } = err;
+    // exit_code == 0 is non-fatal error, e.g. docopt version info
+    let fatal = exit_code != 0;
+
+    let hide = unknown && shell.verbosity() != Verbose;
+
+    if let Some(error) = error {
+        if hide {
+            drop(shell.error("An unknown error occurred"))
+        } else if fatal {
+            drop(shell.error(&error))
+        } else {
+            drop(writeln!(shell.err(), "{}", error))
+        }
+
+        if !handle_cause(error, shell) || hide {
+            drop(writeln!(shell.err(), "\nTo learn more, run the command again \
+                                        with --verbose."));
+        }
+    }
+
+    std::process::exit(exit_code)
+}
+
+pub fn handle_error(err: CargoError, shell: &mut Shell) {
+    debug!("handle_error; err={:?}", &err);
+
+    let _ignored_result = shell.error(&err);
+    handle_cause(err, shell);
+}
+
+fn handle_cause<E>(cargo_err: E, shell: &mut Shell) -> bool
+    where E: ChainedError + 'static
+{
+    fn print(error: String, shell: &mut Shell) {
+        drop(writeln!(shell.err(), "\nCaused by:"));
+        drop(writeln!(shell.err(), "  {}", error));
+    }
+
+    //Error inspection in non-verbose mode requires inspecting the
+    //error kind to avoid printing Internal errors. The downcasting
+    //machinery requires &(Error + 'static), but the iterator (and
+    //underlying `cause`) return &Error. Because the borrows are
+    //constrained to this handling method, and because the original
+    //error object is constrained to be 'static, we're casting away
+    //the borrow's actual lifetime for purposes of downcasting and
+    //inspecting the error chain
+    unsafe fn extend_lifetime(r: &Error) -> &(Error + 'static) {
+        std::mem::transmute::<&Error, &Error>(r)
+    }
+
+    let verbose = shell.verbosity();
+
+    if verbose == Verbose {
+        //The first error has already been printed to the shell
+        //Print all remaining errors
+        for err in cargo_err.iter().skip(1) {
+            print(err.to_string(), shell);
+        }
+    } else {
+        //The first error has already been printed to the shell
+        //Print remaining errors until one marked as Internal appears
+        for err in cargo_err.iter().skip(1) {
+            let err = unsafe { extend_lifetime(err) };
+            if let Some(&CargoError(CargoErrorKind::Internal(..), ..)) =
+                err.downcast_ref::<CargoError>() {
+                return false;
+            }
+
+            print(err.to_string(), shell);
+        }
+    }
+
+    true
+}
+
+pub fn version() -> VersionInfo {
+    macro_rules! env_str {
+        ($name:expr) => { env!($name).to_string() }
+    }
+    macro_rules! option_env_str {
+        ($name:expr) => { option_env!($name).map(|s| s.to_string()) }
+    }
+    match option_env!("CFG_RELEASE_CHANNEL") {
+        // We have environment variables set up from configure/make.
+        Some(_) => {
+            let commit_info =
+                option_env!("CFG_COMMIT_HASH").map(|s| {
+                    CommitInfo {
+                        commit_hash: s.to_string(),
+                        short_commit_hash: option_env_str!("CFG_SHORT_COMMIT_HASH").unwrap(),
+                        commit_date: option_env_str!("CFG_COMMIT_DATE").unwrap(),
+                    }
+                });
+            VersionInfo {
+                major: env_str!("CARGO_PKG_VERSION_MAJOR"),
+                minor: env_str!("CARGO_PKG_VERSION_MINOR"),
+                patch: env_str!("CARGO_PKG_VERSION_PATCH"),
+                pre_release: option_env_str!("CARGO_PKG_VERSION_PRE"),
+                cfg_info: Some(CfgInfo {
+                    release_channel: option_env_str!("CFG_RELEASE_CHANNEL").unwrap(),
+                    commit_info: commit_info,
+                }),
+            }
+        },
+        // We are being compiled by Cargo itself.
+        None => {
+            VersionInfo {
+                major: env_str!("CARGO_PKG_VERSION_MAJOR"),
+                minor: env_str!("CARGO_PKG_VERSION_MINOR"),
+                patch: env_str!("CARGO_PKG_VERSION_PATCH"),
+                pre_release: option_env_str!("CARGO_PKG_VERSION_PRE"),
+                cfg_info: None,
+            }
+        }
+    }
+}
diff --git a/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_clean.rs b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_clean.rs
new file mode 100644
index 000000000..9b266b0bb
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_clean.rs
@@ -0,0 +1,109 @@
+use std::default::Default;
+use std::fs;
+use std::path::Path;
+
+use core::{Profiles, Workspace};
+use util::Config;
+use util::errors::{CargoResult, CargoResultExt};
+use ops::{self, Context, BuildConfig, Kind, Unit};
+
+pub struct CleanOptions<'a> {
+    pub spec: &'a [String],
+    pub target: Option<&'a str>,
+    pub config: &'a Config,
+    pub release: bool,
+}
+
+/// Cleans the project from build artifacts.
+pub fn clean(ws: &Workspace, opts: &CleanOptions) -> CargoResult<()> {
+    let target_dir = ws.target_dir();
+
+    // If we have a spec, then we need to delete some packages, otherwise, just
+    // remove the whole target directory and be done with it!
+    //
+    // Note that we don't bother grabbing a lock here as we're just going to
+    // blow it all away anyway.
+    if opts.spec.is_empty() {
+        let target_dir = target_dir.into_path_unlocked();
+        return rm_rf(&target_dir);
+    }
+
+    let (packages, resolve) = ops::resolve_ws(ws)?;
+
+    let profiles = ws.profiles();
+    let host_triple = opts.config.rustc()?.host.clone();
+    let mut cx = Context::new(ws, &resolve, &packages, opts.config,
+                              BuildConfig {
+                                  host_triple,
+                                  requested_target: opts.target.map(|s| s.to_owned()),
+                                  release: opts.release,
+                                  jobs: 1,
+                                  ..BuildConfig::default()
+                              },
+                              profiles)?;
+    let mut units = Vec::new();
+
+    for spec in opts.spec {
+        // Translate the spec to a Package
+        let pkgid = resolve.query(spec)?;
+        let pkg = packages.get(pkgid)?;
+
+        // Generate all relevant `Unit` targets for this package
+        for target in pkg.targets() {
+            for kind in [Kind::Host, Kind::Target].iter() {
+                let Profiles {
+                    ref release, ref dev, ref test, ref bench, ref doc,
+                    ref custom_build, ref test_deps, ref bench_deps, ref check,
+                    ref doctest,
+                } = *profiles;
+                let profiles = [release, dev, test, bench, doc, custom_build,
+                                test_deps, bench_deps, check, doctest];
+                for profile in profiles.iter() {
+                    units.push(Unit {
+                        pkg,
+                        target,
+                        profile,
+                        kind: *kind,
+                    });
+                }
+            }
+        }
+    }
+
+    cx.probe_target_info(&units)?;
+
+    for unit in units.iter() {
+        rm_rf(&cx.fingerprint_dir(unit))?;
+        if unit.target.is_custom_build() {
+            if unit.profile.run_custom_build {
+                rm_rf(&cx.build_script_out_dir(unit))?;
+            } else {
+                rm_rf(&cx.build_script_dir(unit))?;
+            }
+            continue
+        }
+
+        for &(ref src, ref link_dst, _) in cx.target_filenames(unit)?.iter() {
+            rm_rf(src)?;
+            if let Some(ref dst) = *link_dst {
+                rm_rf(dst)?;
+            }
+        }
+    }
+
+    Ok(())
+}
+
+fn rm_rf(path: &Path) -> CargoResult<()> {
+    let m = fs::metadata(path);
+    if m.as_ref().map(|s| s.is_dir()).unwrap_or(false) {
+        fs::remove_dir_all(path).chain_err(|| {
+            "could not remove build directory"
+        })?;
+    } else if m.is_ok() {
+        fs::remove_file(path).chain_err(|| {
+            "failed to remove build artifact"
+        })?;
+    }
+    Ok(())
+}
diff --git a/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_compile.rs b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_compile.rs
new file mode 100644
index 000000000..685911203
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_compile.rs
@@ -0,0 +1,784 @@
+//!
+//! Cargo compile currently does the following steps:
+//!
+//! All configurations are already injected as environment variables via the
+//! main cargo command
+//!
+//! 1. Read the manifest
+//! 2. Shell out to `cargo-resolve` with a list of dependencies and sources as
+//!    stdin
+//!
+//!    a. Shell out to `--do update` and `--do list` for each source
+//!    b. Resolve dependencies and return a list of name/version/source
+//!
+//! 3. Shell out to `--do download` for each source
+//! 4. Shell out to `--do get` for each source, and build up the list of paths
+//!    to pass to rustc -L
+//! 5. Call `cargo-rustc` with the results of the resolver zipped together with
+//!    the results of the `get`
+//!
+//!    a. Topologically sort the dependencies
+//!    b. Compile each dependency in order, passing in the -L's pointing at each
+//!       previously compiled dependency
+//!
+
+use std::collections::{HashMap, HashSet};
+use std::default::Default;
+use std::path::PathBuf;
+use std::sync::Arc;
+
+use core::{Source, Package, Target};
+use core::{Profile, TargetKind, Profiles, Workspace, PackageId, PackageIdSpec};
+use core::resolver::Resolve;
+use ops::{self, BuildOutput, Executor, DefaultExecutor};
+use util::config::Config;
+use util::{CargoResult, profile};
+use util::errors::{CargoResultExt, CargoError};
+
+/// Contains information about how a package should be compiled.
+#[derive(Debug)]
+pub struct CompileOptions<'a> {
+    pub config: &'a Config,
+    /// Number of concurrent jobs to use.
+    pub jobs: Option<u32>,
+    /// The target platform to compile for (example: `i686-unknown-linux-gnu`).
+    pub target: Option<&'a str>,
+    /// Extra features to build for the root package
+    pub features: &'a [String],
+    /// Flag whether all available features should be built for the root package
+    pub all_features: bool,
+    /// Flag if the default feature should be built for the root package
+    pub no_default_features: bool,
+    /// A set of packages to build.
+    pub spec: Packages<'a>,
+    /// Filter to apply to the root package to select which targets will be
+    /// built.
+    pub filter: CompileFilter<'a>,
+    /// Whether this is a release build or not
+    pub release: bool,
+    /// Mode for this compile.
+    pub mode: CompileMode,
+    /// `--error_format` flag for the compiler.
+    pub message_format: MessageFormat,
+    /// Extra arguments to be passed to rustdoc (for main crate and dependencies)
+    pub target_rustdoc_args: Option<&'a [String]>,
+    /// The specified target will be compiled with all the available arguments,
+    /// note that this only accounts for the *final* invocation of rustc
+    pub target_rustc_args: Option<&'a [String]>,
+}
+
+impl<'a> CompileOptions<'a> {
+    pub fn default(config: &'a Config, mode: CompileMode) -> CompileOptions<'a>
+    {
+        CompileOptions {
+            config: config,
+            jobs: None,
+            target: None,
+            features: &[],
+            all_features: false,
+            no_default_features: false,
+            spec: ops::Packages::Packages(&[]),
+            mode: mode,
+            release: false,
+            filter: CompileFilter::Default { required_features_filterable: false },
+            message_format: MessageFormat::Human,
+            target_rustdoc_args: None,
+            target_rustc_args: None,
+        }
+    }
+}
+
+#[derive(Clone, Copy, PartialEq, Debug)]
+pub enum CompileMode {
+    Test,
+    Build,
+    Check,
+    Bench,
+    Doc { deps: bool },
+    Doctest,
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize)]
+pub enum MessageFormat {
+    Human,
+    Json
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub enum Packages<'a> {
+    All,
+    OptOut(&'a [String]),
+    Packages(&'a [String]),
+}
+
+impl<'a> Packages<'a> {
+    pub fn from_flags(virtual_ws: bool, all: bool, exclude: &'a [String], package: &'a [String])
+        -> CargoResult<Self>
+    {
+        let all = all || (virtual_ws && package.is_empty());
+
+        let packages = match (all, &exclude) {
+            (true, exclude) if exclude.is_empty() => Packages::All,
+            (true, exclude) => Packages::OptOut(exclude),
+            (false, exclude) if !exclude.is_empty() => bail!("--exclude can only be used together \
+                                                              with --all"),
+            _ => Packages::Packages(package),
+        };
+
+        Ok(packages)
+    }
+
+    pub fn into_package_id_specs(self, ws: &Workspace) -> CargoResult<Vec<PackageIdSpec>> {
+        let specs = match self {
+            Packages::All => {
+                ws.members()
+                  .map(Package::package_id)
+                  .map(PackageIdSpec::from_package_id)
+                  .collect()
+            }
+            Packages::OptOut(opt_out) => {
+                ws.members()
+                  .map(Package::package_id)
+                  .map(PackageIdSpec::from_package_id)
+                  .filter(|p| opt_out.iter().position(|x| *x == p.name()).is_none())
+                  .collect()
+            }
+            Packages::Packages(packages) if packages.is_empty() => {
+                ws.current_opt()
+                  .map(Package::package_id)
+                  .map(PackageIdSpec::from_package_id)
+                  .into_iter().collect()
+            }
+            Packages::Packages(packages) => {
+                packages.iter().map(|p| PackageIdSpec::parse(p)).collect::<CargoResult<Vec<_>>>()?
+            }
+        };
+        Ok(specs)
+    }
+}
+
+#[derive(Clone, Copy, Debug)]
+pub enum FilterRule<'a> {
+    All,
+    Just (&'a [String]),
+}
+
+#[derive(Debug)]
+pub enum CompileFilter<'a> {
+    Default {
+        /// Flag whether targets can be safely skipped when required-features are not satisfied.
+        required_features_filterable: bool,
+    },
+    Only {
+        lib: bool,
+        bins: FilterRule<'a>,
+        examples: FilterRule<'a>,
+        tests: FilterRule<'a>,
+        benches: FilterRule<'a>,
+    }
+}
+
+pub fn compile<'a>(ws: &Workspace<'a>, options: &CompileOptions<'a>)
+                   -> CargoResult<ops::Compilation<'a>> {
+    compile_with_exec(ws, options, Arc::new(DefaultExecutor))
+}
+
+pub fn compile_with_exec<'a>(ws: &Workspace<'a>,
+                             options: &CompileOptions<'a>,
+                             exec: Arc<Executor>)
+                             -> CargoResult<ops::Compilation<'a>> {
+    for member in ws.members() {
+        for warning in member.manifest().warnings().iter() {
+            if warning.is_critical {
+                let err: CargoResult<_> = Err(CargoError::from(warning.message.to_owned()));
+                return err.chain_err(|| {
+                    format!("failed to parse manifest at `{}`", member.manifest_path().display())
+                })
+            } else {
+                options.config.shell().warn(&warning.message)?
+            }
+        }
+    }
+    compile_ws(ws, None, options, exec)
+}
+
+pub fn compile_ws<'a>(ws: &Workspace<'a>,
+                      source: Option<Box<Source + 'a>>,
+                      options: &CompileOptions<'a>,
+                      exec: Arc<Executor>)
+                      -> CargoResult<ops::Compilation<'a>> {
+    let CompileOptions { config, jobs, target, spec, features,
+                         all_features, no_default_features,
+                         release, mode, message_format,
+                         ref filter,
+                         ref target_rustdoc_args,
+                         ref target_rustc_args } = *options;
+
+    let target = target.map(|s| s.to_string());
+
+    if jobs == Some(0) {
+        bail!("jobs must be at least 1")
+    }
+
+    let profiles = ws.profiles();
+
+    let specs = spec.into_package_id_specs(ws)?;
+    let resolve = ops::resolve_ws_precisely(ws,
+                                            source,
+                                            features,
+                                            all_features,
+                                            no_default_features,
+                                            &specs)?;
+    let (packages, resolve_with_overrides) = resolve;
+
+    if specs.is_empty() {
+        return Err(format!("manifest path `{}` contains no package: The manifest is virtual, \
+                            and the workspace has no members.", ws.current_manifest().display()).into());
+    };
+
+    let to_builds = specs.iter().map(|p| {
+        let pkgid = p.query(resolve_with_overrides.iter())?;
+        let p = packages.get(pkgid)?;
+        p.manifest().print_teapot(ws.config());
+        Ok(p)
+    }).collect::<CargoResult<Vec<_>>>()?;
+
+    let mut general_targets = Vec::new();
+    let mut package_targets = Vec::new();
+
+    match (*target_rustc_args, *target_rustdoc_args) {
+        (Some(..), _) |
+        (_, Some(..)) if to_builds.len() != 1 => {
+            panic!("`rustc` and `rustdoc` should not accept multiple `-p` flags")
+        }
+        (Some(args), _) => {
+            let all_features = resolve_all_features(&resolve_with_overrides,
+                                                    to_builds[0].package_id());
+            let targets = generate_targets(to_builds[0], profiles,
+                                           mode, filter, &all_features, release)?;
+            if targets.len() == 1 {
+                let (target, profile) = targets[0];
+                let mut profile = profile.clone();
+                profile.rustc_args = Some(args.to_vec());
+                general_targets.push((target, profile));
+            } else {
+                bail!("extra arguments to `rustc` can only be passed to one \
+                       target, consider filtering\nthe package by passing \
+                       e.g. `--lib` or `--bin NAME` to specify a single target")
+            }
+        }
+        (None, Some(args)) => {
+            let all_features = resolve_all_features(&resolve_with_overrides,
+                                                    to_builds[0].package_id());
+            let targets = generate_targets(to_builds[0], profiles,
+                                           mode, filter, &all_features, release)?;
+            if targets.len() == 1 {
+                let (target, profile) = targets[0];
+                let mut profile = profile.clone();
+                profile.rustdoc_args = Some(args.to_vec());
+                general_targets.push((target, profile));
+            } else {
+                bail!("extra arguments to `rustdoc` can only be passed to one \
+                       target, consider filtering\nthe package by passing e.g. \
+                       `--lib` or `--bin NAME` to specify a single target")
+            }
+        }
+        (None, None) => {
+            for &to_build in to_builds.iter() {
+                let all_features = resolve_all_features(&resolve_with_overrides,
+                                                        to_build.package_id());
+                let targets = generate_targets(to_build, profiles, mode,
+                                               filter, &all_features, release)?;
+                package_targets.push((to_build, targets));
+            }
+        }
+    };
+
+    for &(target, ref profile) in &general_targets {
+        for &to_build in to_builds.iter() {
+            package_targets.push((to_build, vec![(target, profile)]));
+        }
+    }
+
+    let mut ret = {
+        let _p = profile::start("compiling");
+        let mut build_config = scrape_build_config(config, jobs, target)?;
+        build_config.release = release;
+        build_config.test = mode == CompileMode::Test || mode == CompileMode::Bench;
+        build_config.json_messages = message_format == MessageFormat::Json;
+        if let CompileMode::Doc { deps } = mode {
+            build_config.doc_all = deps;
+        }
+
+        ops::compile_targets(ws,
+                             &package_targets,
+                             &packages,
+                             &resolve_with_overrides,
+                             config,
+                             build_config,
+                             profiles,
+                             exec)?
+    };
+
+    ret.to_doc_test = to_builds.into_iter().cloned().collect();
+
+    return Ok(ret);
+
+    fn resolve_all_features(resolve_with_overrides: &Resolve,
+                            package_id: &PackageId)
+                            -> HashSet<String> {
+        let mut features = resolve_with_overrides.features(package_id).clone();
+
+        // Include features enabled for use by dependencies so targets can
+        // also use them with the required-features field when deciding
+        // whether to be built or skipped.
+        let deps = resolve_with_overrides.deps(package_id);
+        for dep in deps {
+            for feature in resolve_with_overrides.features(dep) {
+                features.insert(dep.name().to_string() + "/" + feature);
+            }
+        }
+
+        features
+    }
+}
+
+impl<'a> FilterRule<'a> {
+    pub fn new(targets: &'a [String], all: bool) -> FilterRule<'a> {
+        if all {
+            FilterRule::All
+        } else {
+            FilterRule::Just(targets)
+        }
+    }
+
+    fn matches(&self, target: &Target) -> bool {
+        match *self {
+            FilterRule::All => true,
+            FilterRule::Just(targets) => {
+                targets.iter().any(|x| *x == target.name())
+            },
+        }
+    }
+
+    fn is_specific(&self) -> bool {
+        match *self {
+            FilterRule::All => true,
+            FilterRule::Just(targets) => !targets.is_empty(),
+        }
+    }
+
+    pub fn try_collect(&self) -> Option<Vec<String>> {
+        match *self {
+            FilterRule::All => None,
+            FilterRule::Just(targets) => Some(targets.to_vec()),
+        }
+    }
+}
+
+impl<'a> CompileFilter<'a> {
+    pub fn new(lib_only: bool,
+               bins: &'a [String], all_bins: bool,
+               tsts: &'a [String], all_tsts: bool,
+               exms: &'a [String], all_exms: bool,
+               bens: &'a [String], all_bens: bool,
+               all_targets: bool) -> CompileFilter<'a> {
+        let rule_bins = FilterRule::new(bins, all_bins);
+        let rule_tsts = FilterRule::new(tsts, all_tsts);
+        let rule_exms = FilterRule::new(exms, all_exms);
+        let rule_bens = FilterRule::new(bens, all_bens);
+
+        if all_targets {
+            CompileFilter::Only {
+                lib: true, bins: FilterRule::All,
+                examples: FilterRule::All, benches: FilterRule::All,
+                tests: FilterRule::All,
+            }
+        } else if lib_only || rule_bins.is_specific() || rule_tsts.is_specific()
+                           || rule_exms.is_specific() || rule_bens.is_specific() {
+            CompileFilter::Only {
+                lib: lib_only, bins: rule_bins,
+                examples: rule_exms, benches: rule_bens,
+                tests: rule_tsts,
+            }
+        } else {
+            CompileFilter::Default {
+                required_features_filterable: true,
+            }
+        }
+    }
+
+    pub fn matches(&self, target: &Target) -> bool {
+        match *self {
+            CompileFilter::Default { .. } => true,
+            CompileFilter::Only { lib, bins, examples, tests, benches } => {
+                let rule = match *target.kind() {
+                    TargetKind::Bin => bins,
+                    TargetKind::Test => tests,
+                    TargetKind::Bench => benches,
+                    TargetKind::ExampleBin |
+                    TargetKind::ExampleLib(..) => examples,
+                    TargetKind::Lib(..) => return lib,
+                    TargetKind::CustomBuild => return false,
+                };
+                rule.matches(target)
+            }
+        }
+    }
+
+    pub fn is_specific(&self) -> bool {
+        match *self {
+            CompileFilter::Default { .. } => false,
+            CompileFilter::Only { .. } => true,
+        }
+    }
+}
+
+#[derive(Clone, Copy, Debug)]
+struct BuildProposal<'a> {
+    target: &'a Target,
+    profile: &'a Profile,
+    required: bool,
+}
+
+fn generate_auto_targets<'a>(mode: CompileMode, targets: &'a [Target],
+                             profile: &'a Profile,
+                             dep: &'a Profile,
+                             required_features_filterable: bool) -> Vec<BuildProposal<'a>> {
+    match mode {
+        CompileMode::Bench => {
+            targets.iter().filter(|t| t.benched()).map(|t| {
+                BuildProposal {
+                    target: t,
+                    profile: profile,
+                    required: !required_features_filterable,
+                }
+            }).collect::<Vec<_>>()
+        }
+        CompileMode::Test => {
+            let mut base = targets.iter().filter(|t| {
+                t.tested()
+            }).map(|t| {
+                BuildProposal {
+                    target: t,
+                    profile: if t.is_example() {dep} else {profile},
+                    required: !required_features_filterable,
+                }
+            }).collect::<Vec<_>>();
+
+            // Always compile the library if we're testing everything as
+            // it'll be needed for doctests
+            if let Some(t) = targets.iter().find(|t| t.is_lib()) {
+                if t.doctested() {
+                    base.push(BuildProposal {
+                        target: t,
+                        profile: dep,
+                        required: !required_features_filterable,
+                    });
+                }
+            }
+            base
+        }
+        CompileMode::Build | CompileMode::Check => {
+            targets.iter().filter(|t| {
+                t.is_bin() || t.is_lib()
+            }).map(|t| BuildProposal {
+                target: t,
+                profile: profile,
+                required: !required_features_filterable,
+            }).collect()
+        }
+        CompileMode::Doc { .. } => {
+            targets.iter().filter(|t| {
+                t.documented()
+            }).map(|t| BuildProposal {
+                target: t,
+                profile: profile,
+                required: !required_features_filterable,
+            }).collect()
+        }
+        CompileMode::Doctest => {
+            if let Some(t) = targets.iter().find(|t| t.is_lib()) {
+                if t.doctested() {
+                    return vec![BuildProposal {
+                        target: t,
+                        profile: profile,
+                        required: !required_features_filterable,
+                    }];
+                }
+            }
+
+            Vec::new()
+        }
+    }
+}
+
+/// Given a filter rule and some context, propose a list of targets
+fn propose_indicated_targets<'a>(pkg: &'a Package,
+                                 rule: FilterRule,
+                                 desc: &'static str,
+                                 is_expected_kind: fn(&Target) -> bool,
+                                 profile: &'a Profile) -> CargoResult<Vec<BuildProposal<'a>>> {
+    match rule {
+        FilterRule::All => {
+            let result = pkg.targets().iter().filter(|t| is_expected_kind(t)).map(|t| {
+                BuildProposal {
+                    target: t,
+                    profile: profile,
+                    required: false,
+                }
+            });
+            Ok(result.collect())
+        }
+        FilterRule::Just(names) => {
+            let mut targets = Vec::new();
+            for name in names {
+                let target = pkg.targets().iter().find(|t| {
+                    t.name() == *name && is_expected_kind(t)
+                });
+                let t = match target {
+                    Some(t) => t,
+                    None => {
+                        let suggestion = pkg.find_closest_target(name, is_expected_kind);
+                        match suggestion {
+                            Some(s) => {
+                                let suggested_name = s.name();
+                                bail!("no {} target named `{}`\n\nDid you mean `{}`?",
+                                      desc, name, suggested_name)
+                            }
+                            None => bail!("no {} target named `{}`", desc, name),
+                        }
+                    }
+                };
+                debug!("found {} `{}`", desc, name);
+                targets.push(BuildProposal {
+                    target: t,
+                    profile: profile,
+                    required: true,
+                });
+            }
+            Ok(targets)
+        }
+    }
+}
+
+/// Collect the targets that are libraries or have all required features available.
+fn filter_compatible_targets<'a>(mut proposals: Vec<BuildProposal<'a>>,
+                                 features: &HashSet<String>)
+                                 -> CargoResult<Vec<(&'a Target, &'a Profile)>> {
+    let mut compatible = Vec::with_capacity(proposals.len());
+    for proposal in proposals.drain(..) {
+        let unavailable_features = match proposal.target.required_features() {
+            Some(rf) => rf.iter().filter(|f| !features.contains(*f)).collect(),
+            None => Vec::new(),
+        };
+        if proposal.target.is_lib() || unavailable_features.is_empty() {
+            compatible.push((proposal.target, proposal.profile));
+        } else if proposal.required {
+            let required_features = proposal.target.required_features().unwrap();
+            let quoted_required_features: Vec<String> = required_features.iter()
+                                                                         .map(|s| format!("`{}`", s))
+                                                                         .collect();
+            bail!("target `{}` requires the features: {}\n\
+                   Consider enabling them by passing e.g. `--features=\"{}\"`",
+                  proposal.target.name(),
+                  quoted_required_features.join(", "),
+                  required_features.join(" "));
+        }
+    }
+    Ok(compatible)
+}
+
+/// Given the configuration for a build, this function will generate all
+/// target/profile combinations needed to be built.
+fn generate_targets<'a>(pkg: &'a Package,
+                        profiles: &'a Profiles,
+                        mode: CompileMode,
+                        filter: &CompileFilter,
+                        features: &HashSet<String>,
+                        release: bool)
+                        -> CargoResult<Vec<(&'a Target, &'a Profile)>> {
+    let build = if release {&profiles.release} else {&profiles.dev};
+    let test = if release {&profiles.bench} else {&profiles.test};
+    let profile = match mode {
+        CompileMode::Test => test,
+        CompileMode::Bench => &profiles.bench,
+        CompileMode::Build => build,
+        CompileMode::Check => &profiles.check,
+        CompileMode::Doc { .. } => &profiles.doc,
+        CompileMode::Doctest => &profiles.doctest,
+    };
+
+    let targets = match *filter {
+        CompileFilter::Default { required_features_filterable } => {
+            let deps = if release {
+                &profiles.bench_deps
+            } else {
+                &profiles.test_deps
+            };
+            generate_auto_targets(mode, pkg.targets(), profile, deps, required_features_filterable)
+        }
+        CompileFilter::Only { lib, bins, examples, tests, benches } => {
+            let mut targets = Vec::new();
+
+            if lib {
+                if let Some(t) = pkg.targets().iter().find(|t| t.is_lib()) {
+                    targets.push(BuildProposal {
+                        target: t,
+                        profile: profile,
+                        required: true,
+                    });
+                } else {
+                    bail!("no library targets found")
+                }
+            }
+
+            targets.append(&mut propose_indicated_targets(
+                pkg, bins, "bin", Target::is_bin, profile)?);
+            targets.append(&mut propose_indicated_targets(
+                pkg, examples, "example", Target::is_example, build)?);
+            targets.append(&mut propose_indicated_targets(
+                pkg, tests, "test", Target::is_test, test)?);
+            targets.append(&mut propose_indicated_targets(
+                pkg, benches, "bench", Target::is_bench, &profiles.bench)?);
+            targets
+        }
+    };
+
+    filter_compatible_targets(targets, features)
+}
+
+/// Parse all config files to learn about build configuration. Currently
+/// configured options are:
+///
+/// * build.jobs
+/// * build.target
+/// * target.$target.ar
+/// * target.$target.linker
+/// * target.$target.libfoo.metadata
+fn scrape_build_config(config: &Config,
+                       jobs: Option<u32>,
+                       target: Option<String>)
+                       -> CargoResult<ops::BuildConfig> {
+    if jobs.is_some() && config.jobserver_from_env().is_some() {
+        config.shell().warn("a `-j` argument was passed to Cargo but Cargo is \
+                             also configured with an external jobserver in \
+                             its environment, ignoring the `-j` parameter")?;
+    }
+    let cfg_jobs = match config.get_i64("build.jobs")? {
+        Some(v) => {
+            if v.val <= 0 {
+                bail!("build.jobs must be positive, but found {} in {}",
+                      v.val, v.definition)
+            } else if v.val >= i64::from(u32::max_value()) {
+                bail!("build.jobs is too large: found {} in {}", v.val,
+                      v.definition)
+            } else {
+                Some(v.val as u32)
+            }
+        }
+        None => None,
+    };
+    let jobs = jobs.or(cfg_jobs).unwrap_or(::num_cpus::get() as u32);
+    let cfg_target = config.get_string("build.target")?.map(|s| s.val);
+    let target = target.or(cfg_target);
+    let mut base = ops::BuildConfig {
+        host_triple: config.rustc()?.host.clone(),
+        requested_target: target.clone(),
+        jobs: jobs,
+        ..Default::default()
+    };
+    base.host = scrape_target_config(config, &base.host_triple)?;
+    base.target = match target.as_ref() {
+        Some(triple) => scrape_target_config(config, triple)?,
+        None => base.host.clone(),
+    };
+    Ok(base)
+}
+
+fn scrape_target_config(config: &Config, triple: &str)
+                        -> CargoResult<ops::TargetConfig> {
+
+    let key = format!("target.{}", triple);
+    let mut ret = ops::TargetConfig {
+        ar: config.get_path(&format!("{}.ar", key))?.map(|v| v.val),
+        linker: config.get_path(&format!("{}.linker", key))?.map(|v| v.val),
+        overrides: HashMap::new(),
+    };
+    let table = match config.get_table(&key)? {
+        Some(table) => table.val,
+        None => return Ok(ret),
+    };
+    for (lib_name, value) in table {
+        match lib_name.as_str() {
+            "ar" | "linker" | "runner" | "rustflags" => {
+                continue
+            },
+            _ => {}
+        }
+
+        let mut output = BuildOutput {
+            library_paths: Vec::new(),
+            library_links: Vec::new(),
+            cfgs: Vec::new(),
+            env: Vec::new(),
+            metadata: Vec::new(),
+            rerun_if_changed: Vec::new(),
+            rerun_if_env_changed: Vec::new(),
+            warnings: Vec::new(),
+        };
+        // We require deterministic order of evaluation, so we must sort the pairs by key first.
+        let mut pairs = Vec::new();
+        for (k, value) in value.table(&lib_name)?.0 {
+            pairs.push((k, value));
+        }
+        pairs.sort_by_key(|p| p.0);
+        for (k, value) in pairs {
+            let key = format!("{}.{}", key, k);
+            match &k[..] {
+                "rustc-flags" => {
+                    let (flags, definition) = value.string(k)?;
+                    let whence = format!("in `{}` (in {})", key,
+                                         definition.display());
+                    let (paths, links) =
+                        BuildOutput::parse_rustc_flags(flags, &whence)?;
+                    output.library_paths.extend(paths);
+                    output.library_links.extend(links);
+                }
+                "rustc-link-lib" => {
+                    let list = value.list(k)?;
+                    output.library_links.extend(list.iter()
+                                                    .map(|v| v.0.clone()));
+                }
+                "rustc-link-search" => {
+                    let list = value.list(k)?;
+                    output.library_paths.extend(list.iter().map(|v| {
+                        PathBuf::from(&v.0)
+                    }));
+                }
+                "rustc-cfg" => {
+                    let list = value.list(k)?;
+                    output.cfgs.extend(list.iter().map(|v| v.0.clone()));
+                }
+                "rustc-env" => {
+                    for (name, val) in value.table(k)?.0 {
+                        let val = val.string(name)?.0;
+                        output.env.push((name.clone(), val.to_string()));
+                    }
+                }
+                "warning" |
+                "rerun-if-changed" |
+                "rerun-if-env-changed" => {
+                    bail!("`{}` is not supported in build script overrides", k);
+                }
+                _ => {
+                    let val = value.string(k)?.0;
+                    output.metadata.push((k.clone(), val.to_string()));
+                }
+            }
+        }
+        ret.overrides.insert(lib_name, output);
+    }
+
+    Ok(ret)
+}
diff --git a/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_doc.rs b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_doc.rs
new file mode 100644
index 000000000..d4d562036
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_doc.rs
@@ -0,0 +1,149 @@
+use std::collections::HashMap;
+use std::fs;
+use std::path::Path;
+use std::process::Command;
+
+use core::Workspace;
+use ops;
+use util::CargoResult;
+
+pub struct DocOptions<'a> {
+    pub open_result: bool,
+    pub compile_opts: ops::CompileOptions<'a>,
+}
+
+pub fn doc(ws: &Workspace, options: &DocOptions) -> CargoResult<()> {
+    let specs = options.compile_opts.spec.into_package_id_specs(ws)?;
+    let resolve = ops::resolve_ws_precisely(ws,
+                                            None,
+                                            options.compile_opts.features,
+                                            options.compile_opts.all_features,
+                                            options.compile_opts.no_default_features,
+                                            &specs)?;
+    let (packages, resolve_with_overrides) = resolve;
+
+    if specs.is_empty() {
+        return Err(format!("manifest path `{}` contains no package: The manifest is virtual, \
+                            and the workspace has no members.", ws.current_manifest().display()).into());
+    };
+
+    let pkgs = specs.iter().map(|p| {
+        let pkgid = p.query(resolve_with_overrides.iter())?;
+        packages.get(pkgid)
+    }).collect::<CargoResult<Vec<_>>>()?;
+
+    let mut lib_names = HashMap::new();
+    let mut bin_names = HashMap::new();
+    for package in &pkgs {
+        for target in package.targets().iter().filter(|t| t.documented()) {
+            if target.is_lib() {
+                if let Some(prev) = lib_names.insert(target.crate_name(), package) {
+                    bail!("The library `{}` is specified by packages `{}` and \
+                           `{}` but can only be documented once. Consider renaming \
+                           or marking one of the targets as `doc = false`.",
+                          target.crate_name(), prev, package);
+                }
+            } else {
+                if let Some(prev) = bin_names.insert(target.crate_name(), package) {
+                    bail!("The binary `{}` is specified by packages `{}` and \
+                           `{}` but can be documented only once. Consider renaming \
+                           or marking one of the targets as `doc = false`.",
+                          target.crate_name(), prev, package);
+                }
+            }
+        }
+        for (bin, bin_package) in bin_names.iter() {
+            if let Some(lib_package) = lib_names.get(bin) {
+                bail!("The target `{}` is specified as a library {}. It can be \
+                       documented only once. Consider renaming or marking one \
+                       of the targets as `doc = false`.",
+                      bin,
+                      if lib_package == bin_package {
+                          format!("and as a binary by package `{}`", lib_package)
+                      } else {
+                          format!("by package `{}` and as a binary by \
+                                   package `{}`", lib_package, bin_package)
+                      });
+            }
+        }
+    }
+
+    ops::compile(ws, &options.compile_opts)?;
+
+    if options.open_result {
+        let name = if pkgs.len() > 1 {
+            bail!("Passing multiple packages and `open` is not supported")
+        } else if pkgs.len() == 1 {
+            pkgs[0].name().replace("-", "_")
+        } else {
+            match lib_names.keys().chain(bin_names.keys()).nth(0) {
+                Some(s) => s.to_string(),
+                None => return Ok(()),
+            }
+        };
+
+        // Don't bother locking here as if this is getting deleted there's
+        // nothing we can do about it and otherwise if it's getting overwritten
+        // then that's also ok!
+        let mut target_dir = ws.target_dir();
+        if let Some(triple) = options.compile_opts.target {
+            target_dir.push(Path::new(triple).file_stem().unwrap());
+        }
+        let path = target_dir.join("doc").join(&name).join("index.html");
+        let path = path.into_path_unlocked();
+        if fs::metadata(&path).is_ok() {
+            let mut shell = options.compile_opts.config.shell();
+            shell.status("Opening", path.display())?;
+            match open_docs(&path) {
+                Ok(m) => shell.status("Launching", m)?,
+                Err(e) => {
+                    shell.warn(
+                            "warning: could not determine a browser to open docs with, tried:")?;
+                    for method in e {
+                        shell.warn(format!("\t{}", method))?;
+                    }
+                }
+            }
+        }
+    }
+
+    Ok(())
+}
+
+#[cfg(not(any(target_os = "windows", target_os = "macos")))]
+fn open_docs(path: &Path) -> Result<&'static str, Vec<&'static str>> {
+    use std::env;
+    let mut methods = Vec::new();
+    // trying $BROWSER
+    if let Ok(name) = env::var("BROWSER") {
+        match Command::new(name).arg(path).status() {
+            Ok(_) => return Ok("$BROWSER"),
+            Err(_) => methods.push("$BROWSER"),
+        }
+    }
+
+    for m in ["xdg-open", "gnome-open", "kde-open"].iter() {
+        match Command::new(m).arg(path).status() {
+            Ok(_) => return Ok(m),
+            Err(_) => methods.push(m),
+        }
+    }
+
+    Err(methods)
+}
+
+#[cfg(target_os = "windows")]
+fn open_docs(path: &Path) -> Result<&'static str, Vec<&'static str>> {
+    match Command::new("cmd").arg("/C").arg(path).status() {
+        Ok(_) => Ok("cmd /C"),
+        Err(_) => Err(vec!["cmd /C"]),
+    }
+}
+
+#[cfg(target_os = "macos")]
+fn open_docs(path: &Path) -> Result<&'static str, Vec<&'static str>> {
+    match Command::new("open").arg(path).status() {
+        Ok(_) => Ok("open"),
+        Err(_) => Err(vec!["open"]),
+    }
+}
diff --git a/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_fetch.rs b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_fetch.rs
new file mode 100644
index 000000000..80dfdd085
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_fetch.rs
@@ -0,0 +1,12 @@
+use core::{Resolve, PackageSet, Workspace};
+use ops;
+use util::CargoResult;
+
+/// Executes `cargo fetch`.
+pub fn fetch<'a>(ws: &Workspace<'a>) -> CargoResult<(Resolve, PackageSet<'a>)> {
+    let (packages, resolve) = ops::resolve_ws(ws)?;
+    for id in resolve.iter() {
+        packages.get(id)?;
+    }
+    Ok((resolve, packages))
+}
diff --git a/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_generate_lockfile.rs b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_generate_lockfile.rs
new file mode 100644
index 000000000..d07ee9672
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_generate_lockfile.rs
@@ -0,0 +1,184 @@
+use std::collections::{BTreeMap, HashSet};
+
+use core::PackageId;
+use core::registry::PackageRegistry;
+use core::{Resolve, SourceId, Workspace};
+use core::resolver::Method;
+use ops;
+use util::config::Config;
+use util::CargoResult;
+
+pub struct UpdateOptions<'a> {
+    pub config: &'a Config,
+    pub to_update: &'a [String],
+    pub precise: Option<&'a str>,
+    pub aggressive: bool,
+}
+
+pub fn generate_lockfile(ws: &Workspace) -> CargoResult<()> {
+    let mut registry = PackageRegistry::new(ws.config())?;
+    let resolve = ops::resolve_with_previous(&mut registry, ws,
+                                             Method::Everything,
+                                             None, None, &[], true)?;
+    ops::write_pkg_lockfile(ws, &resolve)?;
+    Ok(())
+}
+
+pub fn update_lockfile(ws: &Workspace, opts: &UpdateOptions)
+                       -> CargoResult<()> {
+
+    if opts.aggressive && opts.precise.is_some() {
+        bail!("cannot specify both aggressive and precise simultaneously")
+    }
+
+    if ws.members().is_empty() {
+        bail!("you can't generate a lockfile for an empty workspace.")
+    }
+
+    let previous_resolve = match ops::load_pkg_lockfile(ws)? {
+        Some(resolve) => resolve,
+        None => return generate_lockfile(ws),
+    };
+    let mut registry = PackageRegistry::new(opts.config)?;
+    let mut to_avoid = HashSet::new();
+
+    if opts.to_update.is_empty() {
+        to_avoid.extend(previous_resolve.iter());
+    } else {
+        let mut sources = Vec::new();
+        for name in opts.to_update {
+            let dep = previous_resolve.query(name)?;
+            if opts.aggressive {
+                fill_with_deps(&previous_resolve, dep, &mut to_avoid,
+                               &mut HashSet::new());
+            } else {
+                to_avoid.insert(dep);
+                sources.push(match opts.precise {
+                    Some(precise) => {
+                        // TODO: see comment in `resolve.rs` as well, but this
+                        //       seems like a pretty hokey reason to single out
+                        //       the registry as well.
+                        let precise = if dep.source_id().is_registry() {
+                            format!("{}={}", dep.name(), precise)
+                        } else {
+                            precise.to_string()
+                        };
+                        dep.source_id().clone().with_precise(Some(precise))
+                    }
+                    None => {
+                        dep.source_id().clone().with_precise(None)
+                    }
+                });
+            }
+        }
+        registry.add_sources(&sources)?;
+    }
+
+    let resolve = ops::resolve_with_previous(&mut registry,
+                                             ws,
+                                             Method::Everything,
+                                             Some(&previous_resolve),
+                                             Some(&to_avoid),
+                                             &[],
+                                             true)?;
+
+    // Summarize what is changing for the user.
+    let print_change = |status: &str, msg: String| {
+        opts.config.shell().status(status, msg)
+    };
+    for (removed, added) in compare_dependency_graphs(&previous_resolve, &resolve) {
+        if removed.len() == 1 && added.len() == 1 {
+            let msg = if removed[0].source_id().is_git() {
+                format!("{} -> #{}", removed[0],
+                        &added[0].source_id().precise().unwrap()[..8])
+            } else {
+                format!("{} -> v{}", removed[0], added[0].version())
+            };
+            print_change("Updating", msg)?;
+        } else {
+            for package in removed.iter() {
+                print_change("Removing", format!("{}", package))?;
+            }
+            for package in added.iter() {
+                print_change("Adding", format!("{}", package))?;
+            }
+        }
+    }
+
+    ops::write_pkg_lockfile(ws, &resolve)?;
+    return Ok(());
+
+    fn fill_with_deps<'a>(resolve: &'a Resolve, dep: &'a PackageId,
+                          set: &mut HashSet<&'a PackageId>,
+                          visited: &mut HashSet<&'a PackageId>) {
+        if !visited.insert(dep) {
+            return
+        }
+        set.insert(dep);
+        for dep in resolve.deps(dep) {
+            fill_with_deps(resolve, dep, set, visited);
+        }
+    }
+
+    fn compare_dependency_graphs<'a>(previous_resolve: &'a Resolve,
+                                     resolve: &'a Resolve) ->
+            Vec<(Vec<&'a PackageId>, Vec<&'a PackageId>)> {
+        fn key(dep: &PackageId) -> (&str, &SourceId) {
+            (dep.name(), dep.source_id())
+        }
+
+        // Removes all package ids in `b` from `a`. Note that this is somewhat
+        // more complicated because the equality for source ids does not take
+        // precise versions into account (e.g. git shas), but we want to take
+        // that into account here.
+        fn vec_subtract<'a>(a: &[&'a PackageId],
+                            b: &[&'a PackageId]) -> Vec<&'a PackageId> {
+            a.iter().filter(|a| {
+                // If this package id is not found in `b`, then it's definitely
+                // in the subtracted set
+                let i = match b.binary_search(a) {
+                    Ok(i) => i,
+                    Err(..) => return true,
+                };
+
+                // If we've found `a` in `b`, then we iterate over all instances
+                // (we know `b` is sorted) and see if they all have different
+                // precise versions. If so, then `a` isn't actually in `b` so
+                // we'll let it through.
+                //
+                // Note that we only check this for non-registry sources,
+                // however, as registries contain enough version information in
+                // the package id to disambiguate
+                if a.source_id().is_registry() {
+                    return false
+                }
+                b[i..].iter().take_while(|b| a == b).all(|b| {
+                    a.source_id().precise() != b.source_id().precise()
+                })
+            }).cloned().collect()
+        }
+
+        // Map (package name, package source) to (removed versions, added versions).
+        let mut changes = BTreeMap::new();
+        let empty = (Vec::new(), Vec::new());
+        for dep in previous_resolve.iter() {
+            changes.entry(key(dep)).or_insert(empty.clone()).0.push(dep);
+        }
+        for dep in resolve.iter() {
+            changes.entry(key(dep)).or_insert(empty.clone()).1.push(dep);
+        }
+
+        for v in changes.values_mut() {
+            let (ref mut old, ref mut new) = *v;
+            old.sort();
+            new.sort();
+            let removed = vec_subtract(old, new);
+            let added = vec_subtract(new, old);
+            *old = removed;
+            *new = added;
+        }
+        debug!("{:#?}", changes);
+
+        changes.into_iter().map(|(_, v)| v).collect()
+    }
+}
diff --git a/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_install.rs b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_install.rs
new file mode 100644
index 000000000..12fe51c44
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_install.rs
@@ -0,0 +1,633 @@
+use std::collections::btree_map::Entry;
+use std::collections::{BTreeMap, BTreeSet};
+use std::{env, fs};
+use std::io::prelude::*;
+use std::io::SeekFrom;
+use std::path::{Path, PathBuf};
+use std::sync::Arc;
+
+use semver::{Version, VersionReq};
+use tempdir::TempDir;
+use toml;
+
+use core::{SourceId, Source, Package, Dependency, PackageIdSpec};
+use core::{PackageId, Workspace};
+use ops::{self, CompileFilter, DefaultExecutor};
+use sources::{GitSource, PathSource, SourceConfigMap};
+use util::{Config, internal};
+use util::{Filesystem, FileLock};
+use util::errors::{CargoError, CargoResult, CargoResultExt};
+
+#[derive(Deserialize, Serialize)]
+#[serde(untagged)]
+enum CrateListing {
+    V1(CrateListingV1),
+    Empty(Empty),
+}
+
+#[derive(Deserialize, Serialize)]
+#[serde(deny_unknown_fields)]
+struct Empty {}
+
+#[derive(Deserialize, Serialize)]
+struct CrateListingV1 {
+    v1: BTreeMap<PackageId, BTreeSet<String>>,
+}
+
+struct Transaction {
+    bins: Vec<PathBuf>,
+}
+
+impl Transaction {
+    fn success(mut self) {
+        self.bins.clear();
+    }
+}
+
+impl Drop for Transaction {
+    fn drop(&mut self) {
+        for bin in self.bins.iter() {
+            let _ = fs::remove_file(bin);
+        }
+    }
+}
+
+pub fn install(root: Option<&str>,
+               krates: Vec<&str>,
+               source_id: &SourceId,
+               vers: Option<&str>,
+               opts: &ops::CompileOptions,
+               force: bool) -> CargoResult<()> {
+    let root = resolve_root(root, opts.config)?;
+    let map = SourceConfigMap::new(opts.config)?;
+
+    let (installed_anything, scheduled_error) = if krates.len() <= 1 {
+        install_one(&root, &map, krates.into_iter().next(), source_id, vers, opts,
+                    force, true)?;
+        (true, false)
+    } else {
+        let mut succeeded = vec![];
+        let mut failed = vec![];
+        let mut first = true;
+        for krate in krates {
+            let root = root.clone();
+            let map = map.clone();
+            match install_one(&root, &map, Some(krate), source_id, vers,
+                              opts, force, first) {
+                Ok(()) => succeeded.push(krate),
+                Err(e) => {
+                    ::handle_error(e, &mut opts.config.shell());
+                    failed.push(krate)
+                }
+            }
+            first = false;
+        }
+
+        let mut summary = vec![];
+        if !succeeded.is_empty() {
+            summary.push(format!("Successfully installed {}!", succeeded.join(", ")));
+        }
+        if !failed.is_empty() {
+            summary.push(format!("Failed to install {} (see error(s) above).", failed.join(", ")));
+        }
+        if !succeeded.is_empty() || !failed.is_empty() {
+            opts.config.shell().status("\nSummary:", summary.join(" "))?;
+        }
+
+        (!succeeded.is_empty(), !failed.is_empty())
+    };
+
+    if installed_anything {
+        // Print a warning that if this directory isn't in PATH that they won't be
+        // able to run these commands.
+        let dst = metadata(opts.config, &root)?.parent().join("bin");
+        let path = env::var_os("PATH").unwrap_or_default();
+        for path in env::split_paths(&path) {
+            if path == dst {
+                return Ok(())
+            }
+        }
+
+        opts.config.shell().warn(&format!("be sure to add `{}` to your PATH to be \
+                                           able to run the installed binaries",
+                                          dst.display()))?;
+    }
+
+    if scheduled_error {
+        bail!("some crates failed to install");
+    }
+
+    Ok(())
+}
+
+fn install_one(root: &Filesystem,
+               map: &SourceConfigMap,
+               krate: Option<&str>,
+               source_id: &SourceId,
+               vers: Option<&str>,
+               opts: &ops::CompileOptions,
+               force: bool,
+               is_first_install: bool) -> CargoResult<()> {
+
+    let config = opts.config;
+
+    let (pkg, source) = if source_id.is_git() {
+        select_pkg(GitSource::new(source_id, config)?,
+                   krate, vers, config, is_first_install,
+                   &mut |git| git.read_packages())?
+    } else if source_id.is_path() {
+        let path = source_id.url().to_file_path()
+                            .map_err(|()| CargoError::from("path sources must have a valid path"))?;
+        let mut src = PathSource::new(&path, source_id, config);
+        src.update().chain_err(|| {
+            format!("`{}` is not a crate root; specify a crate to \
+                     install from crates.io, or use --path or --git to \
+                     specify an alternate source", path.display())
+        })?;
+        select_pkg(PathSource::new(&path, source_id, config),
+                   krate, vers, config, is_first_install,
+                   &mut |path| path.read_packages())?
+    } else {
+        select_pkg(map.load(source_id)?,
+                   krate, vers, config, is_first_install,
+                   &mut |_| Err("must specify a crate to install from \
+                                 crates.io, or use --path or --git to \
+                                 specify alternate source".into()))?
+    };
+
+    let mut td_opt = None;
+    let overridden_target_dir = if source_id.is_path() {
+        None
+    } else if let Ok(td) = TempDir::new("cargo-install") {
+        let p = td.path().to_owned();
+        td_opt = Some(td);
+        Some(Filesystem::new(p))
+    } else {
+        Some(Filesystem::new(config.cwd().join("target-install")))
+    };
+
+    let ws = match overridden_target_dir {
+        Some(dir) => Workspace::ephemeral(pkg, config, Some(dir), false)?,
+        None => Workspace::new(pkg.manifest_path(), config)?,
+    };
+    let pkg = ws.current()?;
+
+    config.shell().status("Installing", pkg)?;
+
+    // Preflight checks to check up front whether we'll overwrite something.
+    // We have to check this again afterwards, but may as well avoid building
+    // anything if we're gonna throw it away anyway.
+    {
+        let metadata = metadata(config, root)?;
+        let list = read_crate_list(&metadata)?;
+        let dst = metadata.parent().join("bin");
+        check_overwrites(&dst, pkg, &opts.filter, &list, force)?;
+    }
+
+    let compile = ops::compile_ws(&ws,
+                                  Some(source),
+                                  opts,
+                                  Arc::new(DefaultExecutor)).chain_err(|| {
+        if let Some(td) = td_opt.take() {
+            // preserve the temporary directory, so the user can inspect it
+            td.into_path();
+        }
+
+        CargoError::from(format!("failed to compile `{}`, intermediate artifacts can be \
+                                  found at `{}`", pkg, ws.target_dir().display()))
+    })?;
+    let binaries: Vec<(&str, &Path)> = compile.binaries.iter().map(|bin| {
+        let name = bin.file_name().unwrap();
+        if let Some(s) = name.to_str() {
+            Ok((s, bin.as_ref()))
+        } else {
+            bail!("Binary `{:?}` name can't be serialized into string", name)
+        }
+    }).collect::<CargoResult<_>>()?;
+    if binaries.is_empty() {
+        bail!("no binaries are available for install using the selected \
+               features");
+    }
+
+    let metadata = metadata(config, root)?;
+    let mut list = read_crate_list(&metadata)?;
+    let dst = metadata.parent().join("bin");
+    let duplicates = check_overwrites(&dst, pkg, &opts.filter,
+                                      &list, force)?;
+
+    fs::create_dir_all(&dst)?;
+
+    // Copy all binaries to a temporary directory under `dst` first, catching
+    // some failure modes (e.g. out of space) before touching the existing
+    // binaries. This directory will get cleaned up via RAII.
+    let staging_dir = TempDir::new_in(&dst, "cargo-install")?;
+    for &(bin, src) in binaries.iter() {
+        let dst = staging_dir.path().join(bin);
+        // Try to move if `target_dir` is transient.
+        if !source_id.is_path() && fs::rename(src, &dst).is_ok() {
+            continue
+        }
+        fs::copy(src, &dst).chain_err(|| {
+            format!("failed to copy `{}` to `{}`", src.display(),
+                    dst.display())
+        })?;
+    }
+
+    let (to_replace, to_install): (Vec<&str>, Vec<&str>) =
+        binaries.iter().map(|&(bin, _)| bin)
+                       .partition(|&bin| duplicates.contains_key(bin));
+
+    let mut installed = Transaction { bins: Vec::new() };
+
+    // Move the temporary copies into `dst` starting with new binaries.
+    for bin in to_install.iter() {
+        let src = staging_dir.path().join(bin);
+        let dst = dst.join(bin);
+        config.shell().status("Installing", dst.display())?;
+        fs::rename(&src, &dst).chain_err(|| {
+            format!("failed to move `{}` to `{}`", src.display(),
+                    dst.display())
+        })?;
+        installed.bins.push(dst);
+    }
+
+    // Repeat for binaries which replace existing ones but don't pop the error
+    // up until after updating metadata.
+    let mut replaced_names = Vec::new();
+    let result = {
+        let mut try_install = || -> CargoResult<()> {
+            for &bin in to_replace.iter() {
+                let src = staging_dir.path().join(bin);
+                let dst = dst.join(bin);
+                config.shell().status("Replacing", dst.display())?;
+                fs::rename(&src, &dst).chain_err(|| {
+                    format!("failed to move `{}` to `{}`", src.display(),
+                            dst.display())
+                })?;
+                replaced_names.push(bin);
+            }
+            Ok(())
+        };
+        try_install()
+    };
+
+    // Update records of replaced binaries.
+    for &bin in replaced_names.iter() {
+        if let Some(&Some(ref p)) = duplicates.get(bin) {
+            if let Some(set) = list.v1.get_mut(p) {
+                set.remove(bin);
+            }
+        }
+        list.v1.entry(pkg.package_id().clone())
+               .or_insert_with(|| BTreeSet::new())
+               .insert(bin.to_string());
+    }
+
+    // Remove empty metadata lines.
+    let pkgs = list.v1.iter()
+                      .filter_map(|(p, set)| if set.is_empty() { Some(p.clone()) } else { None })
+                      .collect::<Vec<_>>();
+    for p in pkgs.iter() {
+        list.v1.remove(p);
+    }
+
+    // If installation was successful record newly installed binaries.
+    // If installation was successful record newly installed binaries.
+    if result.is_ok() {
+        list.v1.entry(pkg.package_id().clone())
+               .or_insert_with(|| BTreeSet::new())
+               .extend(to_install.iter().map(|s| s.to_string()));
+    }
+
+    let write_result = write_crate_list(&metadata, list);
+    match write_result {
+        // Replacement error (if any) isn't actually caused by write error
+        // but this seems to be the only way to show both.
+        Err(err) => result.chain_err(|| err)?,
+        Ok(_) => result?,
+    }
+
+    // Reaching here means all actions have succeeded. Clean up.
+    installed.success();
+    if !source_id.is_path() {
+        // Don't bother grabbing a lock as we're going to blow it all away
+        // anyway.
+        let target_dir = ws.target_dir().into_path_unlocked();
+        fs::remove_dir_all(&target_dir)?;
+    }
+
+    Ok(())
+}
+
+fn select_pkg<'a, T>(mut source: T,
+                     name: Option<&str>,
+                     vers: Option<&str>,
+                     config: &Config,
+                     needs_update: bool,
+                     list_all: &mut FnMut(&mut T) -> CargoResult<Vec<Package>>)
+                     -> CargoResult<(Package, Box<Source + 'a>)>
+    where T: Source + 'a
+{
+    if needs_update {
+        source.update()?;
+    }
+
+    match name {
+        Some(name) => {
+            let vers = match vers {
+                Some(v) => {
+
+                    // If the version begins with character <, >, =, ^, ~ parse it as a
+                    // version range, otherwise parse it as a specific version
+                    let first = v.chars()
+                        .nth(0)
+                        .ok_or("no version provided for the `--vers` flag")?;
+
+                    match first {
+                        '<' | '>' | '=' | '^' | '~' => match v.parse::<VersionReq>() {
+                            Ok(v) => Some(v.to_string()),
+                            Err(_) => {
+                                let msg = format!("the `--vers` provided, `{}`, is \
+                                                   not a valid semver version requirement\n\n
+                                                   Please have a look at \
+                                                   http://doc.crates.io/specifying-dependencies.html \
+                                                   for the correct format", v);
+                                return Err(msg.into());
+                            }
+                        },
+                        _ => match v.parse::<Version>() {
+                            Ok(v) => Some(format!("={}", v)),
+                            Err(_) => {
+                                let mut msg = format!("the `--vers` provided, `{}`, is \
+                                                       not a valid semver version\n\n\
+                                                       historically Cargo treated this \
+                                                       as a semver version requirement \
+                                                       accidentally\nand will continue \
+                                                       to do so, but this behavior \
+                                                       will be removed eventually", v);
+
+                                // If it is not a valid version but it is a valid version
+                                // requirement, add a note to the warning
+                                if v.parse::<VersionReq>().is_ok() {
+                                    msg.push_str(&format!("\nif you want to specify semver range, \
+                                                           add an explicit qualifier, like ^{}", v));
+                                }
+                                config.shell().warn(&msg)?;
+                                Some(v.to_string())
+                            }
+                        }
+                    }
+                }
+                None => None,
+            };
+            let vers = vers.as_ref().map(|s| &**s);
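+            // Editor's illustration (not upstream code) of the parsing above:
+            //
+            //     --vers 0.3.1   -> treated as the exact requirement `=0.3.1`
+            //     --vers ^0.3    -> parsed as the semver range `^0.3`
+            //     --vers 0.3     -> not a full version; warned about and passed
+            //                       through as a (deprecated) requirement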
+            let dep = Dependency::parse_no_deprecated(name, vers, source.source_id())?;
+            let deps = source.query_vec(&dep)?;
+            match deps.iter().map(|p| p.package_id()).max() {
+                Some(pkgid) => {
+                    let pkg = source.download(pkgid)?;
+                    Ok((pkg, Box::new(source)))
+                }
+                None => {
+                    let vers_info = vers.map(|v| format!(" with version `{}`", v))
+                                        .unwrap_or_default();
+                    Err(format!("could not find `{}` in `{}`{}", name,
+                                source.source_id(), vers_info).into())
+                }
+            }
+        }
+        None => {
+            let candidates = list_all(&mut source)?;
+            let binaries = candidates.iter().filter(|cand| {
+                cand.targets().iter().filter(|t| t.is_bin()).count() > 0
+            });
+            let examples = candidates.iter().filter(|cand| {
+                cand.targets().iter().filter(|t| t.is_example()).count() > 0
+            });
+            let pkg = match one(binaries, |v| multi_err("binaries", v))? {
+                Some(p) => p,
+                None => {
+                    match one(examples, |v| multi_err("examples", v))? {
+                        Some(p) => p,
+                        None => bail!("no packages found with binaries or \
+                                       examples"),
+                    }
+                }
+            };
+            return Ok((pkg.clone(), Box::new(source)));
+
+            fn multi_err(kind: &str, mut pkgs: Vec<&Package>) -> String {
+                pkgs.sort_by(|a, b| a.name().cmp(b.name()));
+                format!("multiple packages with {} found: {}", kind,
+                        pkgs.iter().map(|p| p.name()).collect::<Vec<_>>()
+                            .join(", "))
+            }
+        }
+    }
+}
+
+fn one<I, F>(mut i: I, f: F) -> CargoResult<Option<I::Item>>
+    where I: Iterator,
+          F: FnOnce(Vec<I::Item>) -> String
+{
+    match (i.next(), i.next()) {
+        (Some(i1), Some(i2)) => {
+            let mut v = vec![i1, i2];
+            v.extend(i);
+            Err(f(v).into())
+        }
+        (Some(i), None) => Ok(Some(i)),
+        (None, _) => Ok(None)
+    }
+}
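+// Editor's illustration (not part of the upstream source): `one` enforces
+// "at most one" semantics on an iterator, e.g.
+//
+//     one(vec![1].into_iter(), |v| format!("{:?}", v))       // Ok(Some(1))
+//     one(std::iter::empty::<i32>(), |v| format!("{:?}", v)) // Ok(None)
+//     one(vec![1, 2].into_iter(), |v| format!("{:?}", v))    // Err("[1, 2]")
+//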
+fn check_overwrites(dst: &Path,
+                    pkg: &Package,
+                    filter: &ops::CompileFilter,
+                    prev: &CrateListingV1,
+                    force: bool) -> CargoResult<BTreeMap<String, Option<PackageId>>> {
+    // If explicit --bin or --example flags were passed then those'll
+    // get checked during cargo_compile, we only care about the "build
+    // everything" case here
+    if !filter.is_specific() && !pkg.targets().iter().any(|t| t.is_bin()) {
+        bail!("specified package has no binaries")
+    }
+    let duplicates = find_duplicates(dst, pkg, filter, prev);
+    if force || duplicates.is_empty() {
+        return Ok(duplicates)
+    }
+    // Format the error message.
+    let mut msg = String::new();
+    for (bin, p) in duplicates.iter() {
+        msg.push_str(&format!("binary `{}` already exists in destination", bin));
+        if let Some(p) = p.as_ref() {
+            msg.push_str(&format!(" as part of `{}`\n", p));
+        } else {
+            msg.push_str("\n");
+        }
+    }
+    msg.push_str("Add --force to overwrite");
+    Err(msg.into())
+}
+
+fn find_duplicates(dst: &Path,
+                   pkg: &Package,
+                   filter: &ops::CompileFilter,
+                   prev: &CrateListingV1) -> BTreeMap<String, Option<PackageId>> {
+    let check = |name: String| {
+        // Need to provide type, works around Rust Issue #24946
+        let name = format!("{}{}", name, env::consts::EXE_SUFFIX);
+        if fs::metadata(dst.join(&name)).is_err() {
+            None
+        } else if let Some((p, _)) = prev.v1.iter().find(|&(_, v)| v.contains(&name)) {
+            Some((name, Some(p.clone())))
+        } else {
+            Some((name, None))
+        }
+    };
+    match *filter {
+        CompileFilter::Default { .. } => {
+            pkg.targets().iter()
+                         .filter(|t| t.is_bin())
+                         .filter_map(|t| check(t.name().to_string()))
+                         .collect()
+        }
+        CompileFilter::Only { bins, examples, .. } => {
+            let all_bins: Vec<String> = bins.try_collect().unwrap_or_else(|| {
+                pkg.targets().iter().filter(|t| t.is_bin())
+                             .map(|t| t.name().to_string())
+                             .collect()
+            });
+            let all_examples: Vec<String> = examples.try_collect().unwrap_or_else(|| {
+                pkg.targets().iter().filter(|t| t.is_example())
+                             .map(|t| t.name().to_string())
+                             .collect()
+            });
+
+            all_bins.iter().chain(all_examples.iter())
+                    .filter_map(|t| check(t.clone()))
+                    .collect::<BTreeMap<String, Option<PackageId>>>()
+        }
+    }
+}
+
+fn read_crate_list(file: &FileLock) -> CargoResult<CrateListingV1> {
+    (|| -> CargoResult<_> {
+        let mut contents = String::new();
+        file.file().read_to_string(&mut contents)?;
+        let listing = toml::from_str(&contents).chain_err(|| {
+            internal("invalid TOML found for metadata")
+        })?;
+        match listing {
+            CrateListing::V1(v1) => Ok(v1),
+            CrateListing::Empty(_) => {
+                Ok(CrateListingV1 { v1: BTreeMap::new() })
+            }
+        }
+    })().chain_err(|| {
+        format!("failed to parse crate metadata at `{}`",
+                file.path().to_string_lossy())
+    })
+}
+
+fn write_crate_list(file: &FileLock, listing: CrateListingV1) -> CargoResult<()> {
+    (|| -> CargoResult<_> {
+        let mut file = file.file();
+        file.seek(SeekFrom::Start(0))?;
+        file.set_len(0)?;
+        let data = toml::to_string(&CrateListing::V1(listing))?;
+        file.write_all(data.as_bytes())?;
+        Ok(())
+    })().chain_err(|| {
+        format!("failed to write crate metadata at `{}`",
+                file.path().to_string_lossy())
+    })
+}
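+// Editor's note: an illustrative `cargo install --list` output produced by
+// the function below (package ids and binary names are made up):
+//
+//     ripgrep v0.5.2:
+//         rg
+//     cargo-edit v0.1.6:
+//         cargo-add
+//         cargo-rm
+//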
+pub fn install_list(dst: Option<&str>, config: &Config) -> CargoResult<()> {
+    let dst = resolve_root(dst, config)?;
+    let dst = metadata(config, &dst)?;
+    let list = read_crate_list(&dst)?;
+    for (k, v) in list.v1.iter() {
+        println!("{}:", k);
+        for bin in v {
+            println!("    {}", bin);
+        }
+    }
+    Ok(())
+}
+
+pub fn uninstall(root: Option<&str>,
+                 spec: &str,
+                 bins: &[String],
+                 config: &Config) -> CargoResult<()> {
+    let root = resolve_root(root, config)?;
+    let crate_metadata = metadata(config, &root)?;
+    let mut metadata = read_crate_list(&crate_metadata)?;
+    let mut to_remove = Vec::new();
+    {
+        let result = PackageIdSpec::query_str(spec, metadata.v1.keys())?
+                                   .clone();
+        let mut installed = match metadata.v1.entry(result.clone()) {
+            Entry::Occupied(e) => e,
+            Entry::Vacant(..) => panic!("entry not found: {}", result),
+        };
+        let dst = crate_metadata.parent().join("bin");
+        for bin in installed.get() {
+            let bin = dst.join(bin);
+            if fs::metadata(&bin).is_err() {
+                bail!("corrupt metadata, `{}` does not exist when it should",
+                      bin.display())
+            }
+        }
+
+        let bins = bins.iter().map(|s| {
+            if s.ends_with(env::consts::EXE_SUFFIX) {
+                s.to_string()
+            } else {
+                format!("{}{}", s, env::consts::EXE_SUFFIX)
+            }
+        }).collect::<Vec<_>>();
+
+        for bin in bins.iter() {
+            if !installed.get().contains(bin) {
+                bail!("binary `{}` not installed as part of `{}`", bin, result)
+            }
+        }
+
+        if bins.is_empty() {
+            to_remove.extend(installed.get().iter().map(|b| dst.join(b)));
+            installed.get_mut().clear();
+        } else {
+            for bin in bins.iter() {
+                to_remove.push(dst.join(bin));
+                installed.get_mut().remove(bin);
+            }
+        }
+        if installed.get().is_empty() {
+            installed.remove();
+        }
+    }
+    write_crate_list(&crate_metadata, metadata)?;
+    for bin in to_remove {
+        config.shell().status("Removing", bin.display())?;
+        fs::remove_file(bin)?;
+    }
+
+    Ok(())
+}
+
+fn metadata(config: &Config, root: &Filesystem) -> CargoResult<FileLock> {
+    root.open_rw(Path::new(".crates.toml"), config, "crate metadata")
+}
+
+fn resolve_root(flag: Option<&str>,
+                config: &Config) -> CargoResult<Filesystem> {
+    let config_root = config.get_path("install.root")?;
+    Ok(flag.map(PathBuf::from).or_else(|| {
+        env::var_os("CARGO_INSTALL_ROOT").map(PathBuf::from)
+    }).or_else(move || {
+        config_root.map(|v| v.val)
+    }).map(Filesystem::new).unwrap_or_else(|| {
+        config.home().clone()
+    }))
+}
diff --git a/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_new.rs b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_new.rs
new file mode 100644
index 000000000..9c597df85
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_new.rs
@@ -0,0 +1,607 @@
+use std::collections::BTreeMap;
+use std::env;
+use std::fs;
+use std::path::Path;
+
+use serde::{Deserialize, Deserializer};
+use serde::de;
+
+use git2::Config as GitConfig;
+use git2::Repository as GitRepository;
+
+use core::Workspace;
+use ops::is_bad_artifact_name;
+use util::{GitRepo, HgRepo, PijulRepo, FossilRepo, internal};
+use util::{Config, paths};
+use util::errors::{CargoError, CargoResult, CargoResultExt};
+
+use toml;
+
+#[derive(Clone, Copy, Debug, PartialEq)]
+pub enum VersionControl { Git, Hg, Pijul, Fossil, NoVcs }
+
+pub struct NewOptions<'a> {
+    pub version_control: Option<VersionControl>,
+    pub bin: bool,
+    pub lib: bool,
+    pub path: &'a str,
+    pub name: Option<&'a str>,
+}
+
+struct SourceFileInformation {
+    relative_path: String,
+    target_name: String,
+    bin: bool,
+}
+
+struct MkOptions<'a> {
+    version_control: Option<VersionControl>,
+    path: &'a Path,
+    name: &'a str,
+    source_files: Vec<SourceFileInformation>,
+    bin: bool,
+}
+
+impl<'de> Deserialize<'de> for VersionControl {
+    fn deserialize<D: Deserializer<'de>>(d: D) -> Result<VersionControl, D::Error> {
+        Ok(match &String::deserialize(d)?[..] {
+            "git" => VersionControl::Git,
+            "hg" => VersionControl::Hg,
+            "pijul" => VersionControl::Pijul,
+            "fossil" => VersionControl::Fossil,
+            "none" => VersionControl::NoVcs,
+            n => {
+                let value = de::Unexpected::Str(n);
+                let msg = "unsupported version control system";
+                return Err(de::Error::invalid_value(value, &msg));
+            }
+        })
+    }
+}
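+// Editor's note (illustrative): this deserializer is what gives command line
+// flags such as `--vcs hg` their meaning; the accepted strings are exactly
+// "git", "hg", "pijul", "fossil", and "none".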
+impl<'a> NewOptions<'a> {
+    pub fn new(version_control: Option<VersionControl>,
+               bin: bool,
+               lib: bool,
+               path: &'a str,
+               name: Option<&'a str>) -> NewOptions<'a> {
+
+        // default to lib
+        let is_lib = if !bin {
+            true
+        }
+        else {
+            lib
+        };
+
+        NewOptions {
+            version_control: version_control,
+            bin: bin,
+            lib: is_lib,
+            path: path,
+            name: name,
+        }
+    }
+}
+
+struct CargoNewConfig {
+    name: Option<String>,
+    email: Option<String>,
+    version_control: Option<VersionControl>,
+}
+
+fn get_name<'a>(path: &'a Path, opts: &'a NewOptions, config: &Config) -> CargoResult<&'a str> {
+    if let Some(name) = opts.name {
+        return Ok(name);
+    }
+
+    if path.file_name().is_none() {
+        bail!("cannot auto-detect project name from path {:?} ; use --name to override",
+              path.as_os_str());
+    }
+
+    let dir_name = path.file_name().and_then(|s| s.to_str()).ok_or_else(|| {
+        CargoError::from(format!("cannot create a project with a non-unicode name: {:?}",
+                                 path.file_name().unwrap()))
+    })?;
+
+    if opts.bin {
+        Ok(dir_name)
+    } else {
+        let new_name = strip_rust_affixes(dir_name);
+        if new_name != dir_name {
+            writeln!(config.shell().err(),
+                     "note: package will be named `{}`; use --name to override",
+                     new_name)?;
+        }
+        Ok(new_name)
+    }
+}
+
+fn check_name(name: &str, is_bin: bool) -> CargoResult<()> {
+
+    // Ban keywords + test list found at
+    // https://doc.rust-lang.org/grammar.html#keywords
+    let blacklist = ["abstract", "alignof", "as", "become", "box",
+                     "break", "const", "continue", "crate", "do",
+                     "else", "enum", "extern", "false", "final",
+                     "fn", "for", "if", "impl", "in",
+                     "let", "loop", "macro", "match", "mod",
+                     "move", "mut", "offsetof", "override", "priv",
+                     "proc", "pub", "pure", "ref", "return",
+                     "self", "sizeof", "static", "struct",
+                     "super", "test", "trait", "true", "type", "typeof",
+                     "unsafe", "unsized", "use", "virtual", "where",
+                     "while", "yield"];
+    if blacklist.contains(&name) || (is_bin && is_bad_artifact_name(name)) {
+        bail!("The name `{}` cannot be used as a crate name\n\
+               use --name to override crate name",
+              name)
+    }
+
+    if let Some(ref c) = name.chars().nth(0) {
+        if c.is_digit(10) {
+            bail!("Package names starting with a digit cannot be used as a crate name\n\
+                   use --name to override crate name")
+        }
+    }
+
+    for c in name.chars() {
+        if c.is_alphanumeric() { continue }
+        if c == '_' || c == '-' { continue }
+        bail!("Invalid character `{}` in crate name: `{}`\n\
+               use --name to override crate name",
+              c, name)
+    }
+    Ok(())
+}
+
+fn detect_source_paths_and_types(project_path : &Path,
+                                 project_name: &str,
+                                 detected_files: &mut Vec<SourceFileInformation>,
+                                 ) -> CargoResult<()> {
+    let path = project_path;
+    let name = project_name;
+
+    enum H {
+        Bin,
+        Lib,
+        Detect,
+    }
+
+    struct Test {
+        proposed_path: String,
+        handling: H,
+    }
+
+    let tests = vec![
+        Test { proposed_path: format!("src/main.rs"),     handling: H::Bin },
+        Test { proposed_path: format!("main.rs"),         handling: H::Bin },
+        Test { proposed_path: format!("src/{}.rs", name), handling: H::Detect },
+        Test { proposed_path: format!("{}.rs", name),     handling: H::Detect },
+        Test { proposed_path: format!("src/lib.rs"),      handling: H::Lib },
+        Test { proposed_path: format!("lib.rs"),          handling: H::Lib },
+    ];
+    for i in tests {
+        let pp = i.proposed_path;
+
+        // path/pp does not exist or is not a file
+        if !fs::metadata(&path.join(&pp)).map(|x| x.is_file()).unwrap_or(false) {
+            continue;
+        }
+
+        let sfi = match i.handling {
+            H::Bin => {
+                SourceFileInformation {
+                    relative_path: pp,
+                    target_name: project_name.to_string(),
+                    bin: true
+                }
+            }
+            H::Lib => {
+                SourceFileInformation {
+                    relative_path: pp,
+                    target_name: project_name.to_string(),
+                    bin: false
+                }
+            }
+            H::Detect => {
+                let content = paths::read(&path.join(pp.clone()))?;
+                let isbin = content.contains("fn main");
+                SourceFileInformation {
+                    relative_path: pp,
+                    target_name: project_name.to_string(),
+                    bin: isbin
+                }
+            }
+        };
+        detected_files.push(sfi);
+    }
+
+    // Check for duplicate lib attempt
+
+    let mut previous_lib_relpath : Option<&str> = None;
+    let mut duplicates_checker : BTreeMap<&str, &SourceFileInformation> = BTreeMap::new();
+
+    for i in detected_files {
+        if i.bin {
+            if let Some(x) = BTreeMap::get::<str>(&duplicates_checker, i.target_name.as_ref()) {
+                bail!("\
+multiple possible binary sources found:
+  {}
+  {}
+cannot automatically generate Cargo.toml as the main target would be ambiguous",
+                      &x.relative_path, &i.relative_path);
+            }
+            duplicates_checker.insert(i.target_name.as_ref(), i);
+        } else {
+            if let Some(plp) = previous_lib_relpath {
+                return Err(format!("cannot have a project with \
+                                    multiple libraries, \
+                                    found both `{}` and `{}`",
+                                   plp, i.relative_path).into());
+            }
+            previous_lib_relpath = Some(&i.relative_path);
+        }
+    }
+
+    Ok(())
+}
+
+fn plan_new_source_file(bin: bool, project_name: String) -> SourceFileInformation {
+    if bin {
+        SourceFileInformation {
+            relative_path: "src/main.rs".to_string(),
+            target_name: project_name,
+            bin: true,
+        }
+    } else {
+        SourceFileInformation {
+            relative_path: "src/lib.rs".to_string(),
+            target_name: project_name,
+            bin: false,
+        }
+    }
+}
+
+pub fn new(opts: &NewOptions, config: &Config) -> CargoResult<()> {
+    let path = config.cwd().join(opts.path);
+    if fs::metadata(&path).is_ok() {
+        bail!("destination `{}` already exists\n\n\
+               Use `cargo init` to initialize the directory\
+               ", path.display()
+        )
+    }
+
+    if opts.lib && opts.bin {
+        bail!("can't specify both lib and binary outputs")
+    }
+
+    let name = get_name(&path, opts, config)?;
+    check_name(name, opts.bin)?;
+
+    let mkopts = MkOptions {
+        version_control: opts.version_control,
+        path: &path,
+        name: name,
+        source_files: vec![plan_new_source_file(opts.bin, name.to_string())],
+        bin: opts.bin,
+    };
+
+    mk(config, &mkopts).chain_err(|| {
+        format!("Failed to create project `{}` at `{}`",
+                name, path.display())
+    })
+}
+
+pub fn init(opts: &NewOptions, config: &Config) -> CargoResult<()> {
+    let path = config.cwd().join(opts.path);
+
+    let cargotoml_path = path.join("Cargo.toml");
+    if fs::metadata(&cargotoml_path).is_ok() {
+        bail!("`cargo init` cannot be run on existing Cargo projects")
+    }
+
+    if opts.lib && opts.bin {
+        bail!("can't specify both lib and binary outputs");
+    }
+
+    let name = get_name(&path, opts, config)?;
+    check_name(name, opts.bin)?;
+
+    let mut src_paths_types = vec![];
+
+    detect_source_paths_and_types(&path, name, &mut src_paths_types)?;
+
+    if src_paths_types.is_empty() {
+        src_paths_types.push(plan_new_source_file(opts.bin, name.to_string()));
+    } else {
+        // --bin option may be ignored if lib.rs or src/lib.rs present
+        // Maybe when doing `cargo init --bin` inside a library project stub,
+        // user may mean "initialize for library, but also add binary target"
+    }
+
+    let mut version_control = opts.version_control;
+
+    if version_control == None {
+        let mut num_detected_vcses = 0;
+
+        if fs::metadata(&path.join(".git")).is_ok() {
+            version_control = Some(VersionControl::Git);
+            num_detected_vcses += 1;
+        }
+
+        if fs::metadata(&path.join(".hg")).is_ok() {
+            version_control = Some(VersionControl::Hg);
+            num_detected_vcses += 1;
+        }
+
+        if fs::metadata(&path.join(".pijul")).is_ok() {
+            version_control = Some(VersionControl::Pijul);
+            num_detected_vcses += 1;
+        }
+
+        if fs::metadata(&path.join(".fossil")).is_ok() {
+            version_control = Some(VersionControl::Fossil);
+            num_detected_vcses += 1;
+        }
+
+        // if none exists, maybe create git, like in `cargo new`
+
+        if num_detected_vcses > 1 {
+            bail!("more than one of .hg, .git, .pijul, .fossil configurations \
+                   found and the ignore file can't be filled in as \
+                   a result. specify --vcs to override detection");
+        }
+    }
+
+    let mkopts = MkOptions {
+        version_control: version_control,
+        path: &path,
+        name: name,
+        bin: src_paths_types.iter().any(|x|x.bin),
+        source_files: src_paths_types,
+    };
+
+    mk(config, &mkopts).chain_err(|| {
+        format!("Failed to create project `{}` at `{}`",
+                name, path.display())
+    })
+}
+
+fn strip_rust_affixes(name: &str) -> &str {
+    for &prefix in &["rust-", "rust_", "rs-", "rs_"] {
+        if name.starts_with(prefix) {
+            return &name[prefix.len()..];
+        }
+    }
+    for &suffix in &["-rust", "_rust", "-rs", "_rs"] {
+        if name.ends_with(suffix) {
+            return &name[..name.len()-suffix.len()];
+        }
+    }
+    name
+}
+
+fn existing_vcs_repo(path: &Path, cwd: &Path) -> bool {
+    GitRepo::discover(path, cwd).is_ok() || HgRepo::discover(path, cwd).is_ok()
+}
+
+fn mk(config: &Config, opts: &MkOptions) -> CargoResult<()> {
+    let path = opts.path;
+    let name = opts.name;
+    let cfg = global_config(config)?;
+    // Please ensure that ignore and hgignore are in sync.
+    let ignore = ["/target/\n", "**/*.rs.bk\n",
+                  if !opts.bin { "Cargo.lock\n" } else { "" }]
+        .concat();
+    // Mercurial glob ignores can't be rooted, so just sticking a 'syntax: glob' at the top of the
+    // file will exclude too much. Instead, use regexp-based ignores. See 'hg help ignore' for
+    // more.
+    let hgignore = ["^target/\n", "glob:*.rs.bk\n",
+                    if !opts.bin { "glob:Cargo.lock\n" } else { "" }]
+        .concat();
+
+    let in_existing_vcs_repo = existing_vcs_repo(path.parent().unwrap_or(path), config.cwd());
+    let vcs = match (opts.version_control, cfg.version_control, in_existing_vcs_repo) {
+        (None, None, false) => VersionControl::Git,
+        (None, Some(option), false) => option,
+        (Some(option), _, _) => option,
+        (_, _, true) => VersionControl::NoVcs,
+    };
+    match vcs {
+        VersionControl::Git => {
+            if !fs::metadata(&path.join(".git")).is_ok() {
+                GitRepo::init(path, config.cwd())?;
+            }
+            paths::append(&path.join(".gitignore"), ignore.as_bytes())?;
+        },
+        VersionControl::Hg => {
+            if !fs::metadata(&path.join(".hg")).is_ok() {
+                HgRepo::init(path, config.cwd())?;
+            }
+            paths::append(&path.join(".hgignore"), hgignore.as_bytes())?;
+        },
+        VersionControl::Pijul => {
+            if !fs::metadata(&path.join(".pijul")).is_ok() {
+                PijulRepo::init(path, config.cwd())?;
+            }
+        },
+        VersionControl::Fossil => {
+            if !fs::metadata(&path.join(".fossil")).is_ok() {
+                FossilRepo::init(path, config.cwd())?;
+            }
+        },
+        VersionControl::NoVcs => {
+            fs::create_dir_all(path)?;
+        },
+    };
+
+    let (author_name, email) = discover_author()?;
+    // Hoo boy, sure glad we've got exhaustiveness checking behind us.
+    let author = match (cfg.name, cfg.email, author_name, email) {
+        (Some(name), Some(email), _, _) |
+        (Some(name), None, _, Some(email)) |
+        (None, Some(email), name, _) |
+        (None, None, name, Some(email)) => format!("{} <{}>", name, email),
+        (Some(name), None, _, None) |
+        (None, None, name, None) => name,
+    };
+
+    let mut cargotoml_path_specifier = String::new();
+
+    // Calculate what [lib] and [[bin]]s we need to append to Cargo.toml
+
+    for i in &opts.source_files {
+        if i.bin {
+            if i.relative_path != "src/main.rs" {
+                cargotoml_path_specifier.push_str(&format!(r#"
+[[bin]]
+name = "{}"
+path = {}
+"#, i.target_name, toml::Value::String(i.relative_path.clone())));
+            }
+        } else if i.relative_path != "src/lib.rs" {
+            cargotoml_path_specifier.push_str(&format!(r#"
+[lib]
+name = "{}"
+path = {}
+"#, i.target_name, toml::Value::String(i.relative_path.clone())));
+        }
+    }
+
+    // Create Cargo.toml file with necessary [lib] and [[bin]] sections, if needed
+
+    paths::write(&path.join("Cargo.toml"), format!(
+r#"[package]
+name = "{}"
+version = "0.1.0"
+authors = [{}]
+
+[dependencies]
+{}"#, name, toml::Value::String(author), cargotoml_path_specifier).as_bytes())?;
+
+
+    // Create all specified source files
+    // (with respective parent directories)
+    // if they don't exist
+
+    for i in &opts.source_files {
+        let path_of_source_file = path.join(i.relative_path.clone());
+
+        if let Some(src_dir) = path_of_source_file.parent() {
+            fs::create_dir_all(src_dir)?;
+        }
+
+        let default_file_content : &[u8] = if i.bin {
+            b"\
+fn main() {
+    println!(\"Hello, world!\");
+}
+"
+        } else {
+            b"\
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn it_works() {
+        assert_eq!(2 + 2, 4);
+    }
+}
+"
+        };
+
+        if !fs::metadata(&path_of_source_file).map(|x| x.is_file()).unwrap_or(false) {
+            paths::write(&path_of_source_file, default_file_content)?;
+        }
+    }
+
+    if let Err(e) = Workspace::new(&path.join("Cargo.toml"), config) {
+        let msg = format!("compiling this new crate may not work due to invalid \
+                           workspace configuration\n\n{}", e);
+        config.shell().warn(msg)?;
+    }
+
+    Ok(())
+}
+
+fn get_environment_variable(variables: &[&str] ) -> Option<String>{
+    variables.iter()
+             .filter_map(|var| env::var(var).ok())
+             .next()
+}
+
+fn discover_author() -> CargoResult<(String, Option<String>)> {
+    let cwd = env::current_dir()?;
+    let git_config = if let Ok(repo) = GitRepository::discover(&cwd) {
+        repo.config().ok().or_else(|| GitConfig::open_default().ok())
+    } else {
+        GitConfig::open_default().ok()
+    };
+    let git_config = git_config.as_ref();
+    let name_variables = ["CARGO_NAME", "GIT_AUTHOR_NAME", "GIT_COMMITTER_NAME",
+                          "USER", "USERNAME", "NAME"];
+    let name = get_environment_variable(&name_variables[0..3])
+        .or_else(|| git_config.and_then(|g| g.get_string("user.name").ok()))
+        .or_else(|| get_environment_variable(&name_variables[3..]));
+
+    let name = match name {
+        Some(name) => name,
+        None => {
+            let username_var = if cfg!(windows) {"USERNAME"} else {"USER"};
+            bail!("could not determine the current user, please set ${}",
+                  username_var)
+        }
+    };
+    let email_variables = ["CARGO_EMAIL", "GIT_AUTHOR_EMAIL", "GIT_COMMITTER_EMAIL",
+                           "EMAIL"];
+    let email = get_environment_variable(&email_variables[0..3])
+        .or_else(|| git_config.and_then(|g| g.get_string("user.email").ok()))
+        .or_else(|| get_environment_variable(&email_variables[3..]));
+
+    let name = name.trim().to_string();
+    let email = email.map(|s| s.trim().to_string());
+
+    Ok((name, email))
+}
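+// Editor's note (illustrative, not upstream code): author discovery above
+// checks, in order: CARGO_NAME / CARGO_EMAIL, the GIT_* variables, git's
+// `user.name` / `user.email`, and finally generic variables such as USER or
+// EMAIL. For example, with
+//
+//     CARGO_NAME="Jane Doe" CARGO_EMAIL=jane@example.com cargo new foo
+//
+// the generated manifest would contain `authors = ["Jane Doe <jane@example.com>"]`.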
+fn global_config(config: &Config) -> CargoResult<CargoNewConfig> {
+    let name = config.get_string("cargo-new.name")?.map(|s| s.val);
+    let email = config.get_string("cargo-new.email")?.map(|s| s.val);
+    let vcs = config.get_string("cargo-new.vcs")?;
+
+    let vcs = match vcs.as_ref().map(|p| (&p.val[..], &p.definition)) {
+        Some(("git", _)) => Some(VersionControl::Git),
+        Some(("hg", _)) => Some(VersionControl::Hg),
+        Some(("none", _)) => Some(VersionControl::NoVcs),
+        Some((s, p)) => {
+            return Err(internal(format!("invalid configuration for key \
+                                         `cargo-new.vcs`, unknown vcs `{}` \
+                                         (found in {})", s, p)))
+        }
+        None => None
+    };
+    Ok(CargoNewConfig {
+        name: name,
+        email: email,
+        version_control: vcs,
+    })
+}
+
+#[cfg(test)]
+mod tests {
+    use super::strip_rust_affixes;
+
+    #[test]
+    fn affixes_stripped() {
+        assert_eq!(strip_rust_affixes("rust-foo"), "foo");
+        assert_eq!(strip_rust_affixes("foo-rs"), "foo");
+        assert_eq!(strip_rust_affixes("rs_foo"), "foo");
+        // Only one affix is stripped
+        assert_eq!(strip_rust_affixes("rs-foo-rs"), "foo-rs");
+        assert_eq!(strip_rust_affixes("foo-rs-rs"), "foo-rs");
+        // It shouldn't touch the middle
+        assert_eq!(strip_rust_affixes("some-rust-crate"), "some-rust-crate");
+    }
+}
diff --git a/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_output_metadata.rs b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_output_metadata.rs
new file mode 100644
index 000000000..1dca07451
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_output_metadata.rs
@@ -0,0 +1,106 @@
+use serde::ser::{self, Serialize};
+
+use core::resolver::Resolve;
+use core::{Package, PackageId, Workspace};
+use ops::{self, Packages};
+use util::CargoResult;
+
+const VERSION: u32 = 1;
+
+pub struct OutputMetadataOptions {
+    pub features: Vec<String>,
+    pub no_default_features: bool,
+    pub all_features: bool,
+    pub no_deps: bool,
+    pub version: u32,
+}
+
+/// Loads the manifest, resolves the dependencies of the project to the concrete
+/// used versions - considering overrides - and writes all dependencies in a JSON
+/// format to stdout.
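+///
+/// Editor's sketch (not upstream documentation) of the emitted shape:
+///
+/// ```json
+/// {
+///   "packages": [...],
+///   "workspace_members": [...],
+///   "resolve": { "nodes": [{ "id": "...", "dependencies": ["..."] }], "root": "..." },
+///   "target_directory": "/path/to/target",
+///   "version": 1
+/// }
+/// ```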
+pub fn output_metadata(ws: &Workspace,
+                       opt: &OutputMetadataOptions) -> CargoResult<ExportInfo> {
+    if opt.version != VERSION {
+        bail!("metadata version {} not supported, only {} is currently supported",
+              opt.version, VERSION);
+    }
+    if opt.no_deps {
+        metadata_no_deps(ws, opt)
+    } else {
+        metadata_full(ws, opt)
+    }
+}
+
+fn metadata_no_deps(ws: &Workspace,
+                    _opt: &OutputMetadataOptions) -> CargoResult<ExportInfo> {
+    Ok(ExportInfo {
+        packages: ws.members().cloned().collect(),
+        workspace_members: ws.members().map(|pkg| pkg.package_id().clone()).collect(),
+        resolve: None,
+        target_directory: ws.target_dir().display().to_string(),
+        version: VERSION,
+    })
+}
+
+fn metadata_full(ws: &Workspace,
+                 opt: &OutputMetadataOptions) -> CargoResult<ExportInfo> {
+    let specs = Packages::All.into_package_id_specs(ws)?;
+    let deps = ops::resolve_ws_precisely(ws,
+                                         None,
+                                         &opt.features,
+                                         opt.all_features,
+                                         opt.no_default_features,
+                                         &specs)?;
+    let (packages, resolve) = deps;
+
+    let packages = packages.package_ids()
+                           .map(|i| packages.get(i).map(|p| p.clone()))
+                           .collect::<CargoResult<Vec<_>>>()?;
+
+    Ok(ExportInfo {
+        packages: packages,
+        workspace_members: ws.members().map(|pkg| pkg.package_id().clone()).collect(),
+        resolve: Some(MetadataResolve{
+            resolve: resolve,
+            root: ws.current_opt().map(|pkg| pkg.package_id().clone()),
+        }),
+        target_directory: ws.target_dir().display().to_string(),
+        version: VERSION,
+    })
+}
+
+#[derive(Serialize)]
+pub struct ExportInfo {
+    packages: Vec<Package>,
+    workspace_members: Vec<PackageId>,
+    resolve: Option<MetadataResolve>,
+    target_directory: String,
+    version: u32,
+}
+
+/// Newtype wrapper to provide a custom `Serialize` implementation.
+/// The one from lockfile does not fit because it uses a non-standard
+/// format for `PackageId`s
+#[derive(Serialize)]
+struct MetadataResolve {
+    #[serde(rename = "nodes", serialize_with = "serialize_resolve")]
+    resolve: Resolve,
+    root: Option<PackageId>,
+}
+
+fn serialize_resolve<S>(resolve: &Resolve, s: S) -> Result<S::Ok, S::Error>
+    where S: ser::Serializer,
+{
+    #[derive(Serialize)]
+    struct Node<'a> {
+        id: &'a PackageId,
+        dependencies: Vec<&'a PackageId>,
+    }
+
+    resolve.iter().map(|id| {
+        Node {
+            id: id,
+            dependencies: resolve.deps(id).collect(),
+        }
+    }).collect::<Vec<_>>().serialize(s)
+}
diff --git a/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_package.rs b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_package.rs
new file mode 100644
index 000000000..f12ce12f3
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_package.rs
@@ -0,0 +1,340 @@
+use std::fs::{self, File};
+use std::io::SeekFrom;
+use std::io::prelude::*;
+use std::path::{self, Path};
+use std::sync::Arc;
+
+use flate2::read::GzDecoder;
+use flate2::{GzBuilder, Compression};
+use git2;
+use tar::{Archive, Builder, Header, EntryType};
+
+use core::{Package, Workspace, Source, SourceId};
+use sources::PathSource;
+use util::{self, internal, Config, FileLock};
+use util::errors::{CargoResult, CargoResultExt};
+use ops::{self, DefaultExecutor};
+
+pub struct PackageOpts<'cfg> {
+    pub config: &'cfg Config,
+    pub list: bool,
+    pub check_metadata: bool,
+    pub allow_dirty: bool,
+    pub verify: bool,
+    pub jobs: Option<u32>,
+    pub target: Option<&'cfg str>,
+}
+
+pub fn package(ws: &Workspace,
+               opts: &PackageOpts) -> CargoResult<Option<FileLock>> {
+    let pkg = ws.current()?;
+    let config = ws.config();
+    if !pkg.manifest().features().activated().is_empty() {
+        bail!("cannot package or publish crates which activate nightly-only \
+               cargo features")
+    }
+    let mut src = PathSource::new(pkg.root(),
+                                  pkg.package_id().source_id(),
+                                  config);
+
src.update()?; + + if opts.check_metadata { + check_metadata(pkg, config)?; + } + + verify_dependencies(pkg)?; + + if opts.list { + let root = pkg.root(); + let mut list: Vec<_> = src.list_files(pkg)?.iter().map(|file| { + util::without_prefix(file, root).unwrap().to_path_buf() + }).collect(); + list.sort(); + for file in list.iter() { + println!("{}", file.display()); + } + return Ok(None) + } + + if !opts.allow_dirty { + check_not_dirty(pkg, &src)?; + } + + let filename = format!("{}-{}.crate", pkg.name(), pkg.version()); + let dir = ws.target_dir().join("package"); + let mut dst = { + let tmp = format!(".{}", filename); + dir.open_rw(&tmp, config, "package scratch space")? + }; + + // Package up and test a temporary tarball and only move it to the final + // location if it actually passes all our tests. Any previously existing + // tarball can be assumed as corrupt or invalid, so we just blow it away if + // it exists. + config.shell().status("Packaging", pkg.package_id().to_string())?; + dst.file().set_len(0)?; + tar(ws, &src, dst.file(), &filename).chain_err(|| { + "failed to prepare local package for uploading" + })?; + if opts.verify { + dst.seek(SeekFrom::Start(0))?; + run_verify(ws, dst.file(), opts).chain_err(|| { + "failed to verify package tarball" + })? + } + dst.seek(SeekFrom::Start(0))?; + { + let src_path = dst.path(); + let dst_path = dst.parent().join(&filename); + fs::rename(&src_path, &dst_path).chain_err(|| { + "failed to move temporary tarball into final location" + })?; + } + Ok(Some(dst)) +} + +// check that the package has some piece of metadata that a human can +// use to tell what the package is about. +fn check_metadata(pkg: &Package, config: &Config) -> CargoResult<()> { + let md = pkg.manifest().metadata(); + + let mut missing = vec![]; + + macro_rules! lacking { + ($( $($field: ident)||* ),*) => {{ + $( + if $(md.$field.as_ref().map_or(true, |s| s.is_empty()))&&* { + $(missing.push(stringify!($field).replace("_", "-"));)* + } + )* + }} + } + lacking!(description, license || license_file, documentation || homepage || repository); + + if !missing.is_empty() { + let mut things = missing[..missing.len() - 1].join(", "); + // things will be empty if and only if length == 1 (i.e. the only case + // to have no `or`). + if !things.is_empty() { + things.push_str(" or "); + } + things.push_str(missing.last().unwrap()); + + config.shell().warn( + &format!("manifest has no {things}.\n\ + See http://doc.crates.io/manifest.html#package-metadata for more info.", + things = things))? + } + Ok(()) +} + +// check that the package dependencies are safe to deploy. 
+fn verify_dependencies(pkg: &Package) -> CargoResult<()> {
+    for dep in pkg.dependencies() {
+        if dep.source_id().is_path() && !dep.specified_req() {
+            bail!("all path dependencies must have a version specified \
+                   when packaging.\ndependency `{}` does not specify \
+                   a version.", dep.name())
+        }
+    }
+    Ok(())
+}
+
+fn check_not_dirty(p: &Package, src: &PathSource) -> CargoResult<()> {
+    if let Ok(repo) = git2::Repository::discover(p.root()) {
+        if let Some(workdir) = repo.workdir() {
+            debug!("found a git repo at {:?}, checking if index present",
+                   workdir);
+            let path = p.manifest_path();
+            let path = path.strip_prefix(workdir).unwrap_or(path);
+            if let Ok(status) = repo.status_file(path) {
+                if (status & git2::STATUS_IGNORED).is_empty() {
+                    debug!("Cargo.toml found in repo, checking if dirty");
+                    return git(p, src, &repo)
+                }
+            }
+        }
+    }
+
+    // No VCS recognized, we don't know if the directory is dirty or not, so we
+    // have to assume that it's clean.
+    return Ok(());
+
+    fn git(p: &Package,
+           src: &PathSource,
+           repo: &git2::Repository) -> CargoResult<()> {
+        let workdir = repo.workdir().unwrap();
+        let dirty = src.list_files(p)?.iter().filter(|file| {
+            let relative = file.strip_prefix(workdir).unwrap();
+            if let Ok(status) = repo.status_file(relative) {
+                status != git2::STATUS_CURRENT
+            } else {
+                false
+            }
+        }).map(|path| {
+            path.strip_prefix(p.root()).unwrap_or(path).display().to_string()
+        }).collect::<Vec<_>>();
+        if dirty.is_empty() {
+            Ok(())
+        } else {
+            bail!("{} files in the working directory contain changes that were \
+                   not yet committed into git:\n\n{}\n\n\
+                   to proceed despite this, pass the `--allow-dirty` flag",
+                  dirty.len(), dirty.join("\n"))
+        }
+    }
+}
+
+fn tar(ws: &Workspace,
+       src: &PathSource,
+       dst: &File,
+       filename: &str) -> CargoResult<()> {
+    // Prepare the encoder and its header
+    let filename = Path::new(filename);
+    let encoder = GzBuilder::new().filename(util::path2bytes(filename)?)
+                                  .write(dst, Compression::Best);
+
+    // Put all package files into a compressed archive
+    let mut ar = Builder::new(encoder);
+    let pkg = ws.current()?;
+    let config = ws.config();
+    let root = pkg.root();
+    for file in src.list_files(pkg)?.iter() {
+        let relative = util::without_prefix(file, root).unwrap();
+        check_filename(relative)?;
+        let relative = relative.to_str().ok_or_else(|| {
+            format!("non-utf8 path in source directory: {}",
+                    relative.display())
+        })?;
+        config.shell().verbose(|shell| {
+            shell.status("Archiving", &relative)
+        })?;
+        let path = format!("{}-{}{}{}", pkg.name(), pkg.version(),
+                           path::MAIN_SEPARATOR, relative);
+
+        // The tar::Builder type by default will build GNU archives, but
+        // unfortunately we force it here to use UStar archives instead. The
+        // UStar format has more limitations on the length of path name that it
+        // can encode, so it's not quite as nice to use.
+        //
+        // Older cargos, however, had a bug where GNU archives were interpreted
+        // as UStar archives. This bug means that if we publish a GNU archive
+        // which has fully filled out metadata it'll be corrupt when unpacked by
+        // older cargos.
+        //
+        // Hopefully in the future after enough cargos have been running around
+        // with the bugfixed tar-rs library we'll be able to switch this over to
+        // GNU archives, but for now we'll just say that you can't encode paths
+        // in archives that are *too* long.
+        //
+        // For an instance of this in the wild, use the tar-rs 0.3.3 library to
+        // unpack the selectors 0.4.0 crate on crates.io.
Either that or take a + // look at rust-lang/cargo#2326 + let mut header = Header::new_ustar(); + header.set_path(&path).chain_err(|| { + format!("failed to add to archive: `{}`", relative) + })?; + let mut file = File::open(file).chain_err(|| { + format!("failed to open for archiving: `{}`", file.display()) + })?; + let metadata = file.metadata().chain_err(|| { + format!("could not learn metadata for: `{}`", relative) + })?; + header.set_metadata(&metadata); + + if relative == "Cargo.toml" { + let orig = Path::new(&path).with_file_name("Cargo.toml.orig"); + header.set_path(&orig)?; + header.set_cksum(); + ar.append(&header, &mut file).chain_err(|| { + internal(format!("could not archive source file `{}`", relative)) + })?; + + let mut header = Header::new_ustar(); + let toml = pkg.to_registry_toml(); + header.set_path(&path)?; + header.set_entry_type(EntryType::file()); + header.set_mode(0o644); + header.set_size(toml.len() as u64); + header.set_cksum(); + ar.append(&header, toml.as_bytes()).chain_err(|| { + internal(format!("could not archive source file `{}`", relative)) + })?; + } else { + header.set_cksum(); + ar.append(&header, &mut file).chain_err(|| { + internal(format!("could not archive source file `{}`", relative)) + })?; + } + } + let encoder = ar.into_inner()?; + encoder.finish()?; + Ok(()) +} + +fn run_verify(ws: &Workspace, tar: &File, opts: &PackageOpts) -> CargoResult<()> { + let config = ws.config(); + let pkg = ws.current()?; + + config.shell().status("Verifying", pkg)?; + + let f = GzDecoder::new(tar)?; + let dst = pkg.root().join(&format!("target/package/{}-{}", + pkg.name(), pkg.version())); + if fs::metadata(&dst).is_ok() { + fs::remove_dir_all(&dst)?; + } + let mut archive = Archive::new(f); + archive.unpack(dst.parent().unwrap())?; + + // Manufacture an ephemeral workspace to ensure that even if the top-level + // package has a workspace we can still build our new crate. + let id = SourceId::for_path(&dst)?; + let mut src = PathSource::new(&dst, &id, ws.config()); + let new_pkg = src.root_package()?; + let ws = Workspace::ephemeral(new_pkg, config, None, true)?; + + ops::compile_ws(&ws, None, &ops::CompileOptions { + config: config, + jobs: opts.jobs, + target: opts.target, + features: &[], + no_default_features: false, + all_features: false, + spec: ops::Packages::Packages(&[]), + filter: ops::CompileFilter::Default { required_features_filterable: true }, + release: false, + message_format: ops::MessageFormat::Human, + mode: ops::CompileMode::Build, + target_rustdoc_args: None, + target_rustc_args: None, + }, Arc::new(DefaultExecutor))?; + + Ok(()) +} + +// It can often be the case that files of a particular name on one platform +// can't actually be created on another platform. For example files with colons +// in the name are allowed on Unix but not on Windows. +// +// To help out in situations like this, issue about weird filenames when +// packaging as a "heads up" that something may not work on other platforms. 
+fn check_filename(file: &Path) -> CargoResult<()> {
+    let name = match file.file_name() {
+        Some(name) => name,
+        None => return Ok(()),
+    };
+    let name = match name.to_str() {
+        Some(name) => name,
+        None => {
+            bail!("path does not have a unicode filename which may not unpack \
+                   on all platforms: {}", file.display())
+        }
+    };
+    let bad_chars = ['/', '\\', '<', '>', ':', '"', '|', '?', '*'];
+    if let Some(c) = bad_chars.iter().find(|c| name.contains(**c)) {
+        bail!("cannot package a filename with a special character `{}`: {}",
+              c, file.display())
+    }
+    Ok(())
+}
diff --git a/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_pkgid.rs b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_pkgid.rs
new file mode 100644
index 000000000..0461bc4c8
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_pkgid.rs
@@ -0,0 +1,16 @@
+use ops;
+use core::{PackageIdSpec, Workspace};
+use util::CargoResult;
+
+pub fn pkgid(ws: &Workspace, spec: Option<&str>) -> CargoResult<PackageIdSpec> {
+    let resolve = match ops::load_pkg_lockfile(ws)? {
+        Some(resolve) => resolve,
+        None => bail!("a Cargo.lock must exist for this command"),
+    };
+
+    let pkgid = match spec {
+        Some(spec) => PackageIdSpec::query_str(spec, resolve.iter())?,
+        None => ws.current()?.package_id(),
+    };
+    Ok(PackageIdSpec::from_package_id(pkgid))
+}
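+
+// Editor's note (illustrative): specs accepted by `PackageIdSpec::query_str`
+// include a bare name like `foo`, a name with version like `foo:1.2.3`, and
+// URL-based forms (see `cargo help pkgid` for the full grammar).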
diff --git a/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_read_manifest.rs b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_read_manifest.rs
new file mode 100644
index 000000000..19d9f6aef
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_read_manifest.rs
@@ -0,0 +1,165 @@
+use std::collections::{HashMap, HashSet};
+use std::fs;
+use std::io;
+use std::path::{Path, PathBuf};
+
+use core::{Package, SourceId, PackageId, EitherManifest};
+use util::{self, Config};
+use util::errors::{CargoResult, CargoResultExt, CargoError};
+use util::important_paths::find_project_manifest_exact;
+use util::toml::read_manifest;
+
+pub fn read_package(path: &Path, source_id: &SourceId, config: &Config)
+                    -> CargoResult<(Package, Vec<PathBuf>)> {
+    trace!("read_package; path={}; source-id={}", path.display(), source_id);
+    let (manifest, nested) = read_manifest(path, source_id, config)?;
+    let manifest = match manifest {
+        EitherManifest::Real(manifest) => manifest,
+        EitherManifest::Virtual(..) => {
+            bail!("found a virtual manifest at `{}` instead of a package \
+                   manifest", path.display())
+        }
+    };
+
+    Ok((Package::new(manifest, path), nested))
+}
+
+pub fn read_packages(path: &Path, source_id: &SourceId, config: &Config)
+                     -> CargoResult<Vec<Package>> {
+    let mut all_packages = HashMap::new();
+    let mut visited = HashSet::<PathBuf>::new();
+    let mut errors = Vec::<CargoError>::new();
+
+    trace!("looking for root package: {}, source_id={}", path.display(), source_id);
+
+    walk(path, &mut |dir| {
+        trace!("looking for child package: {}", dir.display());
+
+        // Don't recurse into hidden/dot directories unless we're at the toplevel
+        if dir != path {
+            let name = dir.file_name().and_then(|s| s.to_str());
+            if name.map(|s| s.starts_with('.')) == Some(true) {
+                return Ok(false)
+            }
+
+            // Don't automatically discover packages across git submodules
+            if fs::metadata(&dir.join(".git")).is_ok() {
+                return Ok(false)
+            }
+        }
+
+        // Don't ever look at target directories
+        if dir.file_name().and_then(|s| s.to_str()) == Some("target") &&
+           has_manifest(dir.parent().unwrap()) {
+            return Ok(false)
+        }
+
+        if has_manifest(dir) {
+            read_nested_packages(dir, &mut all_packages, source_id, config,
+                                 &mut visited, &mut errors)?;
+        }
+        Ok(true)
+    })?;
+
+    if all_packages.is_empty() {
+        match errors.pop() {
+            Some(err) => Err(err),
+            None => Err(format!("Could not find Cargo.toml in `{}`", path.display()).into()),
+        }
+    } else {
+        Ok(all_packages.into_iter().map(|(_, v)| v).collect())
+    }
+}
+
+fn walk(path: &Path, callback: &mut FnMut(&Path) -> CargoResult<bool>)
+        -> CargoResult<()> {
+    if !callback(path)? {
+        trace!("not processing {}", path.display());
+        return Ok(())
+    }
+
+    // Ignore any permission denied errors because temporary directories
+    // can often have some weird permissions on them.
+    let dirs = match fs::read_dir(path) {
+        Ok(dirs) => dirs,
+        Err(ref e) if e.kind() == io::ErrorKind::PermissionDenied => {
+            return Ok(())
+        }
+        Err(e) => {
+            return Err(e).chain_err(|| {
+                format!("failed to read directory `{}`", path.display())
+            })
+        }
+    };
+    for dir in dirs {
+        let dir = dir?;
+        if dir.file_type()?.is_dir() {
+            walk(&dir.path(), callback)?;
+        }
+    }
+    Ok(())
+}
+
+fn has_manifest(path: &Path) -> bool {
+    find_project_manifest_exact(path, "Cargo.toml").is_ok()
+}
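+
+// Editor's illustration (not upstream code): given a tree like
+//
+//     root/Cargo.toml
+//     root/.hidden/Cargo.toml      (skipped: dot directory)
+//     root/sub/.git                (skipped: git submodule boundary)
+//     root/target/Cargo.toml       (skipped: target dir of a package)
+//     root/nested/Cargo.toml       (discovered)
+//
+// `read_packages(root, ...)` would return the packages at `root` and
+// `root/nested` only.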
+fn read_nested_packages(path: &Path,
+                        all_packages: &mut HashMap<PackageId, Package>,
+                        source_id: &SourceId,
+                        config: &Config,
+                        visited: &mut HashSet<PathBuf>,
+                        errors: &mut Vec<CargoError>) -> CargoResult<()> {
+    if !visited.insert(path.to_path_buf()) { return Ok(()) }
+
+    let manifest_path = find_project_manifest_exact(path, "Cargo.toml")?;
+
+    let (manifest, nested) = match read_manifest(&manifest_path, source_id, config) {
+        Err(err) => {
+            // Ignore malformed manifests found on git repositories
+            //
+            // git sources try to find and read all manifests from the
+            // repository, but since it's not possible to exclude folders from
+            // this search it's safer to just ignore malformed manifests.
+            //
+            // TODO: Add a way to exclude folders?
+            info!("skipping malformed package found at `{}`",
+                  path.to_string_lossy());
+            errors.push(err);
+            return Ok(());
+        }
+        Ok(tuple) => tuple
+    };
+
+    let manifest = match manifest {
+        EitherManifest::Real(manifest) => manifest,
+        EitherManifest::Virtual(..) => return Ok(()),
+    };
+    let pkg = Package::new(manifest, &manifest_path);
+
+    let pkg_id = pkg.package_id().clone();
+    if !all_packages.contains_key(&pkg_id) {
+        all_packages.insert(pkg_id, pkg);
+    } else {
+        info!("skipping nested package `{}` found at `{}`",
+              pkg.name(), path.to_string_lossy());
+    }
+
+    // Registry sources are not allowed to have `path=` dependencies because
+    // they're all translated to actual registry dependencies.
+    //
+    // We normalize the path here to ensure that we don't infinitely walk around
+    // looking for crates. By normalizing we ensure that we visit this crate at
+    // most once.
+    //
+    // TODO: filesystem/symlink implications?
+    if !source_id.is_registry() {
+        for p in nested.iter() {
+            let path = util::normalize_path(&path.join(p));
+            read_nested_packages(&path, all_packages, source_id,
+                                 config, visited, errors)?;
+        }
+    }
+
+    Ok(())
+}
diff --git a/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_run.rs b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_run.rs
new file mode 100644
index 000000000..3a4e7f6f2
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_run.rs
@@ -0,0 +1,74 @@
+use std::path::Path;
+
+use ops::{self, Packages};
+use util::{self, CargoResult, CargoError, ProcessError};
+use util::errors::CargoErrorKind;
+use core::Workspace;
+
+pub fn run(ws: &Workspace,
+           options: &ops::CompileOptions,
+           args: &[String]) -> CargoResult<Option<ProcessError>> {
+    let config = ws.config();
+
+    let pkg = match options.spec {
+        Packages::All => unreachable!("cargo run supports single package only"),
+        Packages::OptOut(_) => unreachable!("cargo run supports single package only"),
+        Packages::Packages(xs) => match xs.len() {
+            0 => ws.current()?,
+            1 => ws.members()
+                   .find(|pkg| pkg.name() == xs[0])
+                   .ok_or_else(||
+                       CargoError::from(
+                           format!("package `{}` is not a member of the workspace", xs[0]))
+                   )?,
+            _ => unreachable!("cargo run supports single package only"),
+        }
+    };
+
+    let mut bins = pkg.manifest().targets().iter().filter(|a| {
+        !a.is_lib() && !a.is_custom_build() && if !options.filter.is_specific() {
+            a.is_bin()
+        } else {
+            options.filter.matches(a)
+        }
+    });
+    if bins.next().is_none() {
+        if !options.filter.is_specific() {
+            bail!("a bin target must be available for `cargo run`")
+        } else {
+            // this will be verified in cargo_compile
+        }
+    }
+    if bins.next().is_some() {
+        if !options.filter.is_specific() {
+            bail!("`cargo run` requires that a project only have one \
+                   executable; use the `--bin` option to specify which one \
+                   to run")
+        } else {
+            bail!("`cargo run` can run at most one executable, but \
+                   multiple were specified")
+        }
+    }
+
+    let compile = ops::compile(ws, options)?;
+    assert_eq!(compile.binaries.len(), 1);
+    let exe = &compile.binaries[0];
+    let exe = match util::without_prefix(exe, config.cwd()) {
+        Some(path) if path.file_name() == Some(path.as_os_str())
+                   => Path::new(".").join(path).to_path_buf(),
+        Some(path) => path.to_path_buf(),
+        None => exe.to_path_buf(),
+    };
+    let mut process = compile.target_process(exe, pkg)?;
+    process.args(args).cwd(config.cwd());
+
+    config.shell().status("Running", process.to_string())?;
+
+    let result = process.exec_replace();
+
+    match result {
+        Ok(()) => Ok(None),
+        Err(CargoError(CargoErrorKind::ProcessErrorKind(e), ..)) => Ok(Some(e)),
+        Err(e) => Err(e)
+    }
+}
diff --git a/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/compilation.rs b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/compilation.rs
new file mode 100644
index 000000000..b3643b6bf
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/compilation.rs
@@ -0,0 +1,187 @@
+use std::collections::{HashMap, HashSet};
+use std::ffi::OsStr;
+use std::path::PathBuf;
+use semver::Version;
+
+use core::{PackageId, Package, Target, TargetKind};
+use util::{self, CargoResult, Config, LazyCell, ProcessBuilder, process, join_paths};
+
+/// A structure returning the result of a compilation.
+pub struct Compilation<'cfg> {
+    /// A mapping from a package to the list of libraries that need to be
+    /// linked when working with that package.
+    pub libraries: HashMap<PackageId, Vec<(Target, PathBuf)>>,
+
+    /// An array of all tests created during this compilation.
+    pub tests: Vec<(Package, TargetKind, String, PathBuf)>,
+
+    /// An array of all binaries created.
+    pub binaries: Vec<PathBuf>,
+
+    /// All directories for the output of native build commands.
+    ///
+    /// This is currently used to drive some entries which are added to the
+    /// LD_LIBRARY_PATH as appropriate.
+    // TODO: deprecated, remove
+    pub native_dirs: HashSet<PathBuf>,
+
+    /// Root output directory (for the local package's artifacts)
+    pub root_output: PathBuf,
+
+    /// Output directory for rust dependencies.
+    /// May be for the host or for a specific target.
+    pub deps_output: PathBuf,
+
+    /// Output directory for the rust host dependencies.
+    pub host_deps_output: PathBuf,
+
+    /// The path to rustc's own libstd
+    pub host_dylib_path: Option<PathBuf>,
+
+    /// The path to libstd for the target
+    pub target_dylib_path: Option<PathBuf>,
+
+    /// Extra environment variables that were passed to compilations and should
+    /// be passed to future invocations of programs.
+    pub extra_env: HashMap<PackageId, Vec<(String, String)>>,
+
+    pub to_doc_test: Vec<Package>,
+
+    /// Features per package enabled during this compilation.
+    pub cfgs: HashMap<PackageId, HashSet<String>>,
+
+    pub target: String,
+
+    config: &'cfg Config,
+
+    target_runner: LazyCell<Option<(PathBuf, Vec<String>)>>,
+}
+
+impl<'cfg> Compilation<'cfg> {
+    pub fn new(config: &'cfg Config) -> Compilation<'cfg> {
+        Compilation {
+            libraries: HashMap::new(),
+            native_dirs: HashSet::new(),  // TODO: deprecated, remove
+            root_output: PathBuf::from("/"),
+            deps_output: PathBuf::from("/"),
+            host_deps_output: PathBuf::from("/"),
+            host_dylib_path: None,
+            target_dylib_path: None,
+            tests: Vec::new(),
+            binaries: Vec::new(),
+            extra_env: HashMap::new(),
+            to_doc_test: Vec::new(),
+            cfgs: HashMap::new(),
+            config: config,
+            target: String::new(),
+            target_runner: LazyCell::new(),
+        }
+    }
+
+    /// See `process`.
+    pub fn rustc_process(&self, pkg: &Package) -> CargoResult<ProcessBuilder> {
+        self.fill_env(self.config.rustc()?.process(), pkg, true)
+    }
+
+    /// See `process`.
+    pub fn rustdoc_process(&self, pkg: &Package) -> CargoResult<ProcessBuilder> {
+        self.fill_env(process(&*self.config.rustdoc()?), pkg, false)
+    }
+
+    /// See `process`.
+    pub fn host_process<T: AsRef<OsStr>>(&self, cmd: T, pkg: &Package)
+                                         -> CargoResult<ProcessBuilder> {
+        self.fill_env(process(cmd), pkg, true)
+    }
+
+    fn target_runner(&self) -> CargoResult<&Option<(PathBuf, Vec<String>)>> {
+        self.target_runner.get_or_try_init(|| {
+            let key = format!("target.{}.runner", self.target);
+            Ok(self.config.get_path_and_args(&key)?.map(|v| v.val))
+        })
+    }
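+    // Editor's note (illustrative): the `target.<triple>.runner` key read
+    // above comes from `.cargo/config`, e.g.
+    //
+    //     [target.armv7-unknown-linux-gnueabihf]
+    //     runner = "qemu-arm"
+    //
+    // in which case run invocations for that target are wrapped as
+    // `qemu-arm <binary> <args>`.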
+    /// See `process`.
+    pub fn target_process<T: AsRef<OsStr>>(&self, cmd: T, pkg: &Package)
+                                           -> CargoResult<ProcessBuilder> {
+        let builder = if let Some((ref runner, ref args)) = *self.target_runner()? {
+            let mut builder = process(runner);
+            builder.args(args);
+            builder.arg(cmd);
+            builder
+        } else {
+            process(cmd)
+        };
+        self.fill_env(builder, pkg, false)
+    }
+
+    /// Prepares a new process with an appropriate environment to run against
+    /// the artifacts produced by the build process.
+    ///
+    /// The package argument is also used to configure environment variables as
+    /// well as the working directory of the child process.
+    fn fill_env(&self, mut cmd: ProcessBuilder, pkg: &Package, is_host: bool)
+                -> CargoResult<ProcessBuilder> {
+
+        let mut search_path = if is_host {
+            let mut search_path = vec![self.host_deps_output.clone()];
+            search_path.extend(self.host_dylib_path.clone());
+            search_path
+        } else {
+            let mut search_path =
+                super::filter_dynamic_search_path(self.native_dirs.iter(),
+                                                  &self.root_output);
+            search_path.push(self.root_output.clone());
+            search_path.push(self.deps_output.clone());
+            search_path.extend(self.target_dylib_path.clone());
+            search_path
+        };
+
+        search_path.extend(util::dylib_path().into_iter());
+        let search_path = join_paths(&search_path, util::dylib_path_envvar())?;
+
+        cmd.env(util::dylib_path_envvar(), &search_path);
+        if let Some(env) = self.extra_env.get(pkg.package_id()) {
+            for &(ref k, ref v) in env {
+                cmd.env(k, v);
+            }
+        }
+
+        let metadata = pkg.manifest().metadata();
+
+        let cargo_exe = self.config.cargo_exe()?;
+        cmd.env(::CARGO_ENV, cargo_exe);
+
+        // When adding new environment variables depending on
+        // crate properties which might require rebuild upon change
+        // consider adding the corresponding properties to the hash
+        // in Context::target_metadata()
+        cmd.env("CARGO_MANIFEST_DIR", pkg.root())
+           .env("CARGO_PKG_VERSION_MAJOR", &pkg.version().major.to_string())
+           .env("CARGO_PKG_VERSION_MINOR", &pkg.version().minor.to_string())
+           .env("CARGO_PKG_VERSION_PATCH", &pkg.version().patch.to_string())
+           .env("CARGO_PKG_VERSION_PRE", &pre_version_component(pkg.version()))
+           .env("CARGO_PKG_VERSION", &pkg.version().to_string())
+           .env("CARGO_PKG_NAME", &pkg.name())
+           .env("CARGO_PKG_DESCRIPTION", metadata.description.as_ref().unwrap_or(&String::new()))
+           .env("CARGO_PKG_HOMEPAGE", metadata.homepage.as_ref().unwrap_or(&String::new()))
+           .env("CARGO_PKG_AUTHORS", &pkg.authors().join(":"))
+           .cwd(pkg.root());
+        Ok(cmd)
+    }
+}
+
+fn pre_version_component(v: &Version) -> String {
+    if v.pre.is_empty() {
+        return String::new();
+    }
+
+    let mut ret = String::new();
+
+    for (i, x) in v.pre.iter().enumerate() {
+        if i != 0 { ret.push('.') };
+        ret.push_str(&x.to_string());
+    }
+
+    ret
+}
diff --git a/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/context.rs b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/context.rs
new file mode 100644
index 000000000..369c9d7c5
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/context.rs
@@ -0,0 +1,1313 @@
+#![allow(deprecated)]
+
+use std::collections::{HashSet, HashMap, BTreeSet};
+use std::collections::hash_map::Entry;
+use std::env;
+use std::fmt;
+use std::hash::{Hasher, Hash, SipHasher};
+use std::path::{Path, PathBuf};
+use std::str::{self, FromStr};
+use std::sync::Arc;
+use std::cell::RefCell;
+
+use jobserver::Client;
+
+use core::{Package, PackageId, PackageSet, Resolve, Target, Profile};
+use core::{TargetKind, Profiles, Dependency, Workspace};
+use core::dependency::Kind as DepKind;
+use util::{self, ProcessBuilder, internal, Config, profile, Cfg, CfgExpr};
+use util::errors::{CargoResult, CargoResultExt};
+
+use super::TargetConfig;
+use super::custom_build::{BuildState, BuildScripts, BuildDeps};
+use super::fingerprint::Fingerprint;
+use super::layout::Layout;
+use super::links::Links;
+use super::{Kind, Compilation, BuildConfig};
+
+/// All information needed to define a Unit.
+///
+/// A unit is an object that has enough information so that cargo knows how to build it.
+/// For example, if your project has dependencies, then every dependency will be built as a library
+/// unit. If your project is a library, then it will be built as a library unit as well, or if it
+/// is a binary with `main.rs`, then a binary will be output. There are also separate unit types
+/// for `test`ing and `check`ing, amongst others.
+///
+/// The unit also holds information about all possible metadata about the package in `pkg`.
+///
+/// A unit needs to know extra information in addition to the type and root source file. For
+/// example, it needs to know the target architecture (OS, chip arch etc.) and it needs to know
+/// whether you want a debug or release build. There is enough information in this struct to figure
+/// all that out.
+#[derive(Clone, Copy, Eq, PartialEq, Hash)]
+pub struct Unit<'a> {
+    /// Information about available targets, which files to include/exclude, etc. Basically stuff in
+    /// `Cargo.toml`.
+    pub pkg: &'a Package,
+    /// Information about the specific target to build, out of the possible targets in `pkg`. Not
+    /// to be confused with *target-triple* (or *target architecture* ...), the target arch for a
+    /// build.
+    pub target: &'a Target,
+    /// The profile contains information about *how* the build should be run, including debug
+    /// level, extra args to pass to rustc, etc.
+    pub profile: &'a Profile,
+    /// Whether this compilation unit is for the host or target architecture.
+    ///
+    /// For example, when
+    /// cross compiling and using a custom build script, the build script needs to be compiled for
+    /// the host architecture so the host rustc can use it (when compiling to the target
+    /// architecture).
+    pub kind: Kind,
+}
+
+/// Type of each file generated by a Unit.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum TargetFileType {
+    /// Not a special file type.
+    Normal,
+    /// It is something you can link against (e.g. a library)
+    Linkable,
+    /// It is a piece of external debug information (e.g. *.dSYM and *.pdb)
+    DebugInfo,
+}
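+
+// Editor's note (illustrative): on Linux a shared library output such as
+// `libfoo.so` would be `Linkable`, split debug info like `foo.pdb` (MSVC) or
+// a `.dSYM` bundle (macOS) is `DebugInfo`, and a plain test executable is
+// `Normal`.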
*.dSYM and *.pdb) + DebugInfo, +} + +/// The build context, containing all information about a build task +pub struct Context<'a, 'cfg: 'a> { + /// The workspace the build is for + pub ws: &'a Workspace<'cfg>, + /// The cargo configuration + pub config: &'cfg Config, + /// The dependency graph for our build + pub resolve: &'a Resolve, + /// Information on the compilation output + pub compilation: Compilation<'cfg>, + pub packages: &'a PackageSet<'cfg>, + pub build_state: Arc, + pub build_script_overridden: HashSet<(PackageId, Kind)>, + pub build_explicit_deps: HashMap, BuildDeps>, + pub fingerprints: HashMap, Arc>, + pub compiled: HashSet>, + pub build_config: BuildConfig, + pub build_scripts: HashMap, Arc>, + pub links: Links<'a>, + pub used_in_plugin: HashSet>, + pub jobserver: Client, + + /// The target directory layout for the host (and target if it is the same as host) + host: Layout, + /// The target directory layout for the target (if different from then host) + target: Option, + target_info: TargetInfo, + host_info: TargetInfo, + profiles: &'a Profiles, + incremental_enabled: bool, + + /// For each Unit, a list all files produced as a triple of + /// + /// - File name that will be produced by the build process (in `deps`) + /// - If it should be linked into `target`, and what it should be called (e.g. without + /// metadata). + /// - Type of the file (library / debug symbol / else) + target_filenames: HashMap, Arc, TargetFileType)>>>, + target_metadatas: HashMap, Option>, +} + +#[derive(Clone, Default)] +struct TargetInfo { + crate_type_process: Option, + crate_types: RefCell>>, + cfg: Option>, +} + +impl TargetInfo { + fn discover_crate_type(&self, crate_type: &str) -> CargoResult> { + let mut process = self.crate_type_process.clone().unwrap(); + + process.arg("--crate-type").arg(crate_type); + + let output = process.exec_with_output().chain_err(|| { + format!("failed to run `rustc` to learn about \ + crate-type {} information", crate_type) + })?; + + let error = str::from_utf8(&output.stderr).unwrap(); + let output = str::from_utf8(&output.stdout).unwrap(); + Ok(parse_crate_type(crate_type, error, &mut output.lines())?) + } +} + +#[derive(Clone, Hash, Eq, PartialEq, Ord, PartialOrd)] +pub struct Metadata(u64); + +impl<'a, 'cfg> Context<'a, 'cfg> { + pub fn new(ws: &'a Workspace<'cfg>, + resolve: &'a Resolve, + packages: &'a PackageSet<'cfg>, + config: &'cfg Config, + build_config: BuildConfig, + profiles: &'a Profiles) -> CargoResult> { + + let dest = if build_config.release { "release" } else { "debug" }; + let host_layout = Layout::new(ws, None, dest)?; + let target_layout = match build_config.requested_target.as_ref() { + Some(target) => Some(Layout::new(ws, Some(target), dest)?), + None => None, + }; + + // Enable incremental builds if the user opts in. For now, + // this is an environment variable until things stabilize a + // bit more. + let incremental_enabled = match env::var("CARGO_INCREMENTAL") { + Ok(v) => v == "1", + Err(_) => false, + }; + + // -Z can only be used on nightly builds; other builds complain loudly. + // Since incremental builds only work on nightly anyway, we silently + // ignore CARGO_INCREMENTAL on anything but nightly. This allows users + // to always have CARGO_INCREMENTAL set without getting unexpected + // errors on stable/beta builds. 
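+        // For example, `CARGO_INCREMENTAL=1 cargo build` opts into incremental
+        // compilation on a nightly toolchain and is silently ignored elsewhere.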
+ let is_nightly = + config.rustc()?.verbose_version.contains("-nightly") || + config.rustc()?.verbose_version.contains("-dev"); + let incremental_enabled = incremental_enabled && is_nightly; + + // Load up the jobserver that we'll use to manage our parallelism. This + // is the same as the GNU make implementation of a jobserver, and + // intentionally so! It's hoped that we can interact with GNU make and + // all share the same jobserver. + // + // Note that if we don't have a jobserver in our environment then we + // create our own, and we create it with `n-1` tokens because one token + // is ourself, a running process. + let jobserver = match config.jobserver_from_env() { + Some(c) => c.clone(), + None => Client::new(build_config.jobs as usize - 1).chain_err(|| { + "failed to create jobserver" + })?, + }; + + Ok(Context { + ws: ws, + host: host_layout, + target: target_layout, + resolve: resolve, + packages: packages, + config: config, + target_info: TargetInfo::default(), + host_info: TargetInfo::default(), + compilation: Compilation::new(config), + build_state: Arc::new(BuildState::new(&build_config)), + build_config: build_config, + fingerprints: HashMap::new(), + profiles: profiles, + compiled: HashSet::new(), + build_scripts: HashMap::new(), + build_explicit_deps: HashMap::new(), + links: Links::new(), + used_in_plugin: HashSet::new(), + incremental_enabled: incremental_enabled, + jobserver: jobserver, + build_script_overridden: HashSet::new(), + + // TODO: Pre-Calculate these with a topo-sort, rather than lazy-calculating + target_filenames: HashMap::new(), + target_metadatas: HashMap::new(), + }) + } + + /// Prepare this context, ensuring that all filesystem directories are in + /// place. + pub fn prepare(&mut self) -> CargoResult<()> { + let _p = profile::start("preparing layout"); + + self.host.prepare().chain_err(|| { + internal("couldn't prepare build directories") + })?; + if let Some(ref mut target) = self.target { + target.prepare().chain_err(|| { + internal("couldn't prepare build directories") + })?; + } + + self.compilation.host_deps_output = self.host.deps().to_path_buf(); + + let layout = self.target.as_ref().unwrap_or(&self.host); + self.compilation.root_output = layout.dest().to_path_buf(); + self.compilation.deps_output = layout.deps().to_path_buf(); + Ok(()) + } + + /// Ensure that we've collected all target-specific information to compile + /// all the units mentioned in `units`. + pub fn probe_target_info(&mut self, units: &[Unit<'a>]) -> CargoResult<()> { + let mut crate_types = BTreeSet::new(); + let mut visited_units = HashSet::new(); + // pre-fill with `bin` for learning about tests (nothing may be + // explicitly `bin`) as well as `rlib` as it's the coalesced version of + // `lib` in the compiler and we're not sure which we'll see. + crate_types.insert("bin".to_string()); + crate_types.insert("rlib".to_string()); + for unit in units { + self.visit_crate_type(unit, &mut crate_types, &mut visited_units)?; + } + debug!("probe_target_info: crate_types={:?}", crate_types); + self.probe_target_info_kind(&crate_types, Kind::Target)?; + if self.requested_target().is_none() { + self.host_info = self.target_info.clone(); + } else { + self.probe_target_info_kind(&crate_types, Kind::Host)?; + } + Ok(()) + } + + /// A recursive function that checks all crate types (`rlib`, ...) are in `crate_types` + /// for this unit and its dependencies. + /// + /// Tracks visited units to avoid unnecessary work. 
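+    /// For example, a target declaring `crate-type = ["lib"]` is recorded here
+    /// as `rlib`, since that is the coalesced form the compiler reports.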
+    fn visit_crate_type(&self,
+                        unit: &Unit<'a>,
+                        crate_types: &mut BTreeSet<String>,
+                        visited_units: &mut HashSet<Unit<'a>>)
+                        -> CargoResult<()> {
+        if !visited_units.insert(*unit) {
+            return Ok(());
+        }
+        for target in unit.pkg.manifest().targets() {
+            crate_types.extend(target.rustc_crate_types().iter().map(|s| {
+                if *s == "lib" {
+                    "rlib".to_string()
+                } else {
+                    s.to_string()
+                }
+            }));
+        }
+        for dep in self.dep_targets(unit)? {
+            self.visit_crate_type(&dep, crate_types, visited_units)?;
+        }
+        Ok(())
+    }
+
+    fn probe_target_info_kind(&mut self,
+                              crate_types: &BTreeSet<String>,
+                              kind: Kind)
+                              -> CargoResult<()> {
+        let rustflags = env_args(self.config,
+                                 &self.build_config,
+                                 self.info(&kind),
+                                 kind,
+                                 "RUSTFLAGS")?;
+        let mut process = self.config.rustc()?.process();
+        process.arg("-")
+               .arg("--crate-name").arg("___")
+               .arg("--print=file-names")
+               .args(&rustflags)
+               .env_remove("RUST_LOG");
+
+        if kind == Kind::Target {
+            process.arg("--target").arg(&self.target_triple());
+        }
+
+        let crate_type_process = process.clone();
+
+        for crate_type in crate_types {
+            process.arg("--crate-type").arg(crate_type);
+        }
+
+        let mut with_cfg = process.clone();
+        with_cfg.arg("--print=sysroot");
+        with_cfg.arg("--print=cfg");
+
+        let mut has_cfg_and_sysroot = true;
+        let output = with_cfg.exec_with_output().or_else(|_| {
+            has_cfg_and_sysroot = false;
+            process.exec_with_output()
+        }).chain_err(|| {
+            "failed to run `rustc` to learn about target-specific information"
+        })?;
+
+        let error = str::from_utf8(&output.stderr).unwrap();
+        let output = str::from_utf8(&output.stdout).unwrap();
+        let mut lines = output.lines();
+        let mut map = HashMap::new();
+        for crate_type in crate_types {
+            let out = parse_crate_type(crate_type, error, &mut lines)?;
+            map.insert(crate_type.to_string(), out);
+        }
+
+        if has_cfg_and_sysroot {
+            let line = match lines.next() {
+                Some(line) => line,
+                None => bail!("output of --print=sysroot missing when learning about \
+                               target-specific information from rustc"),
+            };
+            let mut rustlib = PathBuf::from(line);
+            if kind == Kind::Host {
+                if cfg!(windows) {
+                    rustlib.push("bin");
+                } else {
+                    rustlib.push("lib");
+                }
+                self.compilation.host_dylib_path = Some(rustlib);
+            } else {
+                rustlib.push("lib");
+                rustlib.push("rustlib");
+                rustlib.push(self.target_triple());
+                rustlib.push("lib");
+                self.compilation.target_dylib_path = Some(rustlib);
+            }
+        }
+
+        let cfg = if has_cfg_and_sysroot {
+            Some(try!(lines.map(Cfg::from_str).collect()))
+        } else {
+            None
+        };
+
+        let info = match kind {
+            Kind::Target => &mut self.target_info,
+            Kind::Host => &mut self.host_info,
+        };
+        info.crate_type_process = Some(crate_type_process);
+        info.crate_types = RefCell::new(map);
+        info.cfg = cfg;
+        Ok(())
+    }
+
+    /// Builds up the `used_in_plugin` set internal to this context from the list of
+    /// top-level units.
+    ///
+    /// This will recursively walk `units` and all of their dependencies to
+    /// determine which crates are going to be used in plugins or not.
+    pub fn build_used_in_plugin_map(&mut self, units: &[Unit<'a>])
+                                    -> CargoResult<()> {
+        let mut visited = HashSet::new();
+        for unit in units {
+            self.walk_used_in_plugin_map(unit,
+                                         unit.target.for_host(),
+                                         &mut visited)?;
+        }
+        Ok(())
+    }
+
+    fn walk_used_in_plugin_map(&mut self,
+                               unit: &Unit<'a>,
+                               is_plugin: bool,
+                               visited: &mut HashSet<(Unit<'a>, bool)>)
+                               -> CargoResult<()> {
+        if !visited.insert((*unit, is_plugin)) {
+            return Ok(())
+        }
+        if is_plugin {
+            self.used_in_plugin.insert(*unit);
+        }
+        for unit in self.dep_targets(unit)?
{ + self.walk_used_in_plugin_map(&unit, + is_plugin || unit.target.for_host(), + visited)?; + } + Ok(()) + } + + /// Returns the appropriate directory layout for either a plugin or not. + fn layout(&self, kind: Kind) -> &Layout { + match kind { + Kind::Host => &self.host, + Kind::Target => self.target.as_ref().unwrap_or(&self.host) + } + } + + /// Returns the directories where Rust crate dependencies are found for the + /// specified unit. + pub fn deps_dir(&self, unit: &Unit) -> &Path { + self.layout(unit.kind).deps() + } + + /// Returns the directory for the specified unit where fingerprint + /// information is stored. + pub fn fingerprint_dir(&mut self, unit: &Unit<'a>) -> PathBuf { + let dir = self.pkg_dir(unit); + self.layout(unit.kind).fingerprint().join(dir) + } + + /// Returns the appropriate directory layout for either a plugin or not. + pub fn build_script_dir(&mut self, unit: &Unit<'a>) -> PathBuf { + assert!(unit.target.is_custom_build()); + assert!(!unit.profile.run_custom_build); + let dir = self.pkg_dir(unit); + self.layout(Kind::Host).build().join(dir) + } + + /// Returns the appropriate directory layout for either a plugin or not. + pub fn build_script_out_dir(&mut self, unit: &Unit<'a>) -> PathBuf { + assert!(unit.target.is_custom_build()); + assert!(unit.profile.run_custom_build); + let dir = self.pkg_dir(unit); + self.layout(unit.kind).build().join(dir).join("out") + } + + pub fn host_deps(&self) -> &Path { + self.host.deps() + } + + /// Return the root of the build output tree + pub fn target_root(&self) -> &Path { + self.host.dest() + } + + /// Returns the appropriate output directory for the specified package and + /// target. + pub fn out_dir(&mut self, unit: &Unit<'a>) -> PathBuf { + if unit.profile.doc { + self.layout(unit.kind).root().parent().unwrap().join("doc") + } else if unit.target.is_custom_build() { + self.build_script_dir(unit) + } else if unit.target.is_example() { + self.layout(unit.kind).examples().to_path_buf() + } else { + self.deps_dir(unit).to_path_buf() + } + } + + fn pkg_dir(&mut self, unit: &Unit<'a>) -> String { + let name = unit.pkg.package_id().name(); + match self.target_metadata(unit) { + Some(meta) => format!("{}-{}", name, meta), + None => format!("{}-{}", name, self.target_short_hash(unit)), + } + } + + /// Return the host triple for this context + pub fn host_triple(&self) -> &str { + &self.build_config.host_triple + } + + /// Return the target triple which this context is targeting. + pub fn target_triple(&self) -> &str { + self.requested_target().unwrap_or(self.host_triple()) + } + + /// Requested (not actual) target for the build + pub fn requested_target(&self) -> Option<&str> { + self.build_config.requested_target.as_ref().map(|s| &s[..]) + } + + /// Get the short hash based only on the PackageId + /// Used for the metadata when target_metadata returns None + pub fn target_short_hash(&self, unit: &Unit) -> String { + let hashable = unit.pkg.package_id().stable_hash(self.ws.root()); + util::short_hash(&hashable) + } + + /// Get the metadata for a target in a specific profile + /// We build to the path: "{filename}-{target_metadata}" + /// We use a linking step to link/copy to a predictable filename + /// like `target/debug/libfoo.{a,so,rlib}` and such. 
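+    /// The result is memoized in `target_metadatas`, so repeated lookups for
+    /// the same unit are cheap.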
+    pub fn target_metadata(&mut self, unit: &Unit<'a>) -> Option<Metadata> {
+        if let Some(cache) = self.target_metadatas.get(unit) {
+            return cache.clone()
+        }
+
+        let metadata = self.calc_target_metadata(unit);
+        self.target_metadatas.insert(*unit, metadata.clone());
+        metadata
+    }
+
+    fn calc_target_metadata(&mut self, unit: &Unit<'a>) -> Option<Metadata> {
+        // No metadata for dylibs because of a couple of issues
+        // - OSX encodes the dylib name in the executable
+        // - on Windows rustc emits multiple files, and we can't easily link all of them
+        //
+        // No metadata for bin because of an issue
+        // - wasm32 rustc/emcc encodes the .wasm name in the .js (rust-lang/cargo#4535)
+        //
+        // Two exceptions
+        // 1) Upstream dependencies (we aren't exporting + need to resolve name conflict)
+        // 2) __CARGO_DEFAULT_LIB_METADATA env var
+        //
+        // Note, though, that the compiler's build system at least wants
+        // path dependencies (eg libstd) to have hashes in filenames. To account for
+        // that we have an extra hack here which reads the
+        // `__CARGO_DEFAULT_LIB_METADATA` environment variable and creates a
+        // hash in the filename if that's present.
+        //
+        // This environment variable should not be relied on! It's
+        // just here for rustbuild. We need a more principled method of
+        // doing this eventually.
+        let __cargo_default_lib_metadata = env::var("__CARGO_DEFAULT_LIB_METADATA");
+        if !unit.profile.test &&
+            (unit.target.is_dylib() || unit.target.is_cdylib() ||
+             (unit.target.is_bin() && self.target_triple().starts_with("wasm32-"))) &&
+            unit.pkg.package_id().source_id().is_path() &&
+            !__cargo_default_lib_metadata.is_ok()
+        {
+            return None;
+        }
+
+        let mut hasher = SipHasher::new_with_keys(0, 0);
+
+        // Unique metadata per (name, source, version) triple. This'll allow us
+        // to pull crates from anywhere w/o worrying about conflicts
+        unit.pkg.package_id().stable_hash(self.ws.root()).hash(&mut hasher);
+
+        // Add package properties which map to environment variables
+        // exposed by Cargo
+        let manifest_metadata = unit.pkg.manifest().metadata();
+        manifest_metadata.authors.hash(&mut hasher);
+        manifest_metadata.description.hash(&mut hasher);
+        manifest_metadata.homepage.hash(&mut hasher);
+
+        // Also mix in enabled features to our metadata. This'll ensure that
+        // when changing feature sets each lib is separately cached.
+        self.resolve.features_sorted(unit.pkg.package_id()).hash(&mut hasher);
+
+        // Mix in the target-metadata of all the dependencies of this target
+        if let Ok(deps) = self.dep_targets(unit) {
+            let mut deps_metadata = deps.into_iter().map(|dep_unit| {
+                self.target_metadata(&dep_unit)
+            }).collect::<Vec<_>>();
+            deps_metadata.sort();
+            deps_metadata.hash(&mut hasher);
+        }
+
+        // Throw in the profile we're compiling with. This helps caching
+        // panic=abort and panic=unwind artifacts, additionally with various
+        // settings like debuginfo and whatnot.
+        unit.profile.hash(&mut hasher);
+
+        // Artifacts compiled for the host should have a different metadata
+        // piece than those compiled for the target, so make sure we throw in
+        // the unit's `kind` as well
+        unit.kind.hash(&mut hasher);
+
+        // Finally throw in the target name/kind. This ensures that concurrent
+        // compiles of targets in the same crate don't collide.
+        unit.target.name().hash(&mut hasher);
+        unit.target.kind().hash(&mut hasher);
+
+        if let Ok(rustc) = self.config.rustc() {
+            rustc.verbose_version.hash(&mut hasher);
+        }
+
+        // Seed the contents of __CARGO_DEFAULT_LIB_METADATA to the hasher if present.
+ // This should be the release channel, to get a different hash for each channel. + if let Ok(ref channel) = __cargo_default_lib_metadata { + channel.hash(&mut hasher); + } + + Some(Metadata(hasher.finish())) + } + + /// Returns the file stem for a given target/profile combo (with metadata) + pub fn file_stem(&mut self, unit: &Unit<'a>) -> String { + match self.target_metadata(unit) { + Some(ref metadata) => format!("{}-{}", unit.target.crate_name(), + metadata), + None => self.bin_stem(unit), + } + } + + /// Returns the bin stem for a given target (without metadata) + fn bin_stem(&self, unit: &Unit) -> String { + if unit.target.allows_underscores() { + unit.target.name().to_string() + } else { + unit.target.crate_name() + } + } + + /// Returns a tuple with the directory and name of the hard link we expect + /// our target to be copied to. Eg, file_stem may be out_dir/deps/foo-abcdef + /// and link_stem would be out_dir/foo + /// This function returns it in two parts so the caller can add prefix/suffix + /// to filename separately + /// + /// Returns an Option because in some cases we don't want to link + /// (eg a dependent lib) + pub fn link_stem(&mut self, unit: &Unit<'a>) -> Option<(PathBuf, String)> { + let src_dir = self.out_dir(unit); + let bin_stem = self.bin_stem(unit); + let file_stem = self.file_stem(unit); + + // We currently only lift files up from the `deps` directory. If + // it was compiled into something like `example/` or `doc/` then + // we don't want to link it up. + if src_dir.ends_with("deps") { + // Don't lift up library dependencies + if self.ws.members().find(|&p| p == unit.pkg).is_none() && + !unit.target.is_bin() { + None + } else { + Some(( + src_dir.parent().unwrap().to_owned(), + if unit.profile.test {file_stem} else {bin_stem}, + )) + } + } else if bin_stem == file_stem { + None + } else if src_dir.ends_with("examples") + || src_dir.parent().unwrap().ends_with("build") { + Some((src_dir, bin_stem)) + } else { + None + } + } + + /// Return the filenames that the given target for the given profile will + /// generate as a list of 3-tuples (filename, link_dst, linkable) + /// + /// - filename: filename rustc compiles to. (Often has metadata suffix). 
+ /// - link_dst: Optional file to link/copy the result to (without metadata suffix) + /// - linkable: Whether possible to link against file (eg it's a library) + pub fn target_filenames(&mut self, unit: &Unit<'a>) + -> CargoResult, TargetFileType)>>> { + if let Some(cache) = self.target_filenames.get(unit) { + return Ok(Arc::clone(cache)) + } + + let result = self.calc_target_filenames(unit); + if let Ok(ref ret) = result { + self.target_filenames.insert(*unit, Arc::clone(ret)); + } + result + } + + fn calc_target_filenames(&mut self, unit: &Unit<'a>) + -> CargoResult, TargetFileType)>>> { + let out_dir = self.out_dir(unit); + let stem = self.file_stem(unit); + let link_stem = self.link_stem(unit); + let info = if unit.target.for_host() { + &self.host_info + } else { + &self.target_info + }; + + let mut ret = Vec::new(); + let mut unsupported = Vec::new(); + { + if unit.profile.check { + let filename = out_dir.join(format!("lib{}.rmeta", stem)); + let link_dst = link_stem.clone().map(|(ld, ls)| { + ld.join(format!("lib{}.rmeta", ls)) + }); + ret.push((filename, link_dst, TargetFileType::Linkable)); + } else { + let mut add = |crate_type: &str, file_type: TargetFileType| -> CargoResult<()> { + let crate_type = if crate_type == "lib" {"rlib"} else {crate_type}; + let mut crate_types = info.crate_types.borrow_mut(); + let entry = crate_types.entry(crate_type.to_string()); + let crate_type_info = match entry { + Entry::Occupied(o) => &*o.into_mut(), + Entry::Vacant(v) => { + let value = info.discover_crate_type(v.key())?; + &*v.insert(value) + } + }; + match *crate_type_info { + Some((ref prefix, ref suffix)) => { + let suffixes = add_target_specific_suffixes( + &self.target_triple(), + &crate_type, + unit.target.kind(), + suffix, + file_type, + ); + for (suffix, file_type, should_replace_hyphens) in suffixes { + // wasm bin target will generate two files in deps such as + // "web-stuff.js" and "web_stuff.wasm". Note the different usages of + // "-" and "_". should_replace_hyphens is a flag to indicate that + // we need to convert the stem "web-stuff" to "web_stuff", so we + // won't miss "web_stuff.wasm". + let conv = |s: String| if should_replace_hyphens { + s.replace("-", "_") + } else { + s + }; + let filename = + out_dir.join(format!("{}{}{}", prefix, conv(stem.clone()), suffix)); + let link_dst = link_stem.clone().map(|(ld, ls)| { + ld.join(format!("{}{}{}", prefix, conv(ls), suffix)) + }); + ret.push((filename, link_dst, file_type)); + } + Ok(()) + } + // not supported, don't worry about it + None => { + unsupported.push(crate_type.to_string()); + Ok(()) + } + } + }; + //info!("{:?}", unit); + match *unit.target.kind() { + TargetKind::Bin | + TargetKind::CustomBuild | + TargetKind::ExampleBin | + TargetKind::Bench | + TargetKind::Test => { + add("bin", TargetFileType::Normal)?; + } + TargetKind::Lib(..) | + TargetKind::ExampleLib(..) 
+                    if unit.profile.test => {
+                    add("bin", TargetFileType::Normal)?;
+                }
+                TargetKind::ExampleLib(ref kinds) |
+                TargetKind::Lib(ref kinds) => {
+                    for kind in kinds {
+                        add(kind.crate_type(), if kind.linkable() {
+                            TargetFileType::Linkable
+                        } else {
+                            TargetFileType::Normal
+                        })?;
+                    }
+                }
+            }
+            }
+        }
+        if ret.is_empty() {
+            if !unsupported.is_empty() {
+                bail!("cannot produce {} for `{}` as the target `{}` \
+                       does not support these crate types",
+                      unsupported.join(", "), unit.pkg, self.target_triple())
+            }
+            bail!("cannot compile `{}` as the target `{}` does not \
+                   support any of the output crate types",
+                  unit.pkg, self.target_triple());
+        }
+        info!("Target filenames: {:?}", ret);
+
+        Ok(Arc::new(ret))
+    }
+
+    /// For a package, return all targets which are registered as dependencies
+    /// for that package.
+    pub fn dep_targets(&self, unit: &Unit<'a>) -> CargoResult<Vec<Unit<'a>>> {
+        if unit.profile.run_custom_build {
+            return self.dep_run_custom_build(unit)
+        } else if unit.profile.doc && !unit.profile.test {
+            return self.doc_deps(unit);
+        }
+
+        let id = unit.pkg.package_id();
+        let deps = self.resolve.deps(id);
+        let mut ret = deps.filter(|dep| {
+            unit.pkg.dependencies().iter().filter(|d| {
+                d.name() == dep.name() && d.version_req().matches(dep.version())
+            }).any(|d| {
+                // If this target is a build command, then we only want build
+                // dependencies, otherwise we want everything *other than* build
+                // dependencies.
+                if unit.target.is_custom_build() != d.is_build() {
+                    return false
+                }
+
+                // If this dependency is *not* a transitive dependency, then it
+                // only applies to test/example targets
+                if !d.is_transitive() && !unit.target.is_test() &&
+                   !unit.target.is_example() && !unit.profile.test {
+                    return false
+                }
+
+                // If this dependency is only available for certain platforms,
+                // make sure we're only enabling it for that platform.
+                if !self.dep_platform_activated(d, unit.kind) {
+                    return false
+                }
+
+                // If the dependency is optional, then we're only activating it
+                // if the corresponding feature was activated
+                if d.is_optional() && !self.resolve.features(id).contains(d.name()) {
+                    return false;
+                }
+
+                // If we've gotten past all that, then this dependency is
+                // actually used!
+                true
+            })
+        }).filter_map(|id| {
+            match self.get_package(id) {
+                Ok(pkg) => {
+                    pkg.targets().iter().find(|t| t.is_lib()).map(|t| {
+                        let unit = Unit {
+                            pkg: pkg,
+                            target: t,
+                            profile: self.lib_or_check_profile(unit, t),
+                            kind: unit.kind.for_target(t),
+                        };
+                        Ok(unit)
+                    })
+                }
+                Err(e) => Some(Err(e))
+            }
+        }).collect::<CargoResult<Vec<_>>>()?;
+
+        // If this target is a build script, then what we've collected so far is
+        // all we need. If this isn't a build script, then it depends on the
+        // build script if there is one.
+        if unit.target.is_custom_build() {
+            return Ok(ret)
+        }
+        ret.extend(self.dep_build_script(unit));
+
+        // If this target is a binary, test, example, etc, then it depends on
+        // the library of the same package. The call to `resolve.deps` above
+        // didn't include `pkg` in the return values, so we need to special case
+        // it here and see if we need to push `(pkg, pkg_lib_target)`.
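+        // A plain library build has nothing further to depend on, so we can
+        // return what we have collected so far.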
+        if unit.target.is_lib() && !unit.profile.doc {
+            return Ok(ret)
+        }
+        ret.extend(self.maybe_lib(unit));
+
+        // Integration tests/benchmarks require binaries to be built
+        if unit.profile.test &&
+           (unit.target.is_test() || unit.target.is_bench()) {
+            ret.extend(unit.pkg.targets().iter().filter(|t| {
+                let no_required_features = Vec::new();
+
+                t.is_bin() &&
+                // Skip binaries with required features that have not been selected.
+                t.required_features().unwrap_or(&no_required_features).iter().all(|f| {
+                    self.resolve.features(id).contains(f)
+                })
+            }).map(|t| {
+                Unit {
+                    pkg: unit.pkg,
+                    target: t,
+                    profile: self.lib_profile(),
+                    kind: unit.kind.for_target(t),
+                }
+            }));
+        }
+        Ok(ret)
+    }
+
+    /// Returns the dependencies needed to run a build script.
+    ///
+    /// The `unit` provided must represent an execution of a build script, and
+    /// the returned set of units must all be run before `unit` is run.
+    pub fn dep_run_custom_build(&self, unit: &Unit<'a>)
+                                -> CargoResult<Vec<Unit<'a>>> {
+        // If this build script's execution has been overridden then we don't
+        // actually depend on anything, we've reached the end of the dependency
+        // chain as we've got all the info we're gonna get.
+        let key = (unit.pkg.package_id().clone(), unit.kind);
+        if self.build_script_overridden.contains(&key) {
+            return Ok(Vec::new())
+        }
+
+        // When not overridden, then the dependencies to run a build script are:
+        //
+        // 1. Compiling the build script itself
+        // 2. For each immediate dependency of our package which has a `links`
+        //    key, the execution of that build script.
+        let not_custom_build = unit.pkg.targets().iter().find(|t| {
+            !t.is_custom_build()
+        }).unwrap();
+        let tmp = Unit {
+            target: not_custom_build,
+            profile: &self.profiles.dev,
+            ..*unit
+        };
+        let deps = self.dep_targets(&tmp)?;
+        Ok(deps.iter().filter_map(|unit| {
+            if !unit.target.linkable() || unit.pkg.manifest().links().is_none() {
+                return None
+            }
+            self.dep_build_script(unit)
+        }).chain(Some(Unit {
+            profile: self.build_script_profile(unit.pkg.package_id()),
+            kind: Kind::Host, // build scripts always compiled for the host
+            ..*unit
+        })).collect())
+    }
+
+    /// Returns the dependencies necessary to document a package
+    fn doc_deps(&self, unit: &Unit<'a>) -> CargoResult<Vec<Unit<'a>>> {
+        let deps = self.resolve.deps(unit.pkg.package_id()).filter(|dep| {
+            unit.pkg.dependencies().iter().filter(|d| {
+                d.name() == dep.name()
+            }).any(|dep| {
+                match dep.kind() {
+                    DepKind::Normal => self.dep_platform_activated(dep,
+                                                                   unit.kind),
+                    _ => false,
+                }
+            })
+        }).map(|dep| {
+            self.get_package(dep)
+        });
+
+        // To document a library, we depend on dependencies actually being
+        // built. If we're documenting *all* libraries, then we also depend on
+        // the documentation of the library being built.
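+        // Each dependency contributes a build unit below, plus a documentation
+        // unit when all libraries are being documented (`doc_all`).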
+        let mut ret = Vec::new();
+        for dep in deps {
+            let dep = dep?;
+            let lib = match dep.targets().iter().find(|t| t.is_lib()) {
+                Some(lib) => lib,
+                None => continue,
+            };
+            ret.push(Unit {
+                pkg: dep,
+                target: lib,
+                profile: self.lib_profile(),
+                kind: unit.kind.for_target(lib),
+            });
+            if self.build_config.doc_all {
+                ret.push(Unit {
+                    pkg: dep,
+                    target: lib,
+                    profile: &self.profiles.doc,
+                    kind: unit.kind.for_target(lib),
+                });
+            }
+        }
+
+        // Be sure to build/run the build script for documented libraries as well
+        ret.extend(self.dep_build_script(unit));
+
+        // If we document a binary, we need the library available
+        if unit.target.is_bin() {
+            ret.extend(self.maybe_lib(unit));
+        }
+        Ok(ret)
+    }
+
+    /// If a build script is scheduled to be run for the package specified by
+    /// `unit`, this function will return the unit to run that build script.
+    ///
+    /// Overriding a build script simply means that the running of the build
+    /// script itself doesn't have any dependencies, so even in that case a unit
+    /// of work is still returned. `None` is only returned if the package has no
+    /// build script.
+    fn dep_build_script(&self, unit: &Unit<'a>) -> Option<Unit<'a>> {
+        unit.pkg.targets().iter().find(|t| t.is_custom_build()).map(|t| {
+            Unit {
+                pkg: unit.pkg,
+                target: t,
+                profile: &self.profiles.custom_build,
+                kind: unit.kind,
+            }
+        })
+    }
+
+    fn maybe_lib(&self, unit: &Unit<'a>) -> Option<Unit<'a>> {
+        unit.pkg.targets().iter().find(|t| t.linkable()).map(|t| {
+            Unit {
+                pkg: unit.pkg,
+                target: t,
+                profile: self.lib_or_check_profile(unit, t),
+                kind: unit.kind.for_target(t),
+            }
+        })
+    }
+
+    fn dep_platform_activated(&self, dep: &Dependency, kind: Kind) -> bool {
+        // If this dependency is only available for certain platforms,
+        // make sure we're only enabling it for that platform.
+        let platform = match dep.platform() {
+            Some(p) => p,
+            None => return true,
+        };
+        let (name, info) = match kind {
+            Kind::Host => (self.host_triple(), &self.host_info),
+            Kind::Target => (self.target_triple(), &self.target_info),
+        };
+        platform.matches(name, info.cfg.as_ref().map(|cfg| &cfg[..]))
+    }
+
+    /// Gets a package for the given package id.
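+    /// This simply delegates to the `PackageSet` the context was built with.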
+ pub fn get_package(&self, id: &PackageId) -> CargoResult<&'a Package> { + self.packages.get(id) + } + + /// Get the user-specified linker for a particular host or target + pub fn linker(&self, kind: Kind) -> Option<&Path> { + self.target_config(kind).linker.as_ref().map(|s| s.as_ref()) + } + + /// Get the user-specified `ar` program for a particular host or target + pub fn ar(&self, kind: Kind) -> Option<&Path> { + self.target_config(kind).ar.as_ref().map(|s| s.as_ref()) + } + + /// Get the list of cfg printed out from the compiler for the specified kind + pub fn cfg(&self, kind: Kind) -> &[Cfg] { + let info = match kind { + Kind::Host => &self.host_info, + Kind::Target => &self.target_info, + }; + info.cfg.as_ref().map(|s| &s[..]).unwrap_or(&[]) + } + + /// Get the target configuration for a particular host or target + fn target_config(&self, kind: Kind) -> &TargetConfig { + match kind { + Kind::Host => &self.build_config.host, + Kind::Target => &self.build_config.target, + } + } + + /// Number of jobs specified for this build + pub fn jobs(&self) -> u32 { self.build_config.jobs } + + pub fn lib_profile(&self) -> &'a Profile { + let (normal, test) = if self.build_config.release { + (&self.profiles.release, &self.profiles.bench_deps) + } else { + (&self.profiles.dev, &self.profiles.test_deps) + }; + if self.build_config.test { + test + } else { + normal + } + } + + pub fn lib_or_check_profile(&self, unit: &Unit, target: &Target) -> &'a Profile { + if unit.profile.check && !target.is_custom_build() && !target.for_host() { + &self.profiles.check + } else { + self.lib_profile() + } + } + + pub fn build_script_profile(&self, _pkg: &PackageId) -> &'a Profile { + // TODO: should build scripts always be built with the same library + // profile? How is this controlled at the CLI layer? + self.lib_profile() + } + + pub fn incremental_args(&self, unit: &Unit) -> CargoResult> { + if self.incremental_enabled { + if unit.pkg.package_id().source_id().is_path() { + // Only enable incremental compilation for sources the user can modify. + // For things that change infrequently, non-incremental builds yield + // better performance. + // (see also https://github.com/rust-lang/cargo/issues/3972) + return Ok(vec![format!("-Zincremental={}", + self.layout(unit.kind).incremental().display())]); + } else if unit.profile.codegen_units.is_none() { + // For non-incremental builds we set a higher number of + // codegen units so we get faster compiles. It's OK to do + // so because the user has already opted into slower + // runtime code by setting CARGO_INCREMENTAL. + return Ok(vec![format!("-Ccodegen-units={}", ::num_cpus::get())]); + } + } + + Ok(vec![]) + } + + pub fn rustflags_args(&self, unit: &Unit) -> CargoResult> { + env_args(self.config, &self.build_config, self.info(&unit.kind), unit.kind, "RUSTFLAGS") + } + + pub fn rustdocflags_args(&self, unit: &Unit) -> CargoResult> { + env_args(self.config, &self.build_config, self.info(&unit.kind), unit.kind, "RUSTDOCFLAGS") + } + + pub fn show_warnings(&self, pkg: &PackageId) -> bool { + pkg.source_id().is_path() || self.config.extra_verbose() + } + + fn info(&self, kind: &Kind) -> &TargetInfo { + match *kind { + Kind::Host => &self.host_info, + Kind::Target => &self.target_info, + } + } +} + +/// Acquire extra flags to pass to the compiler from various locations. 
+/// +/// The locations are: +/// +/// - the `RUSTFLAGS` environment variable +/// +/// then if this was not found +/// +/// - `target.*.rustflags` from the manifest (Cargo.toml) +/// - `target.cfg(..).rustflags` from the manifest +/// +/// then if neither of these were found +/// +/// - `build.rustflags` from the manifest +/// +/// Note that if a `target` is specified, no args will be passed to host code (plugins, build +/// scripts, ...), even if it is the same as the target. +fn env_args(config: &Config, + build_config: &BuildConfig, + target_info: &TargetInfo, + kind: Kind, + name: &str) -> CargoResult> { + // We *want* to apply RUSTFLAGS only to builds for the + // requested target architecture, and not to things like build + // scripts and plugins, which may be for an entirely different + // architecture. Cargo's present architecture makes it quite + // hard to only apply flags to things that are not build + // scripts and plugins though, so we do something more hacky + // instead to avoid applying the same RUSTFLAGS to multiple targets + // arches: + // + // 1) If --target is not specified we just apply RUSTFLAGS to + // all builds; they are all going to have the same target. + // + // 2) If --target *is* specified then we only apply RUSTFLAGS + // to compilation units with the Target kind, which indicates + // it was chosen by the --target flag. + // + // This means that, e.g. even if the specified --target is the + // same as the host, build scripts in plugins won't get + // RUSTFLAGS. + let compiling_with_target = build_config.requested_target.is_some(); + let is_target_kind = kind == Kind::Target; + + if compiling_with_target && !is_target_kind { + // This is probably a build script or plugin and we're + // compiling with --target. In this scenario there are + // no rustflags we can apply. + return Ok(Vec::new()); + } + + // First try RUSTFLAGS from the environment + if let Ok(a) = env::var(name) { + let args = a.split(' ') + .map(str::trim) + .filter(|s| !s.is_empty()) + .map(str::to_string); + return Ok(args.collect()); + } + + let mut rustflags = Vec::new(); + + let name = name.chars().flat_map(|c| c.to_lowercase()).collect::(); + // Then the target.*.rustflags value... + let target = build_config.requested_target.as_ref().unwrap_or(&build_config.host_triple); + let key = format!("target.{}.{}", target, name); + if let Some(args) = config.get_list_or_split_string(&key)? { + let args = args.val.into_iter(); + rustflags.extend(args); + } + // ...including target.'cfg(...)'.rustflags + if let Some(ref target_cfg) = target_info.cfg { + if let Some(table) = config.get_table("target")? { + let cfgs = table.val.keys().filter_map(|t| { + if t.starts_with("cfg(") && t.ends_with(')') { + let cfg = &t[4..t.len() - 1]; + CfgExpr::from_str(cfg) + .ok() + .and_then(|c| if c.matches(target_cfg) { Some(t) } else { None }) + } else { + None + } + }); + for n in cfgs { + let key = format!("target.{}.{}", n, name); + if let Some(args) = config.get_list_or_split_string(&key)? { + let args = args.val.into_iter(); + rustflags.extend(args); + } + } + } + } + + if !rustflags.is_empty() { + return Ok(rustflags); + } + + // Then the build.rustflags value + let key = format!("build.{}", name); + if let Some(args) = config.get_list_or_split_string(&key)? 
{ + let args = args.val.into_iter(); + return Ok(args.collect()); + } + + Ok(Vec::new()) +} + +impl fmt::Display for Metadata { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:016x}", self.0) + } +} + +/// Takes rustc output (using specialized command line args), and calculates the file prefix and +/// suffix for the given crate type, or returns None if the type is not supported. (e.g. for a +/// rust library like libcargo.rlib, prefix = "lib", suffix = "rlib"). +/// +/// The caller needs to ensure that the lines object is at the correct line for the given crate +/// type: this is not checked. +// This function can not handle more than 1 file per type (with wasm32-unknown-emscripten, there +// are 2 files for bin (.wasm and .js)) +fn parse_crate_type( + crate_type: &str, + error: &str, + lines: &mut str::Lines, +) -> CargoResult> { + let not_supported = error.lines().any(|line| { + (line.contains("unsupported crate type") || + line.contains("unknown crate type")) && + line.contains(crate_type) + }); + if not_supported { + return Ok(None); + } + let line = match lines.next() { + Some(line) => line, + None => bail!("malformed output when learning about \ + crate-type {} information", crate_type), + }; + let mut parts = line.trim().split("___"); + let prefix = parts.next().unwrap(); + let suffix = match parts.next() { + Some(part) => part, + None => bail!("output of --print=file-names has changed in \ + the compiler, cannot parse"), + }; + + Ok(Some((prefix.to_string(), suffix.to_string()))) +} + +// (not a rustdoc) +// Return a list of 3-tuples (suffix, file_type, should_replace_hyphens). +// +// should_replace_hyphens will be used by the caller to replace "-" with "_" +// in a bin_stem. See the caller side (calc_target_filenames()) for details. +fn add_target_specific_suffixes( + target_triple: &str, + crate_type: &str, + target_kind: &TargetKind, + suffix: &str, + file_type: TargetFileType, +) -> Vec<(String, TargetFileType, bool)> { + let mut ret = vec![(suffix.to_string(), file_type, false)]; + + // rust-lang/cargo#4500 + if target_triple.ends_with("pc-windows-msvc") && crate_type.ends_with("dylib") && + suffix == ".dll" + { + ret.push((".dll.lib".to_string(), TargetFileType::Normal, false)); + } + + // rust-lang/cargo#4535 + if target_triple.starts_with("wasm32-") && crate_type == "bin" && + suffix == ".js" + { + ret.push((".wasm".to_string(), TargetFileType::Normal, true)); + } + + // rust-lang/cargo#4490 + // - only uplift *.dSYM for binaries. + // tests are run directly from target/debug/deps/ + // and examples are inside target/debug/examples/ which already have *.dSYM next to them + // so no need to do anything. 
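+    // e.g. a binary `foo` on macOS gains a sibling `foo.dSYM` bundle.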
+    if target_triple.contains("-apple-") && *target_kind == TargetKind::Bin {
+        ret.push((".dSYM".to_string(), TargetFileType::DebugInfo, false));
+    }
+
+    ret
+}
diff --git a/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/custom_build.rs b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/custom_build.rs
new file mode 100644
index 000000000..ee51b9b3b
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/custom_build.rs
@@ -0,0 +1,538 @@
+use std::collections::{HashMap, BTreeSet, HashSet};
+use std::fs;
+use std::path::{PathBuf, Path};
+use std::str;
+use std::sync::{Mutex, Arc};
+
+use core::PackageId;
+use util::{Freshness, Cfg};
+use util::errors::{CargoResult, CargoResultExt, CargoError};
+use util::{internal, profile, paths};
+use util::machine_message;
+
+use super::job::Work;
+use super::{fingerprint, Kind, Context, Unit};
+
+/// Contains the parsed output of a custom build script.
+#[derive(Clone, Debug, Hash)]
+pub struct BuildOutput {
+    /// Paths to pass to rustc with the `-L` flag
+    pub library_paths: Vec<PathBuf>,
+    /// Names and link kinds of libraries, suitable for the `-l` flag
+    pub library_links: Vec<String>,
+    /// Various `--cfg` flags to pass to the compiler
+    pub cfgs: Vec<String>,
+    /// Additional environment variables to run the compiler with.
+    pub env: Vec<(String, String)>,
+    /// Metadata to pass to the immediate dependencies
+    pub metadata: Vec<(String, String)>,
+    /// Paths to trigger a rerun of this build script.
+    pub rerun_if_changed: Vec<String>,
+    /// Environment variables which, when changed, will cause a rebuild.
+    pub rerun_if_env_changed: Vec<String>,
+    /// Warnings generated by this build.
+    pub warnings: Vec<String>,
+}
+
+/// Map of packages to build info
+pub type BuildMap = HashMap<(PackageId, Kind), BuildOutput>;
+
+/// Build info and overrides
+pub struct BuildState {
+    pub outputs: Mutex<BuildMap>,
+    overrides: HashMap<(String, Kind), BuildOutput>,
+}
+
+#[derive(Default)]
+pub struct BuildScripts {
+    // Cargo will use this `to_link` vector to add -L flags to compiles as we
+    // propagate them upwards towards the final build. Note, however, that we
+    // need to preserve the ordering of `to_link` to be topologically sorted.
+    // This will ensure that build scripts which print their paths properly will
+    // correctly pick up the files they generated (if there are duplicates
+    // elsewhere).
+    //
+    // To preserve this ordering, the (id, kind) is stored in two places, once
+    // in the `Vec` and once in `seen_to_link` for a fast lookup. We maintain
+    // this as we're building interactively below to ensure that the memory
+    // usage here doesn't blow up too much.
+    //
+    // For more information, see #2354
+    pub to_link: Vec<(PackageId, Kind)>,
+    seen_to_link: HashSet<(PackageId, Kind)>,
+    pub plugins: BTreeSet<PackageId>,
+}
+
+pub struct BuildDeps {
+    pub build_script_output: PathBuf,
+    pub rerun_if_changed: Vec<String>,
+    pub rerun_if_env_changed: Vec<String>,
+}
+
+/// Prepares a `Work` that executes the target as a custom build script.
+///
+/// The `req` given is the requirement which this run of the build script will
+/// prepare work for. If the requirement is specified as both the target and the
+/// host platforms it is assumed that the two are equal and the build script is
+/// only run once (not twice).
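+///
+/// The returned tuple is (dirty work, fresh work, freshness); the fingerprint
+/// machinery decides which of the two work units eventually runs.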
+pub fn prepare<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) + -> CargoResult<(Work, Work, Freshness)> { + let _p = profile::start(format!("build script prepare: {}/{}", + unit.pkg, unit.target.name())); + + let key = (unit.pkg.package_id().clone(), unit.kind); + let overridden = cx.build_script_overridden.contains(&key); + let (work_dirty, work_fresh) = if overridden { + (Work::noop(), Work::noop()) + } else { + build_work(cx, unit)? + }; + + // Now that we've prep'd our work, build the work needed to manage the + // fingerprint and then start returning that upwards. + let (freshness, dirty, fresh) = + fingerprint::prepare_build_cmd(cx, unit)?; + + Ok((work_dirty.then(dirty), work_fresh.then(fresh), freshness)) +} + +fn build_work<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) + -> CargoResult<(Work, Work)> { + let dependencies = cx.dep_run_custom_build(unit)?; + let build_script_unit = dependencies.iter().find(|d| { + !d.profile.run_custom_build && d.target.is_custom_build() + }).expect("running a script not depending on an actual script"); + let script_output = cx.build_script_dir(build_script_unit); + let build_output = cx.build_script_out_dir(unit); + + // Building the command to execute + let to_exec = script_output.join(unit.target.name()); + + // Start preparing the process to execute, starting out with some + // environment variables. Note that the profile-related environment + // variables are not set with this the build script's profile but rather the + // package's library profile. + let profile = cx.lib_profile(); + let to_exec = to_exec.into_os_string(); + let mut cmd = cx.compilation.host_process(to_exec, unit.pkg)?; + cmd.env("OUT_DIR", &build_output) + .env("CARGO_MANIFEST_DIR", unit.pkg.root()) + .env("NUM_JOBS", &cx.jobs().to_string()) + .env("TARGET", &match unit.kind { + Kind::Host => cx.host_triple(), + Kind::Target => cx.target_triple(), + }) + .env("DEBUG", &profile.debuginfo.is_some().to_string()) + .env("OPT_LEVEL", &profile.opt_level) + .env("PROFILE", if cx.build_config.release { "release" } else { "debug" }) + .env("HOST", cx.host_triple()) + .env("RUSTC", &cx.config.rustc()?.path) + .env("RUSTDOC", &*cx.config.rustdoc()?) + .inherit_jobserver(&cx.jobserver); + + if let Some(links) = unit.pkg.manifest().links() { + cmd.env("CARGO_MANIFEST_LINKS", links); + } + + // Be sure to pass along all enabled features for this package, this is the + // last piece of statically known information that we have. + for feat in cx.resolve.features(unit.pkg.package_id()).iter() { + cmd.env(&format!("CARGO_FEATURE_{}", super::envify(feat)), "1"); + } + + let mut cfg_map = HashMap::new(); + for cfg in cx.cfg(unit.kind) { + match *cfg { + Cfg::Name(ref n) => { cfg_map.insert(n.clone(), None); } + Cfg::KeyPair(ref k, ref v) => { + match *cfg_map.entry(k.clone()).or_insert(Some(Vec::new())) { + Some(ref mut values) => values.push(v.clone()), + None => { /* ... */ } + } + } + } + } + for (k, v) in cfg_map { + let k = format!("CARGO_CFG_{}", super::envify(&k)); + match v { + Some(list) => { cmd.env(&k, list.join(",")); } + None => { cmd.env(&k, ""); } + } + } + + // Gather the set of native dependencies that this package has along with + // some other variables to close over. + // + // This information will be used at build-time later on to figure out which + // sorts of variables need to be discovered at that time. 
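+    // Only dependencies whose build script actually runs (and which therefore
+    // declare a `links` key) are collected here, keyed by that links name.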
+ let lib_deps = { + dependencies.iter().filter_map(|unit| { + if unit.profile.run_custom_build { + Some((unit.pkg.manifest().links().unwrap().to_string(), + unit.pkg.package_id().clone())) + } else { + None + } + }).collect::>() + }; + let pkg_name = unit.pkg.to_string(); + let build_state = Arc::clone(&cx.build_state); + let id = unit.pkg.package_id().clone(); + let (output_file, err_file) = { + let build_output_parent = build_output.parent().unwrap(); + let output_file = build_output_parent.join("output"); + let err_file = build_output_parent.join("stderr"); + (output_file, err_file) + }; + let all = (id.clone(), pkg_name.clone(), Arc::clone(&build_state), + output_file.clone()); + let build_scripts = super::load_build_deps(cx, unit); + let kind = unit.kind; + let json_messages = cx.build_config.json_messages; + + // Check to see if the build script has already run, and if it has keep + // track of whether it has told us about some explicit dependencies + let prev_output = BuildOutput::parse_file(&output_file, &pkg_name).ok(); + let deps = BuildDeps::new(&output_file, prev_output.as_ref()); + cx.build_explicit_deps.insert(*unit, deps); + + fs::create_dir_all(&script_output)?; + fs::create_dir_all(&build_output)?; + + let root_output = cx.target_root().to_path_buf(); + + // Prepare the unit of "dirty work" which will actually run the custom build + // command. + // + // Note that this has to do some extra work just before running the command + // to determine extra environment variables and such. + let dirty = Work::new(move |state| { + // Make sure that OUT_DIR exists. + // + // If we have an old build directory, then just move it into place, + // otherwise create it! + if fs::metadata(&build_output).is_err() { + fs::create_dir(&build_output).chain_err(|| { + internal("failed to create script output directory for \ + build command") + })?; + } + + // For all our native lib dependencies, pick up their metadata to pass + // along to this custom build command. We're also careful to augment our + // dynamic library search path in case the build script depended on any + // native dynamic libraries. + { + let build_state = build_state.outputs.lock().unwrap(); + for (name, id) in lib_deps { + let key = (id.clone(), kind); + let state = build_state.get(&key).ok_or_else(|| { + internal(format!("failed to locate build state for env \ + vars: {}/{:?}", id, kind)) + })?; + let data = &state.metadata; + for &(ref key, ref value) in data.iter() { + cmd.env(&format!("DEP_{}_{}", super::envify(&name), + super::envify(key)), value); + } + } + if let Some(build_scripts) = build_scripts { + super::add_plugin_deps(&mut cmd, &build_state, + &build_scripts, + &root_output)?; + } + } + + // And now finally, run the build command itself! + state.running(&cmd); + let output = cmd.exec_with_streaming( + &mut |out_line| { state.stdout(out_line); Ok(()) }, + &mut |err_line| { state.stderr(err_line); Ok(()) }, + true, + ).map_err(|e| { + CargoError::from( + format!("failed to run custom build command for `{}`\n{}", + pkg_name, e.description())) + + })?; + + + // After the build command has finished running, we need to be sure to + // remember all of its output so we can later discover precisely what it + // was, even if we don't run the build command again (due to freshness). + // + // This is also the location where we provide feedback into the build + // state informing what variables were discovered via our script as + // well. 
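+        // Persist the raw stdout/stderr next to the build directory so a later
+        // fresh run can re-parse the output without re-running the script.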
+        paths::write(&output_file, &output.stdout)?;
+        paths::write(&err_file, &output.stderr)?;
+        let parsed_output = BuildOutput::parse(&output.stdout, &pkg_name)?;
+
+        if json_messages {
+            let library_paths = parsed_output.library_paths.iter().map(|l| {
+                l.display().to_string()
+            }).collect::<Vec<_>>();
+            machine_message::emit(&machine_message::BuildScript {
+                package_id: &id,
+                linked_libs: &parsed_output.library_links,
+                linked_paths: &library_paths,
+                cfgs: &parsed_output.cfgs,
+                env: &parsed_output.env,
+            });
+        }
+
+        build_state.insert(id, kind, parsed_output);
+        Ok(())
+    });
+
+    // Now that we've prepared our work-to-do, we need to prepare the fresh work
+    // itself to run when we actually end up just discarding what we calculated
+    // above.
+    let fresh = Work::new(move |_tx| {
+        let (id, pkg_name, build_state, output_file) = all;
+        let output = match prev_output {
+            Some(output) => output,
+            None => BuildOutput::parse_file(&output_file, &pkg_name)?,
+        };
+        build_state.insert(id, kind, output);
+        Ok(())
+    });
+
+    Ok((dirty, fresh))
+}
+
+impl BuildState {
+    pub fn new(config: &super::BuildConfig) -> BuildState {
+        let mut overrides = HashMap::new();
+        let i1 = config.host.overrides.iter().map(|p| (p, Kind::Host));
+        let i2 = config.target.overrides.iter().map(|p| (p, Kind::Target));
+        for ((name, output), kind) in i1.chain(i2) {
+            overrides.insert((name.clone(), kind), output.clone());
+        }
+        BuildState {
+            outputs: Mutex::new(HashMap::new()),
+            overrides: overrides,
+        }
+    }
+
+    fn insert(&self, id: PackageId, kind: Kind, output: BuildOutput) {
+        self.outputs.lock().unwrap().insert((id, kind), output);
+    }
+}
+
+impl BuildOutput {
+    pub fn parse_file(path: &Path, pkg_name: &str) -> CargoResult<BuildOutput> {
+        let contents = paths::read_bytes(path)?;
+        BuildOutput::parse(&contents, pkg_name)
+    }
+
+    // Parses the output of a script.
+    // The `pkg_name` is used for error messages.
+    pub fn parse(input: &[u8], pkg_name: &str) -> CargoResult<BuildOutput> {
+        let mut library_paths = Vec::new();
+        let mut library_links = Vec::new();
+        let mut cfgs = Vec::new();
+        let mut env = Vec::new();
+        let mut metadata = Vec::new();
+        let mut rerun_if_changed = Vec::new();
+        let mut rerun_if_env_changed = Vec::new();
+        let mut warnings = Vec::new();
+        let whence = format!("build script of `{}`", pkg_name);
+
+        for line in input.split(|b| *b == b'\n') {
+            let line = match str::from_utf8(line) {
+                Ok(line) => line.trim(),
+                Err(..)
=> continue, + }; + let mut iter = line.splitn(2, ':'); + if iter.next() != Some("cargo") { + // skip this line since it doesn't start with "cargo:" + continue; + } + let data = match iter.next() { + Some(val) => val, + None => continue + }; + + // getting the `key=value` part of the line + let mut iter = data.splitn(2, '='); + let key = iter.next(); + let value = iter.next(); + let (key, value) = match (key, value) { + (Some(a), Some(b)) => (a, b.trim_right()), + // line started with `cargo:` but didn't match `key=value` + _ => bail!("Wrong output in {}: `{}`", whence, line), + }; + + match key { + "rustc-flags" => { + let (paths, links) = + BuildOutput::parse_rustc_flags(value, &whence)?; + library_links.extend(links.into_iter()); + library_paths.extend(paths.into_iter()); + } + "rustc-link-lib" => library_links.push(value.to_string()), + "rustc-link-search" => library_paths.push(PathBuf::from(value)), + "rustc-cfg" => cfgs.push(value.to_string()), + "rustc-env" => env.push(BuildOutput::parse_rustc_env(value, &whence)?), + "warning" => warnings.push(value.to_string()), + "rerun-if-changed" => rerun_if_changed.push(value.to_string()), + "rerun-if-env-changed" => rerun_if_env_changed.push(value.to_string()), + _ => metadata.push((key.to_string(), value.to_string())), + } + } + + Ok(BuildOutput { + library_paths: library_paths, + library_links: library_links, + cfgs: cfgs, + env: env, + metadata: metadata, + rerun_if_changed: rerun_if_changed, + rerun_if_env_changed: rerun_if_env_changed, + warnings: warnings, + }) + } + + pub fn parse_rustc_flags(value: &str, whence: &str) + -> CargoResult<(Vec, Vec)> { + let value = value.trim(); + let mut flags_iter = value.split(|c: char| c.is_whitespace()) + .filter(|w| w.chars().any(|c| !c.is_whitespace())); + let (mut library_paths, mut library_links) = (Vec::new(), Vec::new()); + while let Some(flag) = flags_iter.next() { + if flag != "-l" && flag != "-L" { + bail!("Only `-l` and `-L` flags are allowed in {}: `{}`", + whence, value) + } + let value = match flags_iter.next() { + Some(v) => v, + None => bail!("Flag in rustc-flags has no value in {}: `{}`", + whence, value) + }; + match flag { + "-l" => library_links.push(value.to_string()), + "-L" => library_paths.push(PathBuf::from(value)), + + // was already checked above + _ => bail!("only -l and -L flags are allowed") + }; + } + Ok((library_paths, library_links)) + } + + pub fn parse_rustc_env(value: &str, whence: &str) + -> CargoResult<(String, String)> { + let mut iter = value.splitn(2, '='); + let name = iter.next(); + let val = iter.next(); + match (name, val) { + (Some(n), Some(v)) => Ok((n.to_owned(), v.to_owned())), + _ => bail!("Variable rustc-env has no value in {}: {}", whence, value), + } + } +} + +impl BuildDeps { + pub fn new(output_file: &Path, output: Option<&BuildOutput>) -> BuildDeps { + BuildDeps { + build_script_output: output_file.to_path_buf(), + rerun_if_changed: output.map(|p| &p.rerun_if_changed) + .cloned() + .unwrap_or_default(), + rerun_if_env_changed: output.map(|p| &p.rerun_if_env_changed) + .cloned() + .unwrap_or_default(), + } + } +} + +/// Compute the `build_scripts` map in the `Context` which tracks what build +/// scripts each package depends on. +/// +/// The global `build_scripts` map lists for all (package, kind) tuples what set +/// of packages' build script outputs must be considered. For example this lists +/// all dependencies' `-L` flags which need to be propagated transitively. 
+/// +/// The given set of targets to this function is the initial set of +/// targets/profiles which are being built. +pub fn build_map<'b, 'cfg>(cx: &mut Context<'b, 'cfg>, + units: &[Unit<'b>]) + -> CargoResult<()> { + let mut ret = HashMap::new(); + for unit in units { + build(&mut ret, cx, unit)?; + } + cx.build_scripts.extend(ret.into_iter().map(|(k, v)| { + (k, Arc::new(v)) + })); + return Ok(()); + + // Recursive function to build up the map we're constructing. This function + // memoizes all of its return values as it goes along. + fn build<'a, 'b, 'cfg>(out: &'a mut HashMap, BuildScripts>, + cx: &mut Context<'b, 'cfg>, + unit: &Unit<'b>) + -> CargoResult<&'a BuildScripts> { + // Do a quick pre-flight check to see if we've already calculated the + // set of dependencies. + if out.contains_key(unit) { + return Ok(&out[unit]) + } + + { + let key = unit.pkg.manifest().links().map(|l| (l.to_string(), unit.kind)); + let build_state = &cx.build_state; + if let Some(output) = key.and_then(|k| build_state.overrides.get(&k)) { + let key = (unit.pkg.package_id().clone(), unit.kind); + cx.build_script_overridden.insert(key.clone()); + build_state + .outputs + .lock() + .unwrap() + .insert(key, output.clone()); + } + } + + let mut ret = BuildScripts::default(); + + if !unit.target.is_custom_build() && unit.pkg.has_custom_build() { + add_to_link(&mut ret, unit.pkg.package_id(), unit.kind); + } + + // We want to invoke the compiler deterministically to be cache-friendly + // to rustc invocation caching schemes, so be sure to generate the same + // set of build script dependency orderings via sorting the targets that + // come out of the `Context`. + let mut targets = cx.dep_targets(unit)?; + targets.sort_by_key(|u| u.pkg.package_id()); + + for unit in targets.iter() { + let dep_scripts = build(out, cx, unit)?; + + if unit.target.for_host() { + ret.plugins.extend(dep_scripts.to_link.iter() + .map(|p| &p.0).cloned()); + } else if unit.target.linkable() { + for &(ref pkg, kind) in dep_scripts.to_link.iter() { + add_to_link(&mut ret, pkg, kind); + } + } + } + + let prev = out.entry(*unit).or_insert(BuildScripts::default()); + for (pkg, kind) in ret.to_link { + add_to_link(prev, &pkg, kind); + } + prev.plugins.extend(ret.plugins); + Ok(prev) + } + + // When adding an entry to 'to_link' we only actually push it on if the + // script hasn't seen it yet (e.g. we don't push on duplicates). 
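+    // The `seen_to_link` set gives O(1) duplicate detection while `to_link`
+    // preserves the topological ordering described above.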
+ fn add_to_link(scripts: &mut BuildScripts, pkg: &PackageId, kind: Kind) { + if scripts.seen_to_link.insert((pkg.clone(), kind)) { + scripts.to_link.push((pkg.clone(), kind)); + } + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/fingerprint.rs b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/fingerprint.rs new file mode 100644 index 000000000..62a53a857 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/fingerprint.rs @@ -0,0 +1,722 @@ +use std::env; +use std::fs::{self, File, OpenOptions}; +use std::hash::{self, Hasher}; +use std::io::prelude::*; +use std::io::{BufReader, SeekFrom}; +use std::path::{Path, PathBuf}; +use std::sync::{Arc, Mutex}; + +use filetime::FileTime; +use serde::ser::{self, Serialize}; +use serde::de::{self, Deserialize}; +use serde_json; + +use core::{Package, TargetKind}; +use util; +use util::{Fresh, Dirty, Freshness, internal, profile}; +use util::errors::{CargoResult, CargoResultExt}; +use util::paths; + +use super::job::Work; +use super::context::{Context, Unit, TargetFileType}; +use super::custom_build::BuildDeps; + +/// A tuple result of the `prepare_foo` functions in this module. +/// +/// The first element of the triple is whether the target in question is +/// currently fresh or not, and the second two elements are work to perform when +/// the target is dirty or fresh, respectively. +/// +/// Both units of work are always generated because a fresh package may still be +/// rebuilt if some upstream dependency changes. +pub type Preparation = (Freshness, Work, Work); + +/// Prepare the necessary work for the fingerprint for a specific target. +/// +/// When dealing with fingerprints, cargo gets to choose what granularity +/// "freshness" is considered at. One option is considering freshness at the +/// package level. This means that if anything in a package changes, the entire +/// package is rebuilt, unconditionally. This simplicity comes at a cost, +/// however, in that test-only changes will cause libraries to be rebuilt, which +/// is quite unfortunate! +/// +/// The cost was deemed high enough that fingerprints are now calculated at the +/// layer of a target rather than a package. Each target can then be kept track +/// of separately and only rebuilt as necessary. This requires cargo to +/// understand what the inputs are to a target, so we drive rustc with the +/// --dep-info flag to learn about all input files to a unit of compilation. +/// +/// This function will calculate the fingerprint for a target and prepare the +/// work necessary to either write the fingerprint or copy over all fresh files +/// from the old directories to their new locations. +pub fn prepare_target<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, + unit: &Unit<'a>) -> CargoResult { + let _p = profile::start(format!("fingerprint: {} / {}", + unit.pkg.package_id(), unit.target.name())); + let new = cx.fingerprint_dir(unit); + let loc = new.join(&filename(cx, unit)); + + debug!("fingerprint at: {}", loc.display()); + + let fingerprint = calculate(cx, unit)?; + let compare = compare_old_fingerprint(&loc, &*fingerprint); + log_compare(unit, &compare); + + // If our comparison failed (e.g. we're going to trigger a rebuild of this + // crate), then we also ensure the source of the crate passes all + // verification checks before we build it. + // + // The `Source::verify` method is intended to allow sources to execute + // pre-build checks to ensure that the relevant source code is all + // up-to-date and as expected. 
This is currently used primarily for + // directory sources which will use this hook to perform an integrity check + // on all files in the source to ensure they haven't changed. If they have + // changed then an error is issued. + if compare.is_err() { + let source_id = unit.pkg.package_id().source_id(); + let sources = cx.packages.sources(); + let source = sources.get(source_id).ok_or_else(|| { + internal("missing package source") + })?; + source.verify(unit.pkg.package_id())?; + } + + let root = cx.out_dir(unit); + let mut missing_outputs = false; + if unit.profile.doc { + missing_outputs = !root.join(unit.target.crate_name()) + .join("index.html").exists(); + } else { + for &(ref src, ref link_dst, file_type) in cx.target_filenames(unit)?.iter() { + if file_type == TargetFileType::DebugInfo { + continue; + } + missing_outputs |= !src.exists(); + if let Some(ref link_dst) = *link_dst { + missing_outputs |= !link_dst.exists(); + } + } + } + + let allow_failure = unit.profile.rustc_args.is_some(); + let write_fingerprint = Work::new(move |_| { + match fingerprint.update_local() { + Ok(()) => {} + Err(..) if allow_failure => return Ok(()), + Err(e) => return Err(e) + } + write_fingerprint(&loc, &*fingerprint) + }); + + let fresh = compare.is_ok() && !missing_outputs; + Ok((if fresh {Fresh} else {Dirty}, write_fingerprint, Work::noop())) +} + +/// A fingerprint can be considered to be a "short string" representing the +/// state of a world for a package. +/// +/// If a fingerprint ever changes, then the package itself needs to be +/// recompiled. Inputs to the fingerprint include source code modifications, +/// compiler flags, compiler version, etc. This structure is not simply a +/// `String` due to the fact that some fingerprints cannot be calculated lazily. +/// +/// Path sources, for example, use the mtime of the corresponding dep-info file +/// as a fingerprint (all source files must be modified *before* this mtime). +/// This dep-info file is not generated, however, until after the crate is +/// compiled. As a result, this structure can be thought of as a fingerprint +/// to-be. The actual value can be calculated via `hash()`, but the operation +/// may fail as some files may not have been generated. +/// +/// Note that dependencies are taken into account for fingerprints because rustc +/// requires that whenever an upstream crate is recompiled that all downstream +/// dependants are also recompiled. This is typically tracked through +/// `DependencyQueue`, but it also needs to be retained here because Cargo can +/// be interrupted while executing, losing the state of the `DependencyQueue` +/// graph. 
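The doc comment above describes a fingerprint as a lazily computed short string; a minimal illustration of the freshness decision it feeds, comparing a previously written hash against a recomputed one (file layout and helper name are assumptions, std only):

```rust
use std::fs;

// Sketch: the previous build wrote the hash to a fingerprint file; the
// target is fresh only when the recomputed hash matches what is on disk.
fn is_fresh(loc: &str, new_hash: u64) -> bool {
    match fs::read_to_string(loc) {
        Ok(old) => old.trim() == format!("{:x}", new_hash),
        Err(_) => false, // no fingerprint yet: always dirty
    }
}

fn main() {
    // Demo only: writes a throwaway fingerprint file under /tmp.
    fs::write("/tmp/fp-demo", format!("{:x}", 0xdeadbeefu64)).unwrap();
    assert!(is_fresh("/tmp/fp-demo", 0xdeadbeef));
    assert!(!is_fresh("/tmp/fp-demo", 0x1234));
}
```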
+#[derive(Serialize, Deserialize)]
+pub struct Fingerprint {
+    rustc: u64,
+    features: String,
+    target: u64,
+    profile: u64,
+    #[serde(serialize_with = "serialize_deps", deserialize_with = "deserialize_deps")]
+    deps: Vec<(String, Arc<Fingerprint>)>,
+    local: Vec<LocalFingerprint>,
+    #[serde(skip_serializing, skip_deserializing)]
+    memoized_hash: Mutex<Option<u64>>,
+    rustflags: Vec<String>,
+}
+
+fn serialize_deps<S>(deps: &[(String, Arc<Fingerprint>)], ser: S)
+                     -> Result<S::Ok, S::Error>
+    where S: ser::Serializer,
+{
+    deps.iter().map(|&(ref a, ref b)| {
+        (a, b.hash())
+    }).collect::<Vec<_>>().serialize(ser)
+}
+
+fn deserialize_deps<'de, D>(d: D) -> Result<Vec<(String, Arc<Fingerprint>)>, D::Error>
+    where D: de::Deserializer<'de>,
+{
+    let decoded = <Vec<(String, u64)>>::deserialize(d)?;
+    Ok(decoded.into_iter().map(|(name, hash)| {
+        (name, Arc::new(Fingerprint {
+            rustc: 0,
+            target: 0,
+            profile: 0,
+            local: vec![LocalFingerprint::Precalculated(String::new())],
+            features: String::new(),
+            deps: Vec::new(),
+            memoized_hash: Mutex::new(Some(hash)),
+            rustflags: Vec::new(),
+        }))
+    }).collect())
+}
+
+#[derive(Serialize, Deserialize, Hash)]
+enum LocalFingerprint {
+    Precalculated(String),
+    MtimeBased(MtimeSlot, PathBuf),
+    EnvBased(String, Option<String>),
+}
+
+struct MtimeSlot(Mutex<Option<FileTime>>);
+
+impl Fingerprint {
+    fn update_local(&self) -> CargoResult<()> {
+        let mut hash_busted = false;
+        for local in self.local.iter() {
+            match *local {
+                LocalFingerprint::MtimeBased(ref slot, ref path) => {
+                    let meta = fs::metadata(path)
+                        .chain_err(|| {
+                            internal(format!("failed to stat `{}`", path.display()))
+                        })?;
+                    let mtime = FileTime::from_last_modification_time(&meta);
+                    *slot.0.lock().unwrap() = Some(mtime);
+                }
+                LocalFingerprint::EnvBased(..) |
+                LocalFingerprint::Precalculated(..) => continue,
+            }
+            hash_busted = true;
+        }
+
+        if hash_busted {
+            *self.memoized_hash.lock().unwrap() = None;
+        }
+        Ok(())
+    }
+
+    fn hash(&self) -> u64 {
+        if let Some(s) = *self.memoized_hash.lock().unwrap() {
+            return s
+        }
+        let ret = util::hash_u64(self);
+        *self.memoized_hash.lock().unwrap() = Some(ret);
+        ret
+    }
+
+    fn compare(&self, old: &Fingerprint) -> CargoResult<()> {
+        if self.rustc != old.rustc {
+            bail!("rust compiler has changed")
+        }
+        if self.features != old.features {
+            bail!("features have changed: {} != {}", self.features, old.features)
+        }
+        if self.target != old.target {
+            bail!("target configuration has changed")
+        }
+        if self.profile != old.profile {
+            bail!("profile configuration has changed")
+        }
+        if self.rustflags != old.rustflags {
+            return Err(internal("RUSTFLAGS has changed"))
+        }
+        if self.local.len() != old.local.len() {
+            bail!("local lens changed");
+        }
+        for (new, old) in self.local.iter().zip(&old.local) {
+            match (new, old) {
+                (&LocalFingerprint::Precalculated(ref a),
+                 &LocalFingerprint::Precalculated(ref b)) => {
+                    if a != b {
+                        bail!("precalculated components have changed: {} != {}",
+                              a, b)
+                    }
+                }
+                (&LocalFingerprint::MtimeBased(ref on_disk_mtime, ref ap),
+                 &LocalFingerprint::MtimeBased(ref previously_built_mtime, ref bp)) => {
+                    let on_disk_mtime = on_disk_mtime.0.lock().unwrap();
+                    let previously_built_mtime = previously_built_mtime.0.lock().unwrap();
+
+                    let should_rebuild = match (*on_disk_mtime, *previously_built_mtime) {
+                        (None, None) => false,
+                        (Some(_), None) | (None, Some(_)) => true,
+                        (Some(on_disk), Some(previously_built)) => on_disk > previously_built,
+                    };
+
+                    if should_rebuild {
+                        bail!("mtime based components have changed: previously {:?} now {:?}, \
+                               paths are {:?} and {:?}",
+                              *previously_built_mtime, *on_disk_mtime, ap, bp)
+                    }
+                }
+                (&LocalFingerprint::EnvBased(ref akey, ref avalue),
+                 &LocalFingerprint::EnvBased(ref bkey, ref bvalue)) => {
+                    if *akey != *bkey {
+                        bail!("env vars changed: {} != {}", akey, bkey);
+                    }
+                    if *avalue != *bvalue {
+                        bail!("env var `{}` changed: previously {:?} now {:?}",
+                              akey, bvalue, avalue)
+                    }
+                }
+                _ => bail!("local fingerprint type has changed"),
+            }
+        }
+
+        if self.deps.len() != old.deps.len() {
+            bail!("number of dependencies has changed")
+        }
+        for (a, b) in self.deps.iter().zip(old.deps.iter()) {
+            if a.1.hash() != b.1.hash() {
+                bail!("new ({}) != old ({})", a.0, b.0)
+            }
+        }
+        Ok(())
+    }
+}
+
+impl hash::Hash for Fingerprint {
+    fn hash<H: Hasher>(&self, h: &mut H) {
+        let Fingerprint {
+            rustc,
+            ref features,
+            target,
+            profile,
+            ref deps,
+            ref local,
+            memoized_hash: _,
+            ref rustflags,
+        } = *self;
+        (rustc, features, target, profile, local, rustflags).hash(h);
+
+        h.write_usize(deps.len());
+        for &(ref name, ref fingerprint) in deps {
+            name.hash(h);
+            // use memoized dep hashes to avoid exponential blowup
+            h.write_u64(Fingerprint::hash(fingerprint));
+        }
+    }
+}
+
+impl hash::Hash for MtimeSlot {
+    fn hash<H: Hasher>(&self, h: &mut H) {
+        self.0.lock().unwrap().hash(h)
+    }
+}
+
+impl ser::Serialize for MtimeSlot {
+    fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error>
+        where S: ser::Serializer,
+    {
+        self.0.lock().unwrap().map(|ft| {
+            (ft.seconds_relative_to_1970(), ft.nanoseconds())
+        }).serialize(s)
+    }
+}
+
+impl<'de> de::Deserialize<'de> for MtimeSlot {
+    fn deserialize<D>(d: D) -> Result<MtimeSlot, D::Error>
+        where D: de::Deserializer<'de>,
+    {
+        let kind: Option<(u64, u32)> = de::Deserialize::deserialize(d)?;
+        Ok(MtimeSlot(Mutex::new(kind.map(|(s, n)| {
+            FileTime::from_seconds_since_1970(s, n)
+        }))))
+    }
+}
+
+/// Calculates the fingerprint for a package/target pair.
+///
+/// This fingerprint is used by Cargo to learn about when information such as:
+///
+/// * A non-path package changes (changes version, changes revision, etc).
+/// * Any dependency changes
+/// * The compiler changes
+/// * The set of features a package is built with changes
+/// * The profile a target is compiled with changes (e.g. opt-level changes)
+///
+/// Information like file modification time is only calculated for path
+/// dependencies and is calculated in `calculate_target_fresh`.
+fn calculate<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>)
+                       -> CargoResult<Arc<Fingerprint>> {
+    if let Some(s) = cx.fingerprints.get(unit) {
+        return Ok(Arc::clone(s))
+    }
+
+    // Next, recursively calculate the fingerprint for all of our dependencies.
+    //
+    // Skip the fingerprints of build scripts as they may not always be
+    // available and the dirtiness propagation for modification is tracked
+    // elsewhere. Also skip fingerprints of binaries because they don't actually
+    // induce a recompile, they're just dependencies in the sense that they need
+    // to be built.
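The `memoized_hash: Mutex<Option<u64>>` pattern above, reduced to a self-contained sketch (std only, names hypothetical): compute once, cache behind a lock, and clear the cache when an input changes.

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::sync::Mutex;

// Compute-once hash cache; `invalidate` mirrors what `update_local` does
// when an mtime slot is refreshed.
struct Memo<T: Hash> {
    value: T,
    cached: Mutex<Option<u64>>,
}

impl<T: Hash> Memo<T> {
    fn hash_u64(&self) -> u64 {
        if let Some(h) = *self.cached.lock().unwrap() {
            return h;
        }
        let mut hasher = DefaultHasher::new();
        self.value.hash(&mut hasher);
        let h = hasher.finish();
        *self.cached.lock().unwrap() = Some(h);
        h
    }
    fn invalidate(&self) {
        *self.cached.lock().unwrap() = None; // e.g. after an mtime update
    }
}

fn main() {
    let m = Memo { value: "fingerprint", cached: Mutex::new(None) };
    let first = m.hash_u64();
    assert_eq!(first, m.hash_u64()); // second call hits the cache
    m.invalidate();
}
```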
+ let deps = cx.dep_targets(unit)?; + let deps = deps.iter().filter(|u| { + !u.target.is_custom_build() && !u.target.is_bin() + }).map(|unit| { + calculate(cx, unit).map(|fingerprint| { + (unit.pkg.package_id().to_string(), fingerprint) + }) + }).collect::>>()?; + + // And finally, calculate what our own local fingerprint is + let local = if use_dep_info(unit) { + let dep_info = dep_info_loc(cx, unit); + let mtime = dep_info_mtime_if_fresh(&dep_info)?; + LocalFingerprint::MtimeBased(MtimeSlot(Mutex::new(mtime)), dep_info) + } else { + let fingerprint = pkg_fingerprint(cx, unit.pkg)?; + LocalFingerprint::Precalculated(fingerprint) + }; + let mut deps = deps; + deps.sort_by(|&(ref a, _), &(ref b, _)| a.cmp(b)); + let extra_flags = if unit.profile.doc { + cx.rustdocflags_args(unit)? + } else { + cx.rustflags_args(unit)? + }; + let fingerprint = Arc::new(Fingerprint { + rustc: util::hash_u64(&cx.config.rustc()?.verbose_version), + target: util::hash_u64(&unit.target), + profile: util::hash_u64(&unit.profile), + features: format!("{:?}", cx.resolve.features_sorted(unit.pkg.package_id())), + deps: deps, + local: vec![local], + memoized_hash: Mutex::new(None), + rustflags: extra_flags, + }); + cx.fingerprints.insert(*unit, Arc::clone(&fingerprint)); + Ok(fingerprint) +} + + +// We want to use the mtime for files if we're a path source, but if we're a +// git/registry source, then the mtime of files may fluctuate, but they won't +// change so long as the source itself remains constant (which is the +// responsibility of the source) +fn use_dep_info(unit: &Unit) -> bool { + let path = unit.pkg.summary().source_id().is_path(); + !unit.profile.doc && path +} + +/// Prepare the necessary work for the fingerprint of a build command. +/// +/// Build commands are located on packages, not on targets. Additionally, we +/// don't have --dep-info to drive calculation of the fingerprint of a build +/// command. This brings up an interesting predicament which gives us a few +/// options to figure out whether a build command is dirty or not: +/// +/// 1. A build command is dirty if *any* file in a package changes. In theory +/// all files are candidate for being used by the build command. +/// 2. A build command is dirty if any file in a *specific directory* changes. +/// This may lose information as it may require files outside of the specific +/// directory. +/// 3. A build command must itself provide a dep-info-like file stating how it +/// should be considered dirty or not. +/// +/// The currently implemented solution is option (1), although it is planned to +/// migrate to option (2) in the near future. +pub fn prepare_build_cmd<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) + -> CargoResult { + let _p = profile::start(format!("fingerprint build cmd: {}", + unit.pkg.package_id())); + let new = cx.fingerprint_dir(unit); + let loc = new.join("build"); + + debug!("fingerprint at: {}", loc.display()); + + let (local, output_path) = build_script_local_fingerprints(cx, unit)?; + let mut fingerprint = Fingerprint { + rustc: 0, + target: 0, + profile: 0, + features: String::new(), + deps: Vec::new(), + local: local, + memoized_hash: Mutex::new(None), + rustflags: Vec::new(), + }; + let compare = compare_old_fingerprint(&loc, &fingerprint); + log_compare(unit, &compare); + + // When we write out the fingerprint, we may want to actually change the + // kind of fingerprint being recorded. 
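The `deps.sort_by` above matters because hashing is order-sensitive; sorting makes the fingerprint independent of graph traversal order, as this std-only sketch shows:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Stable sort by dependency name before hashing, so two traversals that
// visit the same dependencies in different orders produce the same value.
fn fingerprint_deps(mut deps: Vec<(String, u64)>) -> u64 {
    deps.sort_by(|a, b| a.0.cmp(&b.0));
    let mut h = DefaultHasher::new();
    deps.hash(&mut h);
    h.finish()
}

fn main() {
    let a = fingerprint_deps(vec![("serde".into(), 1), ("libc".into(), 2)]);
    let b = fingerprint_deps(vec![("libc".into(), 2), ("serde".into(), 1)]);
    assert_eq!(a, b);
}
```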
If we started out, then the previous + // run of the build script (or if it had never run before) may indicate to + // use the `Precalculated` variant with the `pkg_fingerprint`. If the build + // script then prints `rerun-if-changed`, however, we need to record what's + // necessary for that fingerprint. + // + // Hence, if there were some `rerun-if-changed` directives forcibly change + // the kind of fingerprint by reinterpreting the dependencies output by the + // build script. + let state = Arc::clone(&cx.build_state); + let key = (unit.pkg.package_id().clone(), unit.kind); + let root = unit.pkg.root().to_path_buf(); + let write_fingerprint = Work::new(move |_| { + if let Some(output_path) = output_path { + let outputs = state.outputs.lock().unwrap(); + let outputs = &outputs[&key]; + if !outputs.rerun_if_changed.is_empty() || + !outputs.rerun_if_env_changed.is_empty() { + let deps = BuildDeps::new(&output_path, Some(outputs)); + fingerprint.local = local_fingerprints_deps(&deps, &root); + fingerprint.update_local()?; + } + } + write_fingerprint(&loc, &fingerprint) + }); + + Ok((if compare.is_ok() {Fresh} else {Dirty}, write_fingerprint, Work::noop())) +} + +fn build_script_local_fingerprints<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, + unit: &Unit<'a>) + -> CargoResult<(Vec, Option)> +{ + let state = cx.build_state.outputs.lock().unwrap(); + // First up, if this build script is entirely overridden, then we just + // return the hash of what we overrode it with. + // + // Note that the `None` here means that we don't want to update the local + // fingerprint afterwards because this is all just overridden. + if let Some(output) = state.get(&(unit.pkg.package_id().clone(), unit.kind)) { + debug!("override local fingerprints deps"); + let s = format!("overridden build state with hash: {}", + util::hash_u64(output)); + return Ok((vec![LocalFingerprint::Precalculated(s)], None)) + } + + // Next up we look at the previously listed dependencies for the build + // script. If there are none then we're in the "old mode" where we just + // assume that we're changed if anything in the packaged changed. The + // `Some` here though means that we want to update our local fingerprints + // after we're done as running this build script may have created more + // dependencies. + let deps = &cx.build_explicit_deps[unit]; + let output = deps.build_script_output.clone(); + if deps.rerun_if_changed.is_empty() && deps.rerun_if_env_changed.is_empty() { + debug!("old local fingerprints deps"); + let s = pkg_fingerprint(cx, unit.pkg)?; + return Ok((vec![LocalFingerprint::Precalculated(s)], Some(output))) + } + + // Ok so now we're in "new mode" where we can have files listed as + // dependencies as well as env vars listed as dependencies. Process them all + // here. 
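The env-var half of "new mode" amounts to snapshotting each `rerun-if-env-changed` variable's value, or its absence, at build time; a std-only sketch:

```rust
use std::env;

// Record each variable's value (or None) so a later run can detect a change;
// mirrors the `LocalFingerprint::EnvBased` capture below.
fn snapshot_env(vars: &[&str]) -> Vec<(String, Option<String>)> {
    vars.iter()
        .map(|v| (v.to_string(), env::var(v).ok()))
        .collect()
}

fn main() {
    for (var, val) in snapshot_env(&["PKG_CONFIG_PATH", "CC"]) {
        println!("{} = {:?}", var, val); // `None` also participates in the fingerprint
    }
}
```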
+    Ok((local_fingerprints_deps(deps, unit.pkg.root()), Some(output)))
+}
+
+fn local_fingerprints_deps(deps: &BuildDeps, root: &Path) -> Vec<LocalFingerprint> {
+    debug!("new local fingerprints deps");
+    let mut local = Vec::new();
+    if !deps.rerun_if_changed.is_empty() {
+        let output = &deps.build_script_output;
+        let deps = deps.rerun_if_changed.iter().map(|p| root.join(p));
+        let mtime = mtime_if_fresh(output, deps);
+        let mtime = MtimeSlot(Mutex::new(mtime));
+        local.push(LocalFingerprint::MtimeBased(mtime, output.clone()));
+    }
+
+    for var in deps.rerun_if_env_changed.iter() {
+        let val = env::var(var).ok();
+        local.push(LocalFingerprint::EnvBased(var.clone(), val));
+    }
+
+    local
+}
+
+fn write_fingerprint(loc: &Path, fingerprint: &Fingerprint) -> CargoResult<()> {
+    let hash = fingerprint.hash();
+    debug!("write fingerprint: {}", loc.display());
+    paths::write(loc, util::to_hex(hash).as_bytes())?;
+    paths::write(&loc.with_extension("json"),
+                 &serde_json::to_vec(&fingerprint).unwrap())?;
+    Ok(())
+}
+
+/// Prepare for work when a package starts to build
+pub fn prepare_init<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult<()> {
+    let new1 = cx.fingerprint_dir(unit);
+
+    if fs::metadata(&new1).is_err() {
+        fs::create_dir(&new1)?;
+    }
+
+    Ok(())
+}
+
+pub fn dep_info_loc<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> PathBuf {
+    cx.fingerprint_dir(unit).join(&format!("dep-{}", filename(cx, unit)))
+}
+
+fn compare_old_fingerprint(loc: &Path, new_fingerprint: &Fingerprint)
+                           -> CargoResult<()> {
+    let old_fingerprint_short = paths::read(loc)?;
+    let new_hash = new_fingerprint.hash();
+
+    if util::to_hex(new_hash) == old_fingerprint_short {
+        return Ok(())
+    }
+
+    let old_fingerprint_json = paths::read(&loc.with_extension("json"))?;
+    let old_fingerprint = serde_json::from_str(&old_fingerprint_json)
+        .chain_err(|| internal("failed to deserialize json"))?;
+    new_fingerprint.compare(&old_fingerprint)
+}
+
+fn log_compare(unit: &Unit, compare: &CargoResult<()>) {
+    let ce = match *compare {
+        Ok(..) => return,
+        Err(ref e) => e,
+    };
+    info!("fingerprint error for {}: {}", unit.pkg, ce);
+
+    for cause in ce.iter() {
+        info!("  cause: {}", cause);
+    }
+}
+
+// Parse the dep-info into a list of paths
+pub fn parse_dep_info(dep_info: &Path) -> CargoResult<Option<Vec<PathBuf>>> {
+    macro_rules! fs_try {
+        ($e:expr) => (match $e { Ok(e) => e, Err(..) => return Ok(None) })
+    }
+    let mut f = BufReader::new(fs_try!(File::open(dep_info)));
+    // see comments in append_current_dir for where this cwd is manifested from.
+    let mut cwd = Vec::new();
+    if fs_try!(f.read_until(0, &mut cwd)) == 0 {
+        return Ok(None)
+    }
+    let cwd = util::bytes2path(&cwd[..cwd.len()-1])?;
+    let line = match f.lines().next() {
+        Some(Ok(line)) => line,
+        _ => return Ok(None),
+    };
+    let pos = line.find(": ").ok_or_else(|| {
+        internal(format!("dep-info not in an understood format: {}",
+                         dep_info.display()))
+    })?;
+    let deps = &line[pos + 2..];
+
+    let mut paths = Vec::new();
+    let mut deps = deps.split(' ').map(|s| s.trim()).filter(|s| !s.is_empty());
+    while let Some(s) = deps.next() {
+        let mut file = s.to_string();
+        while file.ends_with('\\') {
+            file.pop();
+            file.push(' ');
+            file.push_str(deps.next().ok_or_else(|| {
+                internal("malformed dep-info format, trailing \\".to_string())
+            })?);
+        }
+        paths.push(cwd.join(&file));
+    }
+    Ok(Some(paths))
+}
+
+fn dep_info_mtime_if_fresh(dep_info: &Path) -> CargoResult<Option<FileTime>> {
+    if let Some(paths) = parse_dep_info(dep_info)?
{ + Ok(mtime_if_fresh(dep_info, paths.iter())) + } else { + Ok(None) + } +} + +fn pkg_fingerprint(cx: &Context, pkg: &Package) -> CargoResult { + let source_id = pkg.package_id().source_id(); + let sources = cx.packages.sources(); + + let source = sources.get(source_id).ok_or_else(|| { + internal("missing package source") + })?; + source.fingerprint(pkg) +} + +fn mtime_if_fresh(output: &Path, paths: I) -> Option + where I: IntoIterator, + I::Item: AsRef, +{ + let meta = match fs::metadata(output) { + Ok(meta) => meta, + Err(..) => return None, + }; + let mtime = FileTime::from_last_modification_time(&meta); + + let any_stale = paths.into_iter().any(|path| { + let path = path.as_ref(); + let meta = match fs::metadata(path) { + Ok(meta) => meta, + Err(..) => { + info!("stale: {} -- missing", path.display()); + return true + } + }; + let mtime2 = FileTime::from_last_modification_time(&meta); + if mtime2 > mtime { + info!("stale: {} -- {} vs {}", path.display(), mtime2, mtime); + true + } else { + false + } + }); + + if any_stale { + None + } else { + Some(mtime) + } +} + +fn filename<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> String { + // file_stem includes metadata hash. Thus we have a different + // fingerprint for every metadata hash version. This works because + // even if the package is fresh, we'll still link the fresh target + let file_stem = cx.file_stem(unit); + let kind = match *unit.target.kind() { + TargetKind::Lib(..) => "lib", + TargetKind::Bin => "bin", + TargetKind::Test => "integration-test", + TargetKind::ExampleBin | + TargetKind::ExampleLib(..) => "example", + TargetKind::Bench => "bench", + TargetKind::CustomBuild => "build-script", + }; + let flavor = if unit.profile.test { + "test-" + } else if unit.profile.doc { + "doc-" + } else { + "" + }; + format!("{}{}-{}", flavor, kind, file_stem) +} + +// The dep-info files emitted by the compiler all have their listed paths +// relative to whatever the current directory was at the time that the compiler +// was invoked. As the current directory may change over time, we need to record +// what that directory was at the beginning of the file so we can know about it +// next time. +pub fn append_current_dir(path: &Path, cwd: &Path) -> CargoResult<()> { + debug!("appending {} <- {}", path.display(), cwd.display()); + let mut f = OpenOptions::new().read(true).write(true).open(path)?; + let mut contents = Vec::new(); + f.read_to_end(&mut contents)?; + f.seek(SeekFrom::Start(0))?; + f.write_all(util::path2bytes(cwd)?)?; + f.write_all(&[0])?; + f.write_all(&contents)?; + Ok(()) +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/job.rs b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/job.rs new file mode 100644 index 000000000..219a6d437 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/job.rs @@ -0,0 +1,67 @@ +use std::fmt; + +use util::{CargoResult, Fresh, Dirty, Freshness}; +use super::job_queue::JobState; + +pub struct Job { dirty: Work, fresh: Work } + +/// Each proc should send its description before starting. +/// It should send either once or close immediately. 
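A std-only approximation of `mtime_if_fresh` above (the real code uses the `filetime` crate; `SystemTime` stands in here):

```rust
use std::fs;
use std::path::Path;
use std::time::SystemTime;

// An output is stale if it is missing, if any input is missing, or if any
// input is newer than the output.
fn output_is_fresh(output: &Path, inputs: &[&Path]) -> bool {
    let out_mtime: SystemTime = match fs::metadata(output).and_then(|m| m.modified()) {
        Ok(t) => t,
        Err(_) => return false, // output missing: rebuild
    };
    inputs.iter().all(|input| {
        match fs::metadata(input).and_then(|m| m.modified()) {
            Ok(t) => t <= out_mtime,
            Err(_) => false, // input missing: stale
        }
    })
}

fn main() {
    let fresh = output_is_fresh(Path::new("target/out"), &[Path::new("src/lib.rs")]);
    println!("fresh: {}", fresh);
}
```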
+pub struct Work {
+    inner: Box<for<'a, 'b> FnBox<&'a JobState<'b>, CargoResult<()>> + Send>,
+}
+
+trait FnBox<A, R> {
+    fn call_box(self: Box<Self>, a: A) -> R;
+}
+
+impl<A, R, F: FnOnce(A) -> R> FnBox<A, R> for F {
+    fn call_box(self: Box<F>, a: A) -> R {
+        (*self)(a)
+    }
+}
+
+impl Work {
+    pub fn new<F>(f: F) -> Work
+        where F: FnOnce(&JobState) -> CargoResult<()> + Send + 'static
+    {
+        Work { inner: Box::new(f) }
+    }
+
+    pub fn noop() -> Work {
+        Work::new(|_| Ok(()))
+    }
+
+    pub fn call(self, tx: &JobState) -> CargoResult<()> {
+        self.inner.call_box(tx)
+    }
+
+    pub fn then(self, next: Work) -> Work {
+        Work::new(move |state| {
+            self.call(state)?;
+            next.call(state)
+        })
+    }
+}
+
+impl Job {
+    /// Create a new job representing a unit of work.
+    pub fn new(dirty: Work, fresh: Work) -> Job {
+        Job { dirty: dirty, fresh: fresh }
+    }
+
+    /// Consumes this job by running it, returning the result of the
+    /// computation.
+    pub fn run(self, fresh: Freshness, state: &JobState) -> CargoResult<()> {
+        match fresh {
+            Fresh => self.fresh.call(state),
+            Dirty => self.dirty.call(state),
+        }
+    }
+}
+
+impl fmt::Debug for Job {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "Job {{ ... }}")
+    }
+}
diff --git a/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/job_queue.rs b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/job_queue.rs
new file mode 100644
index 000000000..5bfc5d458
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/job_queue.rs
@@ -0,0 +1,426 @@
+use std::collections::HashSet;
+use std::collections::hash_map::HashMap;
+use std::fmt;
+use std::io;
+use std::mem;
+use std::sync::mpsc::{channel, Sender, Receiver};
+
+use crossbeam::{self, Scope};
+use jobserver::{Acquired, HelperThread};
+
+use core::{PackageId, Target, Profile};
+use util::{Config, DependencyQueue, Fresh, Dirty, Freshness};
+use util::{CargoResult, ProcessBuilder, profile, internal, CargoResultExt};
+use {handle_error};
+
+use super::{Context, Kind, Unit};
+use super::job::Job;
+
+/// A management structure of the entire dependency graph to compile.
+///
+/// This structure is backed by the `DependencyQueue` type and manages the
+/// actual compilation step of each package. Packages enqueue units of work and
+/// then later on the entire graph is processed and compiled.
+pub struct JobQueue<'a> {
+    queue: DependencyQueue<Key<'a>, Vec<(Job, Freshness)>>,
+    tx: Sender<Message<'a>>,
+    rx: Receiver<Message<'a>>,
+    active: usize,
+    pending: HashMap<Key<'a>, PendingBuild>,
+    compiled: HashSet<&'a PackageId>,
+    documented: HashSet<&'a PackageId>,
+    counts: HashMap<&'a PackageId, usize>,
+    is_release: bool,
+}
+
+/// A helper structure for metadata about the state of a building package.
+struct PendingBuild {
+    /// Number of jobs currently active
+    amt: usize,
+    /// Current freshness state of this package. Any dirty target within a
+    /// package will cause the entire package to become dirty.
+ fresh: Freshness, +} + +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +struct Key<'a> { + pkg: &'a PackageId, + target: &'a Target, + profile: &'a Profile, + kind: Kind, +} + +pub struct JobState<'a> { + tx: Sender>, +} + +enum Message<'a> { + Run(String), + Stdout(String), + Stderr(String), + Token(io::Result), + Finish(Key<'a>, CargoResult<()>), +} + +impl<'a> JobState<'a> { + pub fn running(&self, cmd: &ProcessBuilder) { + let _ = self.tx.send(Message::Run(cmd.to_string())); + } + + pub fn stdout(&self, out: &str) { + let _ = self.tx.send(Message::Stdout(out.to_string())); + } + + pub fn stderr(&self, err: &str) { + let _ = self.tx.send(Message::Stderr(err.to_string())); + } +} + +impl<'a> JobQueue<'a> { + pub fn new<'cfg>(cx: &Context<'a, 'cfg>) -> JobQueue<'a> { + let (tx, rx) = channel(); + JobQueue { + queue: DependencyQueue::new(), + tx: tx, + rx: rx, + active: 0, + pending: HashMap::new(), + compiled: HashSet::new(), + documented: HashSet::new(), + counts: HashMap::new(), + is_release: cx.build_config.release, + } + } + + pub fn enqueue<'cfg>(&mut self, + cx: &Context<'a, 'cfg>, + unit: &Unit<'a>, + job: Job, + fresh: Freshness) -> CargoResult<()> { + let key = Key::new(unit); + let deps = key.dependencies(cx)?; + self.queue.queue(Fresh, key, Vec::new(), &deps).push((job, fresh)); + *self.counts.entry(key.pkg).or_insert(0) += 1; + Ok(()) + } + + /// Execute all jobs necessary to build the dependency graph. + /// + /// This function will spawn off `config.jobs()` workers to build all of the + /// necessary dependencies, in order. Freshness is propagated as far as + /// possible along each dependency chain. + pub fn execute(&mut self, cx: &mut Context) -> CargoResult<()> { + let _p = profile::start("executing the job graph"); + + // We need to give a handle to the send half of our message queue to the + // jobserver helper thread. Unfortunately though we need the handle to be + // `'static` as that's typically what's required when spawning a + // thread! + // + // To work around this we transmute the `Sender` to a static lifetime. + // we're only sending "longer living" messages and we should also + // destroy all references to the channel before this function exits as + // the destructor for the `helper` object will ensure the associated + // thread is no longer running. + // + // As a result, this `transmute` to a longer lifetime should be safe in + // practice. + let tx = self.tx.clone(); + let tx = unsafe { + mem::transmute::>, Sender>>(tx) + }; + let helper = cx.jobserver.clone().into_helper_thread(move |token| { + drop(tx.send(Message::Token(token))); + }).chain_err(|| { + "failed to create helper thread for jobserver management" + })?; + + crossbeam::scope(|scope| { + self.drain_the_queue(cx, scope, &helper) + }) + } + + fn drain_the_queue(&mut self, + cx: &mut Context, + scope: &Scope<'a>, + jobserver_helper: &HelperThread) + -> CargoResult<()> { + use std::time::Instant; + + let mut tokens = Vec::new(); + let mut queue = Vec::new(); + trace!("queue: {:#?}", self.queue); + + // Iteratively execute the entire dependency graph. Each turn of the + // loop starts out by scheduling as much work as possible (up to the + // maximum number of parallel jobs we have tokens for). A local queue + // is maintained separately from the main dependency queue as one + // dequeue may actually dequeue quite a bit of work (e.g. 10 binaries + // in one project). 
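The `Work` values flowing through this queue are the boxed closures from `job.rs` above. Since Rust 1.35, `Box<dyn FnOnce>` is callable directly, so the same `then` chaining can be sketched without the `FnBox` shim (names and the `&str` state type are placeholders):

```rust
// Modern-Rust sketch of `Work` and `Work::then`; the vendored code needs
// `FnBox` only because it targets an older compiler.
type Res = Result<(), String>;

struct Work {
    inner: Box<dyn FnOnce(&str) -> Res + Send>,
}

impl Work {
    fn new<F: FnOnce(&str) -> Res + Send + 'static>(f: F) -> Work {
        Work { inner: Box::new(f) }
    }
    fn noop() -> Work {
        Work::new(|_| Ok(()))
    }
    fn call(self, state: &str) -> Res {
        (self.inner)(state)
    }
    // Sequence two units of work, short-circuiting on the first error.
    fn then(self, next: Work) -> Work {
        Work::new(move |state| {
            self.call(state)?;
            next.call(state)
        })
    }
}

fn main() {
    let job = Work::new(|s| { println!("compile {}", s); Ok(()) })
        .then(Work::noop());
    job.call("demo-unit").unwrap();
}
```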
+ // + // After a job has finished we update our internal state if it was + // successful and otherwise wait for pending work to finish if it failed + // and then immediately return. + let mut error = None; + let start_time = Instant::now(); + loop { + // Dequeue as much work as we can, learning about everything + // possible that can run. Note that this is also the point where we + // start requesting job tokens. Each job after the first needs to + // request a token. + while let Some((fresh, key, jobs)) = self.queue.dequeue() { + let total_fresh = jobs.iter().fold(fresh, |fresh, &(_, f)| { + f.combine(fresh) + }); + self.pending.insert(key, PendingBuild { + amt: jobs.len(), + fresh: total_fresh, + }); + for (job, f) in jobs { + queue.push((key, job, f.combine(fresh))); + if self.active + queue.len() > 0 { + jobserver_helper.request_token(); + } + } + } + + // Now that we've learned of all possible work that we can execute + // try to spawn it so long as we've got a jobserver token which says + // we're able to perform some parallel work. + while error.is_none() && self.active < tokens.len() + 1 && !queue.is_empty() { + let (key, job, fresh) = queue.remove(0); + self.run(key, fresh, job, cx.config, scope)?; + } + + // If after all that we're not actually running anything then we're + // done! + if self.active == 0 { + break + } + + // And finally, before we block waiting for the next event, drop any + // excess tokens we may have accidentally acquired. Due to how our + // jobserver interface is architected we may acquire a token that we + // don't actually use, and if this happens just relinquish it back + // to the jobserver itself. + tokens.truncate(self.active - 1); + + match self.rx.recv().unwrap() { + Message::Run(cmd) => { + cx.config.shell().verbose(|c| c.status("Running", &cmd))?; + } + Message::Stdout(out) => { + if cx.config.extra_verbose() { + println!("{}", out); + } + } + Message::Stderr(err) => { + if cx.config.extra_verbose() { + writeln!(cx.config.shell().err(), "{}", err)?; + } + } + Message::Finish(key, result) => { + info!("end: {:?}", key); + self.active -= 1; + if self.active > 0 { + assert!(!tokens.is_empty()); + drop(tokens.pop()); + } + match result { + Ok(()) => self.finish(key, cx)?, + Err(e) => { + let msg = "The following warnings were emitted during compilation:"; + self.emit_warnings(Some(msg), key, cx)?; + + if self.active > 0 { + error = Some("build failed".into()); + handle_error(e, &mut *cx.config.shell()); + cx.config.shell().warn( + "build failed, waiting for other \ + jobs to finish...")?; + } + else { + error = Some(e); + } + } + } + } + Message::Token(acquired_token) => { + tokens.push(acquired_token.chain_err(|| { + "failed to acquire jobserver token" + })?); + } + } + } + + let build_type = if self.is_release { "release" } else { "dev" }; + let profile = cx.lib_profile(); + let mut opt_type = String::from(if profile.opt_level == "0" { "unoptimized" } + else { "optimized" }); + if profile.debuginfo.is_some() { + opt_type += " + debuginfo"; + } + let duration = start_time.elapsed(); + let time_elapsed = format!("{}.{1:.2} secs", + duration.as_secs(), + duration.subsec_nanos() / 10_000_000); + if self.queue.is_empty() { + let message = format!("{} [{}] target(s) in {}", + build_type, + opt_type, + time_elapsed); + cx.config.shell().status("Finished", message)?; + Ok(()) + } else if let Some(e) = error { + Err(e) + } else { + debug!("queue: {:#?}", self.queue); + Err(internal("finished with jobs still left in the queue")) + } + } + + /// Executes a job 
in the `scope` given, pushing the spawned thread's + /// handled onto `threads`. + fn run(&mut self, + key: Key<'a>, + fresh: Freshness, + job: Job, + config: &Config, + scope: &Scope<'a>) -> CargoResult<()> { + info!("start: {:?}", key); + + self.active += 1; + *self.counts.get_mut(key.pkg).unwrap() -= 1; + + let my_tx = self.tx.clone(); + let doit = move || { + let res = job.run(fresh, &JobState { + tx: my_tx.clone(), + }); + my_tx.send(Message::Finish(key, res)).unwrap(); + }; + match fresh { + Freshness::Fresh => doit(), + Freshness::Dirty => { scope.spawn(doit); } + } + + // Print out some nice progress information + self.note_working_on(config, &key, fresh)?; + + Ok(()) + } + + fn emit_warnings(&self, msg: Option<&str>, key: Key<'a>, cx: &mut Context) -> CargoResult<()> { + let output = cx.build_state.outputs.lock().unwrap(); + if let Some(output) = output.get(&(key.pkg.clone(), key.kind)) { + if let Some(msg) = msg { + if !output.warnings.is_empty() { + writeln!(cx.config.shell().err(), "{}\n", msg)?; + } + } + + for warning in output.warnings.iter() { + cx.config.shell().warn(warning)?; + } + + if !output.warnings.is_empty() && msg.is_some() { + // Output an empty line. + writeln!(cx.config.shell().err(), "")?; + } + } + + Ok(()) + } + + fn finish(&mut self, key: Key<'a>, cx: &mut Context) -> CargoResult<()> { + if key.profile.run_custom_build && cx.show_warnings(key.pkg) { + self.emit_warnings(None, key, cx)?; + } + + let state = self.pending.get_mut(&key).unwrap(); + state.amt -= 1; + if state.amt == 0 { + self.queue.finish(&key, state.fresh); + } + Ok(()) + } + + // This isn't super trivial because we don't want to print loads and + // loads of information to the console, but we also want to produce a + // faithful representation of what's happening. This is somewhat nuanced + // as a package can start compiling *very* early on because of custom + // build commands and such. + // + // In general, we try to print "Compiling" for the first nontrivial task + // run for a package, regardless of when that is. We then don't print + // out any more information for a package after we've printed it once. + fn note_working_on(&mut self, + config: &Config, + key: &Key<'a>, + fresh: Freshness) -> CargoResult<()> { + if (self.compiled.contains(key.pkg) && !key.profile.doc) || + (self.documented.contains(key.pkg) && key.profile.doc) { + return Ok(()) + } + + match fresh { + // Any dirty stage which runs at least one command gets printed as + // being a compiled package + Dirty => { + if key.profile.doc { + if !key.profile.test { + self.documented.insert(key.pkg); + config.shell().status("Documenting", key.pkg)?; + } + } else { + self.compiled.insert(key.pkg); + config.shell().status("Compiling", key.pkg)?; + } + } + Fresh if self.counts[key.pkg] == 0 => { + self.compiled.insert(key.pkg); + config.shell().verbose(|c| c.status("Fresh", key.pkg))?; + } + Fresh => {} + } + Ok(()) + } +} + +impl<'a> Key<'a> { + fn new(unit: &Unit<'a>) -> Key<'a> { + Key { + pkg: unit.pkg.package_id(), + target: unit.target, + profile: unit.profile, + kind: unit.kind, + } + } + + fn dependencies<'cfg>(&self, cx: &Context<'a, 'cfg>) + -> CargoResult>> { + let unit = Unit { + pkg: cx.get_package(self.pkg)?, + target: self.target, + profile: self.profile, + kind: self.kind, + }; + let targets = cx.dep_targets(&unit)?; + Ok(targets.iter().filter_map(|unit| { + // Binaries aren't actually needed to *compile* tests, just to run + // them, so we don't include this dependency edge in the job graph. 
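The drain loop above reduces to a channel of completion messages plus an in-flight counter; a self-contained sketch (message type hypothetical):

```rust
use std::sync::mpsc::channel;
use std::thread;

// Workers report completion over a channel; the scheduler loops until the
// in-flight count drops to zero, mirroring `drain_the_queue`.
enum Message {
    Finish(usize, Result<(), String>),
}

fn main() {
    let (tx, rx) = channel();
    let mut active = 0;
    for id in 0..4 {
        let tx = tx.clone();
        active += 1;
        thread::spawn(move || {
            tx.send(Message::Finish(id, Ok(()))).unwrap();
        });
    }
    while active > 0 {
        match rx.recv().unwrap() {
            Message::Finish(id, result) => {
                active -= 1;
                println!("job {} done: ok = {}", id, result.is_ok());
            }
        }
    }
}
```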
+ if self.target.is_test() && unit.target.is_bin() { + None + } else { + Some(Key::new(unit)) + } + }).collect()) + } +} + +impl<'a> fmt::Debug for Key<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{} => {}/{} => {:?}", self.pkg, self.target, self.profile, + self.kind) + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/layout.rs b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/layout.rs new file mode 100644 index 000000000..464a68945 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/layout.rs @@ -0,0 +1,191 @@ +//! Management of the directory layout of a build +//! +//! The directory layout is a little tricky at times, hence a separate file to +//! house this logic. The current layout looks like this: +//! +//! ```ignore +//! # This is the root directory for all output, the top-level package +//! # places all of its output here. +//! target/ +//! +//! # This is the root directory for all output of *dependencies* +//! deps/ +//! +//! # Root directory for all compiled examples +//! examples/ +//! +//! # This is the location at which the output of all custom build +//! # commands are rooted +//! build/ +//! +//! # Each package gets its own directory where its build script and +//! # script output are placed +//! $pkg1/ +//! $pkg2/ +//! $pkg3/ +//! +//! # Each directory package has a `out` directory where output +//! # is placed. +//! out/ +//! +//! # This is the location at which the output of all old custom build +//! # commands are rooted +//! native/ +//! +//! # Each package gets its own directory for where its output is +//! # placed. We can't track exactly what's getting put in here, so +//! # we just assume that all relevant output is in these +//! # directories. +//! $pkg1/ +//! $pkg2/ +//! $pkg3/ +//! +//! # Directory used to store incremental data for the compiler (when +//! # incremental is enabled. +//! incremental/ +//! +//! # Hidden directory that holds all of the fingerprint files for all +//! # packages +//! .fingerprint/ +//! ``` + +use std::fs; +use std::io; +use std::path::{PathBuf, Path}; + +use core::Workspace; +use util::{Config, FileLock, CargoResult, Filesystem}; + +/// Contains the paths of all target output locations. +/// +/// See module docs for more information. +pub struct Layout { + root: PathBuf, + deps: PathBuf, + native: PathBuf, + build: PathBuf, + incremental: PathBuf, + fingerprint: PathBuf, + examples: PathBuf, + /// The lockfile for a build, will be unlocked when this struct is `drop`ped. + _lock: FileLock, +} + +pub fn is_bad_artifact_name(name: &str) -> bool { + ["deps", "examples", "build", "native", "incremental"] + .iter() + .any(|&reserved| reserved == name) +} + +impl Layout { + /// Calculate the paths for build output, lock the build directory, and return as a Layout. + /// + /// This function will block if the directory is already locked. + /// + /// Differs from `at` in that this calculates the root path from the workspace target directory, + /// adding the target triple and the profile (debug, release, ...). + pub fn new(ws: &Workspace, + triple: Option<&str>, + dest: &str) -> CargoResult { + let mut path = ws.target_dir(); + // Flexible target specifications often point at filenames, so interpret + // the target triple as a Path and then just use the file stem as the + // component for the directory name. 
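The triple-as-path trick described in the layout comment above, in isolation: a plain triple passes through unchanged, while a path to a custom target spec yields its file stem.

```rust
use std::path::Path;

// Sketch of the `Path::new(triple).file_stem()` handling below.
fn dir_component(triple: &str) -> Option<&str> {
    Path::new(triple).file_stem().and_then(|s| s.to_str())
}

fn main() {
    assert_eq!(dir_component("x86_64-unknown-linux-gnu"),
               Some("x86_64-unknown-linux-gnu"));
    assert_eq!(dir_component("/tmp/custom-target.json"), Some("custom-target"));
}
```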
+ if let Some(triple) = triple { + path.push(Path::new(triple).file_stem().ok_or_else(|| "target was empty")?); + } + path.push(dest); + Layout::at(ws.config(), path) + } + + /// Calculate the paths for build output, lock the build directory, and return as a Layout. + /// + /// This function will block if the directory is already locked. + pub fn at(config: &Config, root: Filesystem) -> CargoResult { + // For now we don't do any more finer-grained locking on the artifact + // directory, so just lock the entire thing for the duration of this + // compile. + let lock = root.open_rw(".cargo-lock", config, "build directory")?; + let root = root.into_path_unlocked(); + + Ok(Layout { + deps: root.join("deps"), + native: root.join("native"), + build: root.join("build"), + incremental: root.join("incremental"), + fingerprint: root.join(".fingerprint"), + examples: root.join("examples"), + root: root, + _lock: lock, + }) + } + + #[cfg(not(target_os = "macos"))] + fn exclude_from_backups(&self, _: &Path) {} + + #[cfg(target_os = "macos")] + /// Marks files or directories as excluded from Time Machine on macOS + /// + /// This is recommended to prevent derived/temporary files from bloating backups. + fn exclude_from_backups(&self, path: &Path) { + use std::ptr; + use core_foundation::{url, number, string}; + use core_foundation::base::TCFType; + + // For compatibility with 10.7 a string is used instead of global kCFURLIsExcludedFromBackupKey + let is_excluded_key: Result = "NSURLIsExcludedFromBackupKey".parse(); + match (url::CFURL::from_path(path, false), is_excluded_key) { + (Some(path), Ok(is_excluded_key)) => unsafe { + url::CFURLSetResourcePropertyForKey( + path.as_concrete_TypeRef(), + is_excluded_key.as_concrete_TypeRef(), + number::kCFBooleanTrue as *const _, + ptr::null_mut(), + ); + }, + // Errors are ignored, since it's an optional feature and failure + // doesn't prevent Cargo from working + _ => {} + } + } + + /// Make sure all directories stored in the Layout exist on the filesystem. + pub fn prepare(&mut self) -> io::Result<()> { + if fs::metadata(&self.root).is_err() { + fs::create_dir_all(&self.root)?; + } + + self.exclude_from_backups(&self.root); + + mkdir(&self.deps)?; + mkdir(&self.native)?; + mkdir(&self.incremental)?; + mkdir(&self.fingerprint)?; + mkdir(&self.examples)?; + mkdir(&self.build)?; + + return Ok(()); + + fn mkdir(dir: &Path) -> io::Result<()> { + if fs::metadata(&dir).is_err() { + fs::create_dir(dir)?; + } + Ok(()) + } + } + + /// Fetch the root path. + pub fn dest(&self) -> &Path { &self.root } + /// Fetch the deps path. + pub fn deps(&self) -> &Path { &self.deps } + /// Fetch the examples path. + pub fn examples(&self) -> &Path { &self.examples } + /// Fetch the root path. + pub fn root(&self) -> &Path { &self.root } + /// Fetch the incremental path. + pub fn incremental(&self) -> &Path { &self.incremental } + /// Fetch the fingerprint path. + pub fn fingerprint(&self) -> &Path { &self.fingerprint } + /// Fetch the build path. 
+ pub fn build(&self) -> &Path { &self.build } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/links.rs b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/links.rs new file mode 100644 index 000000000..79bb240cf --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/links.rs @@ -0,0 +1,64 @@ +use std::collections::{HashMap, HashSet}; +use std::fmt::Write; + +use core::{Resolve, PackageId}; +use util::CargoResult; +use super::Unit; + +pub struct Links<'a> { + validated: HashSet<&'a PackageId>, + links: HashMap, +} + +impl<'a> Links<'a> { + pub fn new() -> Links<'a> { + Links { + validated: HashSet::new(), + links: HashMap::new(), + } + } + + pub fn validate(&mut self, resolve: &Resolve, unit: &Unit<'a>) -> CargoResult<()> { + if !self.validated.insert(unit.pkg.package_id()) { + return Ok(()) + } + let lib = match unit.pkg.manifest().links() { + Some(lib) => lib, + None => return Ok(()), + }; + if let Some(prev) = self.links.get(lib) { + let pkg = unit.pkg.package_id(); + + let describe_path = |pkgid: &PackageId| -> String { + let dep_path = resolve.path_to_top(pkgid); + if dep_path.is_empty() { + String::from("The root-package ") + } else { + let mut dep_path_desc = format!("Package `{}`\n", pkgid); + for dep in dep_path { + write!(dep_path_desc, + " ... which is depended on by `{}`\n", + dep).unwrap(); + } + dep_path_desc + } + }; + + bail!("Multiple packages link to native library `{}`. \ + A native library can be linked only once.\n\ + \n\ + {}links to native library `{}`.\n\ + \n\ + {}also links to native library `{}`.", + lib, + describe_path(prev), lib, + describe_path(pkg), lib) + } + if !unit.pkg.manifest().targets().iter().any(|t| t.is_custom_build()) { + bail!("package `{}` specifies that it links to `{}` but does not \ + have a custom build script", unit.pkg.package_id(), lib) + } + self.links.insert(lib.to_string(), unit.pkg.package_id()); + Ok(()) + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/mod.rs b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/mod.rs new file mode 100644 index 000000000..8dce388c0 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/mod.rs @@ -0,0 +1,941 @@ +use std::collections::{HashMap, HashSet}; +use std::env; +use std::ffi::{OsStr, OsString}; +use std::fs; +use std::io::{self, Write}; +use std::path::{self, PathBuf}; +use std::sync::Arc; + +use same_file::is_same_file; +use serde_json; + +use core::{Package, PackageId, PackageSet, Target, Resolve}; +use core::{Profile, Profiles, Workspace}; +use core::shell::ColorChoice; +use util::{self, ProcessBuilder, machine_message}; +use util::{Config, internal, profile, join_paths}; +use util::errors::{CargoResult, CargoResultExt}; +use util::Freshness; + +use self::job::{Job, Work}; +use self::job_queue::JobQueue; + +use self::output_depinfo::output_depinfo; + +pub use self::compilation::Compilation; +pub use self::context::{Context, Unit, TargetFileType}; +pub use self::custom_build::{BuildOutput, BuildMap, BuildScripts}; +pub use self::layout::is_bad_artifact_name; + +mod compilation; +mod context; +mod custom_build; +mod fingerprint; +mod job; +mod job_queue; +mod layout; +mod links; +mod output_depinfo; + +/// Whether an object is for the host arch, or the target arch. +/// +/// These will be the same unless cross-compiling. 
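The `links` uniqueness rule enforced by `validate` above is first-claimant-wins; a detached sketch (error text abbreviated, names hypothetical):

```rust
use std::collections::HashMap;

// The first package to claim a native library records itself in the map;
// any second claimant is an error, as in `Links::validate`.
fn claim_link<'a>(links: &mut HashMap<String, &'a str>, lib: &str, pkg: &'a str)
                  -> Result<(), String> {
    if let Some(prev) = links.get(lib) {
        return Err(format!("`{}` and `{}` both link to native library `{}`",
                           prev, pkg, lib));
    }
    links.insert(lib.to_string(), pkg);
    Ok(())
}

fn main() {
    let mut links = HashMap::new();
    claim_link(&mut links, "git2", "libgit2-sys v0.6").unwrap();
    assert!(claim_link(&mut links, "git2", "libgit2-sys v0.5").is_err());
}
```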
+#[derive(PartialEq, Eq, Hash, Debug, Clone, Copy, PartialOrd, Ord)]
+pub enum Kind { Host, Target }
+
+/// Configuration information for a rustc build.
+#[derive(Default, Clone)]
+pub struct BuildConfig {
+    /// The host arch triple
+    ///
+    /// e.g. x86_64-unknown-linux-gnu, would be
+    ///  - machine: x86_64
+    ///  - hardware-platform: unknown
+    ///  - operating system: linux-gnu
+    pub host_triple: String,
+    /// Build information for the host arch
+    pub host: TargetConfig,
+    /// The target arch triple, defaults to host arch
+    pub requested_target: Option<String>,
+    /// Build information for the target
+    pub target: TargetConfig,
+    /// How many rustc jobs to run in parallel
+    pub jobs: u32,
+    /// Whether we are building for release
+    pub release: bool,
+    /// Whether we are running tests
+    pub test: bool,
+    /// Whether we are building documentation
+    pub doc_all: bool,
+    /// Whether to print std output in json format (for machine reading)
+    pub json_messages: bool,
+}
+
+/// Information required to build for a target
+#[derive(Clone, Default)]
+pub struct TargetConfig {
+    /// The path of archiver (lib builder) for this target.
+    pub ar: Option<PathBuf>,
+    /// The path of the linker for this target.
+    pub linker: Option<PathBuf>,
+    /// Special build options for any necessary input files (filename -> options)
+    pub overrides: HashMap<String, BuildOutput>,
+}
+
+pub type PackagesToBuild<'a> = [(&'a Package, Vec<(&'a Target, &'a Profile)>)];
+
+/// A glorified callback for executing calls to rustc. Rather than calling rustc
+/// directly, we'll use an Executor, giving clients an opportunity to intercept
+/// the build calls.
+pub trait Executor: Send + Sync + 'static {
+    /// Called after a rustc process invocation is prepared up-front for a given
+    /// unit of work (may still be modified for runtime-known dependencies, when
+    /// the work is actually executed).
+    fn init(&self, _cx: &Context, _unit: &Unit) {}
+
+    /// In case of an `Err`, Cargo will not continue with the build process for
+    /// this package.
+    fn exec(&self,
+            cmd: ProcessBuilder,
+            _id: &PackageId,
+            _target: &Target)
+            -> CargoResult<()> {
+        cmd.exec()?;
+        Ok(())
+    }
+
+    fn exec_json(&self,
+                 cmd: ProcessBuilder,
+                 _id: &PackageId,
+                 _target: &Target,
+                 handle_stdout: &mut FnMut(&str) -> CargoResult<()>,
+                 handle_stderr: &mut FnMut(&str) -> CargoResult<()>)
+                 -> CargoResult<()> {
+        cmd.exec_with_streaming(handle_stdout, handle_stderr, false)?;
+        Ok(())
+    }
+
+    /// Queried when queuing each unit of work. If it returns true, then the
+    /// unit will always be rebuilt, independent of whether it needs to be.
+    fn force_rebuild(&self, _unit: &Unit) -> bool {
+        false
+    }
+}
+
+/// A `DefaultExecutor` calls rustc without doing anything else. It is Cargo's
+/// default behaviour.
+#[derive(Copy, Clone)]
+pub struct DefaultExecutor;
+
+impl Executor for DefaultExecutor {}
+
+// Returns a mapping of the root package plus its immediate dependencies to
+// where the compiled libraries are all located.
+pub fn compile_targets<'a, 'cfg: 'a>(ws: &Workspace<'cfg>, + pkg_targets: &'a PackagesToBuild<'a>, + packages: &'a PackageSet<'cfg>, + resolve: &'a Resolve, + config: &'cfg Config, + build_config: BuildConfig, + profiles: &'a Profiles, + exec: Arc) + -> CargoResult> { + let units = pkg_targets.iter().flat_map(|&(pkg, ref targets)| { + let default_kind = if build_config.requested_target.is_some() { + Kind::Target + } else { + Kind::Host + }; + targets.iter().map(move |&(target, profile)| { + Unit { + pkg: pkg, + target: target, + profile: profile, + kind: if target.for_host() {Kind::Host} else {default_kind}, + } + }) + }).collect::>(); + + let mut cx = Context::new(ws, resolve, packages, config, + build_config, profiles)?; + + let mut queue = JobQueue::new(&cx); + + cx.prepare()?; + cx.probe_target_info(&units)?; + cx.build_used_in_plugin_map(&units)?; + custom_build::build_map(&mut cx, &units)?; + + for unit in units.iter() { + // Build up a list of pending jobs, each of which represent + // compiling a particular package. No actual work is executed as + // part of this, that's all done next as part of the `execute` + // function which will run everything in order with proper + // parallelism. + compile(&mut cx, &mut queue, unit, Arc::clone(&exec))?; + } + + // Now that we've figured out everything that we're going to do, do it! + queue.execute(&mut cx)?; + + for unit in units.iter() { + for &(ref dst, ref link_dst, file_type) in cx.target_filenames(unit)?.iter() { + if file_type == TargetFileType::DebugInfo { + continue; + } + + let bindst = match *link_dst { + Some(ref link_dst) => link_dst, + None => dst, + }; + + if unit.profile.test { + cx.compilation.tests.push((unit.pkg.clone(), + unit.target.kind().clone(), + unit.target.name().to_string(), + dst.clone())); + } else if unit.target.is_bin() || unit.target.is_example() { + cx.compilation.binaries.push(bindst.clone()); + } else if unit.target.is_lib() { + let pkgid = unit.pkg.package_id().clone(); + cx.compilation.libraries.entry(pkgid).or_insert(HashSet::new()) + .insert((unit.target.clone(), dst.clone())); + } + } + + for dep in cx.dep_targets(unit)?.iter() { + if !unit.target.is_lib() { continue } + + if dep.profile.run_custom_build { + let out_dir = cx.build_script_out_dir(dep).display().to_string(); + cx.compilation.extra_env.entry(dep.pkg.package_id().clone()) + .or_insert(Vec::new()) + .push(("OUT_DIR".to_string(), out_dir)); + } + + if !dep.target.is_lib() { continue } + if dep.profile.doc { continue } + + let v = cx.target_filenames(dep)?; + cx.compilation.libraries + .entry(unit.pkg.package_id().clone()) + .or_insert(HashSet::new()) + .extend(v.iter().map(|&(ref f, _, _)| { + (dep.target.clone(), f.clone()) + })); + } + + let feats = cx.resolve.features(unit.pkg.package_id()); + cx.compilation.cfgs.entry(unit.pkg.package_id().clone()) + .or_insert_with(HashSet::new) + .extend(feats.iter().map(|feat| format!("feature=\"{}\"", feat))); + + output_depinfo(&mut cx, unit)?; + } + + for (&(ref pkg, _), output) in cx.build_state.outputs.lock().unwrap().iter() { + cx.compilation.cfgs.entry(pkg.clone()) + .or_insert_with(HashSet::new) + .extend(output.cfgs.iter().cloned()); + + cx.compilation.extra_env.entry(pkg.clone()) + .or_insert_with(Vec::new) + .extend(output.env.iter().cloned()); + + for dir in output.library_paths.iter() { + cx.compilation.native_dirs.insert(dir.clone()); + } + } + cx.compilation.target = cx.target_triple().to_string(); + Ok(cx.compilation) +} + +fn compile<'a, 'cfg: 'a>(cx: &mut Context<'a, 'cfg>, + jobs: 
&mut JobQueue<'a>,
+                         unit: &Unit<'a>,
+                         exec: Arc<Executor>) -> CargoResult<()> {
+    if !cx.compiled.insert(*unit) {
+        return Ok(())
+    }
+
+    // Build up the work to be done to compile this unit, enqueuing it once
+    // we've got everything constructed.
+    let p = profile::start(format!("preparing: {}/{}", unit.pkg,
+                                   unit.target.name()));
+    fingerprint::prepare_init(cx, unit)?;
+    cx.links.validate(cx.resolve, unit)?;
+
+    let (dirty, fresh, freshness) = if unit.profile.run_custom_build {
+        custom_build::prepare(cx, unit)?
+    } else if unit.profile.doc && unit.profile.test {
+        // we run these targets later, so this is just a noop for now
+        (Work::noop(), Work::noop(), Freshness::Fresh)
+    } else {
+        let (mut freshness, dirty, fresh) = fingerprint::prepare_target(cx, unit)?;
+        let work = if unit.profile.doc {
+            rustdoc(cx, unit)?
+        } else {
+            rustc(cx, unit, Arc::clone(&exec))?
+        };
+        // Need to link targets on both the dirty and fresh
+        let dirty = work.then(link_targets(cx, unit, false)?).then(dirty);
+        let fresh = link_targets(cx, unit, true)?.then(fresh);
+
+        if exec.force_rebuild(unit) {
+            freshness = Freshness::Dirty;
+        }
+
+        (dirty, fresh, freshness)
+    };
+    jobs.enqueue(cx, unit, Job::new(dirty, fresh), freshness)?;
+    drop(p);
+
+    // Be sure to compile all dependencies of this target as well.
+    for unit in cx.dep_targets(unit)?.iter() {
+        compile(cx, jobs, unit, exec.clone())?;
+    }
+
+    Ok(())
+}
+
+fn rustc<'a, 'cfg>(cx: &mut Context<'a, 'cfg>,
+                   unit: &Unit<'a>,
+                   exec: Arc<Executor>) -> CargoResult<Work> {
+    let mut rustc = prepare_rustc(cx, &unit.target.rustc_crate_types(), unit)?;
+
+    let name = unit.pkg.name().to_string();
+
+    // If this is an upstream dep we don't want warnings from, turn off all
+    // lints.
+    if !cx.show_warnings(unit.pkg.package_id()) {
+        rustc.arg("--cap-lints").arg("allow");
+
+    // If this is an upstream dep but we *do* want warnings, make sure that they
+    // don't fail compilation.
+    } else if !unit.pkg.package_id().source_id().is_path() {
+        rustc.arg("--cap-lints").arg("warn");
+    }
+
+    let filenames = cx.target_filenames(unit)?;
+    let root = cx.out_dir(unit);
+    let kind = unit.kind;
+
+    // Prepare the native lib state (extra -L and -l flags)
+    let build_state = cx.build_state.clone();
+    let current_id = unit.pkg.package_id().clone();
+    let build_deps = load_build_deps(cx, unit);
+
+    // If we are a binary and the package also contains a library, then we
+    // don't pass the `-l` flags.
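The `compiled` set guard at the top of `compile` above is what keeps a diamond-shaped dependency graph from being enqueued twice; in miniature (names hypothetical, std only):

```rust
use std::collections::{HashMap, HashSet};

// Recursive enqueue with a seen-set: each unit is planned exactly once,
// even when reachable along multiple dependency paths.
fn enqueue(unit: &str,
           deps: &HashMap<&str, Vec<&str>>,
           compiled: &mut HashSet<String>,
           queue: &mut Vec<String>) {
    if !compiled.insert(unit.to_string()) {
        return; // already planned
    }
    queue.push(unit.to_string());
    for &dep in deps.get(unit).into_iter().flatten() {
        enqueue(dep, deps, compiled, queue);
    }
}

fn main() {
    let mut deps = HashMap::new();
    deps.insert("app", vec!["serde", "libc"]);
    deps.insert("serde", vec!["libc"]);
    let (mut compiled, mut queue) = (HashSet::new(), Vec::new());
    enqueue("app", &deps, &mut compiled, &mut queue);
    assert_eq!(queue.len(), 3); // "libc" appears once, not twice
}
```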
+    let pass_l_flag = unit.target.is_lib() ||
+                      !unit.pkg.targets().iter().any(|t| t.is_lib());
+    let do_rename = unit.target.allows_underscores() && !unit.profile.test;
+    let real_name = unit.target.name().to_string();
+    let crate_name = unit.target.crate_name();
+
+    // XXX(Rely on target_filenames iterator as source of truth rather than rederiving filestem)
+    let rustc_dep_info_loc = if do_rename && cx.target_metadata(unit).is_none() {
+        root.join(&crate_name)
+    } else {
+        root.join(&cx.file_stem(unit))
+    }.with_extension("d");
+    let dep_info_loc = fingerprint::dep_info_loc(cx, unit);
+    let cwd = cx.config.cwd().to_path_buf();
+
+    rustc.args(&cx.incremental_args(unit)?);
+    rustc.args(&cx.rustflags_args(unit)?);
+    let json_messages = cx.build_config.json_messages;
+    let package_id = unit.pkg.package_id().clone();
+    let target = unit.target.clone();
+
+    exec.init(cx, unit);
+    let exec = exec.clone();
+
+    let root_output = cx.target_root().to_path_buf();
+
+    return Ok(Work::new(move |state| {
+        // Only at runtime have we discovered what the extra -L and -l
+        // arguments are for native libraries, so we process those here. We
+        // also need to be sure to add any -L paths for our plugins to the
+        // dynamic library load path as a plugin's dynamic library may be
+        // located somewhere in there.
+        // Finally, if custom environment variables have been produced by
+        // previous build scripts, we include them in the rustc invocation.
+        if let Some(build_deps) = build_deps {
+            let build_state = build_state.outputs.lock().unwrap();
+            add_native_deps(&mut rustc, &build_state, &build_deps,
+                            pass_l_flag, &current_id)?;
+            add_plugin_deps(&mut rustc, &build_state, &build_deps,
+                            &root_output)?;
+            add_custom_env(&mut rustc, &build_state, &current_id, kind)?;
+        }
+
+        for &(ref filename, ref _link_dst, _linkable) in filenames.iter() {
+            // If there is both an rmeta and rlib, rustc will prefer to use the
+            // rlib, even if it is older. Therefore, we must delete the rlib to
+            // force using the new rmeta.
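+            // For example, a fresh `libfoo.rmeta` produced by a check build
+            // could otherwise lose to a stale `libfoo.rlib` left on disk.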
+            if filename.extension() == Some(OsStr::new("rmeta")) {
+                let dst = root.join(filename).with_extension("rlib");
+                if dst.exists() {
+                    fs::remove_file(&dst).chain_err(|| {
+                        format!("Could not remove file: {}.", dst.display())
+                    })?;
+                }
+            }
+        }
+
+        state.running(&rustc);
+        if json_messages {
+            exec.exec_json(rustc, &package_id, &target,
+                &mut |line| if !line.is_empty() {
+                    Err(internal(&format!("compiler stdout is not empty: `{}`", line)))
+                } else {
+                    Ok(())
+                },
+                &mut |line| {
+                    // stderr from rustc can have a mix of JSON and non-JSON output
+                    if line.starts_with('{') {
+                        // Handle JSON lines
+                        let compiler_message = serde_json::from_str(line).map_err(|_| {
+                            internal(&format!("compiler produced invalid json: `{}`", line))
+                        })?;
+
+                        machine_message::emit(&machine_message::FromCompiler {
+                            package_id: &package_id,
+                            target: &target,
+                            message: compiler_message,
+                        });
+                    } else {
+                        // Forward non-JSON to stderr
+                        writeln!(io::stderr(), "{}", line)?;
+                    }
+                    Ok(())
+                }
+            ).chain_err(|| {
+                format!("Could not compile `{}`.", name)
+            })?;
+        } else {
+            exec.exec(rustc, &package_id, &target).map_err(|e| e.into_internal()).chain_err(|| {
+                format!("Could not compile `{}`.", name)
+            })?;
+        }
+
+        if do_rename && real_name != crate_name {
+            let dst = &filenames[0].0;
+            let src = dst.with_file_name(dst.file_name().unwrap()
+                                            .to_str().unwrap()
+                                            .replace(&real_name, &crate_name));
+            if src.exists() && src.file_name() != dst.file_name() {
+                fs::rename(&src, &dst).chain_err(|| {
+                    internal(format!("could not rename crate {:?}", src))
+                })?;
+            }
+        }
+
+        if fs::metadata(&rustc_dep_info_loc).is_ok() {
+            info!("Renaming dep_info {:?} to {:?}", rustc_dep_info_loc, dep_info_loc);
+            fs::rename(&rustc_dep_info_loc, &dep_info_loc).chain_err(|| {
+                internal(format!("could not rename dep info: {:?}",
+                                 rustc_dep_info_loc))
+            })?;
+            fingerprint::append_current_dir(&dep_info_loc, &cwd)?;
+        }
+
+        Ok(())
+    }));
+
+    // Add all relevant -L and -l flags from dependencies (now calculated and
+    // present in `state`) to the command provided
+    fn add_native_deps(rustc: &mut ProcessBuilder,
+                       build_state: &BuildMap,
+                       build_scripts: &BuildScripts,
+                       pass_l_flag: bool,
+                       current_id: &PackageId) -> CargoResult<()> {
+        for key in build_scripts.to_link.iter() {
+            let output = build_state.get(key).ok_or_else(|| {
+                internal(format!("couldn't find build state for {}/{:?}",
+                                 key.0, key.1))
+            })?;
+            for path in output.library_paths.iter() {
+                rustc.arg("-L").arg(path);
+            }
+            if key.0 == *current_id {
+                for cfg in &output.cfgs {
+                    rustc.arg("--cfg").arg(cfg);
+                }
+                if pass_l_flag {
+                    for name in output.library_links.iter() {
+                        rustc.arg("-l").arg(name);
+                    }
+                }
+            }
+        }
+        Ok(())
+    }
+
+    // Add all custom environment variables present in `state` (after they've
+    // been put there by one of the `build_scripts`) to the command provided.
+    fn add_custom_env(rustc: &mut ProcessBuilder,
+                      build_state: &BuildMap,
+                      current_id: &PackageId,
+                      kind: Kind) -> CargoResult<()> {
+        let key = (current_id.clone(), kind);
+        if let Some(output) = build_state.get(&key) {
+            for &(ref name, ref value) in output.env.iter() {
+                rustc.env(name, value);
+            }
+        }
+        Ok(())
+    }
+}
+
+/// Link the compiled target (often of form `foo-{metadata_hash}`) to the
+/// final target. This must happen during both "Fresh" and "Compile".
+fn link_targets<'a, 'cfg>(cx: &mut Context<'a, 'cfg>,
+                          unit: &Unit<'a>,
+                          fresh: bool) -> CargoResult<Work> {
+    let filenames = cx.target_filenames(unit)?;
+    let package_id = unit.pkg.package_id().clone();
+    let target = unit.target.clone();
+    let profile = unit.profile.clone();
+    let features = cx.resolve.features_sorted(&package_id).into_iter()
+        .map(|s| s.to_owned())
+        .collect();
+    let json_messages = cx.build_config.json_messages;
+
+    Ok(Work::new(move |_| {
+        // If we're a "root crate", e.g. the target of this compilation, then we
+        // hard link our outputs out of the `deps` directory into the directory
+        // above. This means that `cargo build` will produce binaries in
+        // `target/debug` which one probably expects.
+        let mut destinations = vec![];
+        for &(ref src, ref link_dst, _file_type) in filenames.iter() {
+            // This may have been a `cargo rustc` command which changes the
+            // output, so the source may not actually exist.
+            if !src.exists() {
+                continue
+            }
+            let dst = match link_dst.as_ref() {
+                Some(dst) => dst,
+                None => {
+                    destinations.push(src.display().to_string());
+                    continue;
+                }
+            };
+            destinations.push(dst.display().to_string());
+
+            debug!("linking {} to {}", src.display(), dst.display());
+            if is_same_file(src, dst).unwrap_or(false) {
+                continue
+            }
+            if dst.exists() {
+                fs::remove_file(&dst).chain_err(|| {
+                    format!("failed to remove: {}", dst.display())
+                })?;
+            }
+
+            let link_result = if src.is_dir() {
+                #[cfg(unix)]
+                use std::os::unix::fs::symlink;
+                #[cfg(target_os = "redox")]
+                use std::os::redox::fs::symlink;
+                #[cfg(windows)]
+                use std::os::windows::fs::symlink_dir as symlink;
+
+                symlink(src, dst)
+            } else {
+                fs::hard_link(src, dst)
+            };
+            link_result
+                .or_else(|err| {
+                    debug!("link failed {}. falling back to fs::copy", err);
+                    fs::copy(src, dst).map(|_| ())
+                })
+                .chain_err(|| {
+                    format!("failed to link or copy `{}` to `{}`",
+                            src.display(), dst.display())
+                })?;
+        }
+
+        if json_messages {
+            machine_message::emit(&machine_message::Artifact {
+                package_id: &package_id,
+                target: &target,
+                profile: &profile,
+                features: features,
+                filenames: destinations,
+                fresh: fresh,
+            });
+        }
+        Ok(())
+    }))
+}
+
+fn load_build_deps(cx: &Context, unit: &Unit) -> Option<Arc<BuildScripts>> {
+    cx.build_scripts.get(unit).cloned()
+}
+
+// For all plugin dependencies, add their -L paths (now calculated and
+// present in `state`) to the dynamic library load path for the command to
+// execute.
+fn add_plugin_deps(rustc: &mut ProcessBuilder,
+                   build_state: &BuildMap,
+                   build_scripts: &BuildScripts,
+                   root_output: &PathBuf)
+                   -> CargoResult<()> {
+    let var = util::dylib_path_envvar();
+    let search_path = rustc.get_env(var).unwrap_or_default();
+    let mut search_path = env::split_paths(&search_path).collect::<Vec<_>>();
+    for id in build_scripts.plugins.iter() {
+        let key = (id.clone(), Kind::Host);
+        let output = build_state.get(&key).ok_or_else(|| {
+            internal(format!("couldn't find libs for plugin dep {}", id))
+        })?;
+        search_path.append(&mut filter_dynamic_search_path(output.library_paths.iter(),
+                                                           root_output));
+    }
+    let search_path = join_paths(&search_path, var)?;
+    rustc.env(var, &search_path);
+    Ok(())
+}
+
+// Determine paths to add to the dynamic search path from -L entries
+//
+// Strip off prefixes like "native=" or "framework=" and filter out directories
+// *not* inside our output directory since they are likely spurious and can cause
+// clashes with system shared libraries (issue #3366).
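+//
+// For example, `-L native=/foo/bar` yields `/foo/bar`, which is kept only if
+// it lies inside the target root.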
+fn filter_dynamic_search_path<'a, I>(paths: I, root_output: &PathBuf) -> Vec<PathBuf>
+    where I: Iterator<Item = &'a PathBuf> {
+    let mut search_path = vec![];
+    for dir in paths {
+        let dir = match dir.to_str() {
+            Some(s) => {
+                let mut parts = s.splitn(2, '=');
+                match (parts.next(), parts.next()) {
+                    (Some("native"), Some(path)) |
+                    (Some("crate"), Some(path)) |
+                    (Some("dependency"), Some(path)) |
+                    (Some("framework"), Some(path)) |
+                    (Some("all"), Some(path)) => path.into(),
+                    _ => dir.clone(),
+                }
+            }
+            None => dir.clone(),
+        };
+        if dir.starts_with(&root_output) {
+            search_path.push(dir);
+        } else {
+            debug!("Not including path {} in runtime library search path because it is \
+                    outside target root {}", dir.display(), root_output.display());
+        }
+    }
+    search_path
+}
+
+fn prepare_rustc<'a, 'cfg>(cx: &mut Context<'a, 'cfg>,
+                           crate_types: &[&str],
+                           unit: &Unit<'a>) -> CargoResult<ProcessBuilder> {
+    let mut base = cx.compilation.rustc_process(unit.pkg)?;
+    base.inherit_jobserver(&cx.jobserver);
+    build_base_args(cx, &mut base, unit, crate_types);
+    build_deps_args(&mut base, cx, unit)?;
+    Ok(base)
+}
+
+
+fn rustdoc<'a, 'cfg>(cx: &mut Context<'a, 'cfg>,
+                     unit: &Unit<'a>) -> CargoResult<Work> {
+    let mut rustdoc = cx.compilation.rustdoc_process(unit.pkg)?;
+    rustdoc.inherit_jobserver(&cx.jobserver);
+    rustdoc.arg("--crate-name").arg(&unit.target.crate_name())
+           .cwd(cx.config.cwd())
+           .arg(&root_path(cx, unit));
+
+    if unit.kind != Kind::Host {
+        if let Some(target) = cx.requested_target() {
+            rustdoc.arg("--target").arg(target);
+        }
+    }
+
+    let doc_dir = cx.out_dir(unit);
+
+    // Create the documentation directory ahead of time as rustdoc currently has
+    // a bug where concurrent invocations will race to create this directory if
+    // it doesn't already exist.
+    fs::create_dir_all(&doc_dir)?;
+
+    rustdoc.arg("-o").arg(doc_dir);
+
+    for feat in cx.resolve.features_sorted(unit.pkg.package_id()) {
+        rustdoc.arg("--cfg").arg(&format!("feature=\"{}\"", feat));
+    }
+
+    if let Some(ref args) = unit.profile.rustdoc_args {
+        rustdoc.args(args);
+    }
+
+    build_deps_args(&mut rustdoc, cx, unit)?;
+
+    rustdoc.args(&cx.rustdocflags_args(unit)?);
+
+    let name = unit.pkg.name().to_string();
+    let build_state = cx.build_state.clone();
+    let key = (unit.pkg.package_id().clone(), unit.kind);
+
+    Ok(Work::new(move |state| {
+        if let Some(output) = build_state.outputs.lock().unwrap().get(&key) {
+            for cfg in output.cfgs.iter() {
+                rustdoc.arg("--cfg").arg(cfg);
+            }
+            for &(ref name, ref value) in output.env.iter() {
+                rustdoc.env(name, value);
+            }
+        }
+        state.running(&rustdoc);
+        rustdoc.exec().chain_err(|| format!("Could not document `{}`.", name))
+    }))
+}
+
+// The path that we pass to rustc is actually fairly important because it will
+// show up in error messages and the like. For this reason we take a few moments
+// to ensure that something shows up pretty reasonably.
+//
+// The heuristic here is fairly simple, but the key idea is that the path is
+// always "relative" to the current directory in order to be found easily. The
+// path is only actually relative if the current directory is an ancestor of it.
+// This means that non-path dependencies (git/registry) will likely be shown as
+// absolute paths instead of relative paths.
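+//
+// For example, a workspace member may show up as `src/lib.rs` while a
+// registry dependency keeps its absolute path under the cargo home.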
+fn root_path(cx: &Context, unit: &Unit) -> PathBuf {
+    let absolute = unit.pkg.root().join(unit.target.src_path());
+    let cwd = cx.config.cwd();
+    if absolute.starts_with(cwd) {
+        util::without_prefix(&absolute, cwd).map(|s| {
+            s.to_path_buf()
+        }).unwrap_or(absolute)
+    } else {
+        absolute
+    }
+}
+
+fn build_base_args<'a, 'cfg>(cx: &mut Context<'a, 'cfg>,
+                             cmd: &mut ProcessBuilder,
+                             unit: &Unit<'a>,
+                             crate_types: &[&str]) {
+    let Profile {
+        ref opt_level, lto, codegen_units, ref rustc_args, debuginfo,
+        debug_assertions, overflow_checks, rpath, test, doc: _doc,
+        run_custom_build, ref panic, rustdoc_args: _, check,
+    } = *unit.profile;
+    assert!(!run_custom_build);
+
+    // Move to cwd so the root_path() passed below is actually correct
+    cmd.cwd(cx.config.cwd());
+
+    cmd.arg("--crate-name").arg(&unit.target.crate_name());
+
+    cmd.arg(&root_path(cx, unit));
+
+    match cx.config.shell().color_choice() {
+        ColorChoice::Always => { cmd.arg("--color").arg("always"); }
+        ColorChoice::Never => { cmd.arg("--color").arg("never"); }
+        ColorChoice::CargoAuto => {}
+    }
+
+    if cx.build_config.json_messages {
+        cmd.arg("--error-format").arg("json");
+    }
+
+    if !test {
+        for crate_type in crate_types.iter() {
+            cmd.arg("--crate-type").arg(crate_type);
+        }
+    }
+
+    if check {
+        cmd.arg("--emit=dep-info,metadata");
+    } else {
+        cmd.arg("--emit=dep-info,link");
+    }
+
+    let prefer_dynamic = (unit.target.for_host() &&
+                          !unit.target.is_custom_build()) ||
+                         (crate_types.contains(&"dylib") &&
+                          cx.ws.members().any(|p| p != unit.pkg));
+    if prefer_dynamic {
+        cmd.arg("-C").arg("prefer-dynamic");
+    }
+
+    if opt_level != "0" {
+        cmd.arg("-C").arg(&format!("opt-level={}", opt_level));
+    }
+
+    // If a panic mode was configured *and* we're not ever going to be used in a
+    // plugin, then we can compile with that panic mode.
+    //
+    // If we're used in a plugin then we'll eventually be linked to libsyntax
+    // most likely which isn't compiled with a custom panic mode, so we'll just
+    // get an error if we actually compile with that. This fixes `panic=abort`
+    // crates which have plugin dependencies, but unfortunately means that
+    // dependencies shared between the main application and plugins must be
+    // compiled without `panic=abort`. This isn't so bad, though, as the main
+    // application will still be compiled with `panic=abort`.
+    if let Some(panic) = panic.as_ref() {
+        if !cx.used_in_plugin.contains(unit) {
+            cmd.arg("-C").arg(format!("panic={}", panic));
+        }
+    }
+
+    // Disable LTO for host builds as prefer_dynamic and it are mutually
+    // exclusive.
+    if unit.target.can_lto() && lto && !unit.target.for_host() {
+        cmd.args(&["-C", "lto"]);
+    } else if let Some(n) = codegen_units {
+        // There are some restrictions with LTO and codegen-units, so we
+        // only add codegen units when LTO is not used.
+        cmd.arg("-C").arg(&format!("codegen-units={}", n));
+    }
+
+    if let Some(debuginfo) = debuginfo {
+        cmd.arg("-C").arg(format!("debuginfo={}", debuginfo));
+    }
+
+    if let Some(ref args) = *rustc_args {
+        cmd.args(args);
+    }
+
+    // -C overflow-checks is implied by the setting of -C debug-assertions,
+    // so we only need to provide -C overflow-checks if it differs from
+    // the value of -C debug-assertions we would provide.
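+    // (rustc defaults: debug assertions are on only at opt-level 0, and
+    // overflow checks follow debug assertions unless overridden.)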
+    if opt_level != "0" {
+        if debug_assertions {
+            cmd.args(&["-C", "debug-assertions=on"]);
+            if !overflow_checks {
+                cmd.args(&["-C", "overflow-checks=off"]);
+            }
+        } else if overflow_checks {
+            cmd.args(&["-C", "overflow-checks=on"]);
+        }
+    } else if !debug_assertions {
+        cmd.args(&["-C", "debug-assertions=off"]);
+        if overflow_checks {
+            cmd.args(&["-C", "overflow-checks=on"]);
+        }
+    } else if !overflow_checks {
+        cmd.args(&["-C", "overflow-checks=off"]);
+    }
+
+    if test && unit.target.harness() {
+        cmd.arg("--test");
+    } else if test {
+        cmd.arg("--cfg").arg("test");
+    }
+
+    // We ideally want deterministic invocations of rustc to ensure that
+    // rustc-caching strategies like sccache are able to cache more, so sort the
+    // feature list here.
+    for feat in cx.resolve.features_sorted(unit.pkg.package_id()) {
+        cmd.arg("--cfg").arg(&format!("feature=\"{}\"", feat));
+    }
+
+    match cx.target_metadata(unit) {
+        Some(m) => {
+            cmd.arg("-C").arg(&format!("metadata={}", m));
+            cmd.arg("-C").arg(&format!("extra-filename=-{}", m));
+        }
+        None => {
+            cmd.arg("-C").arg(&format!("metadata={}", cx.target_short_hash(unit)));
+        }
+    }
+
+    if rpath {
+        cmd.arg("-C").arg("rpath");
+    }
+
+    cmd.arg("--out-dir").arg(&cx.out_dir(unit));
+
+    fn opt(cmd: &mut ProcessBuilder, key: &str, prefix: &str,
+           val: Option<&OsStr>) {
+        if let Some(val) = val {
+            let mut joined = OsString::from(prefix);
+            joined.push(val);
+            cmd.arg(key).arg(joined);
+        }
+    }
+
+    if unit.kind == Kind::Target {
+        opt(cmd, "--target", "", cx.requested_target().map(|s| s.as_ref()));
+    }
+
+    opt(cmd, "-C", "ar=", cx.ar(unit.kind).map(|s| s.as_ref()));
+    opt(cmd, "-C", "linker=", cx.linker(unit.kind).map(|s| s.as_ref()));
+}
+
+
+fn build_deps_args<'a, 'cfg>(cmd: &mut ProcessBuilder,
+                             cx: &mut Context<'a, 'cfg>,
+                             unit: &Unit<'a>) -> CargoResult<()> {
+    cmd.arg("-L").arg(&{
+        let mut deps = OsString::from("dependency=");
+        deps.push(cx.deps_dir(unit));
+        deps
+    });
+
+    // Be sure that the host path is also listed. This'll ensure that proc-macro
+    // dependencies are correctly found (for reexported macros).
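+    // (Proc macros and plugins are always compiled for the host, so their
+    // artifacts live in the host deps directory even when cross compiling.)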
+    if let Kind::Target = unit.kind {
+        cmd.arg("-L").arg(&{
+            let mut deps = OsString::from("dependency=");
+            deps.push(cx.host_deps());
+            deps
+        });
+    }
+
+    for unit in cx.dep_targets(unit)?.iter() {
+        if unit.profile.run_custom_build {
+            cmd.env("OUT_DIR", &cx.build_script_out_dir(unit));
+        }
+        if unit.target.linkable() && !unit.profile.doc {
+            link_to(cmd, cx, unit)?;
+        }
+    }
+
+    return Ok(());
+
+    fn link_to<'a, 'cfg>(cmd: &mut ProcessBuilder,
+                         cx: &mut Context<'a, 'cfg>,
+                         unit: &Unit<'a>) -> CargoResult<()> {
+        for &(ref dst, _, file_type) in cx.target_filenames(unit)?.iter() {
+            if file_type != TargetFileType::Linkable {
+                continue
+            }
+            let mut v = OsString::new();
+            v.push(&unit.target.crate_name());
+            v.push("=");
+            v.push(cx.out_dir(unit));
+            v.push(&path::MAIN_SEPARATOR.to_string());
+            v.push(&dst.file_name().unwrap());
+            cmd.arg("--extern").arg(&v);
+        }
+        Ok(())
+    }
+}
+
+fn envify(s: &str) -> String {
+    s.chars()
+     .flat_map(|c| c.to_uppercase())
+     .map(|c| if c == '-' {'_'} else {c})
+     .collect()
+}
+
+impl Kind {
+    fn for_target(&self, target: &Target) -> Kind {
+        // Once we start compiling for the `Host` kind we continue doing so, but
+        // if we are a `Target` kind and then we start compiling for a target
+        // that needs to be on the host we lift ourselves up to `Host`
+        match *self {
+            Kind::Host => Kind::Host,
+            Kind::Target if target.for_host() => Kind::Host,
+            Kind::Target => Kind::Target,
+        }
+    }
+}
diff --git a/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/output_depinfo.rs b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/output_depinfo.rs
new file mode 100644
index 000000000..b07b299f0
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_rustc/output_depinfo.rs
@@ -0,0 +1,95 @@
+use std::collections::HashSet;
+use std::io::{Write, BufWriter, ErrorKind};
+use std::fs::{self, File};
+use std::path::{Path, PathBuf};
+
+use ops::{Context, Unit};
+use util::{CargoResult, internal};
+use ops::cargo_rustc::fingerprint;
+
+fn render_filename<P: AsRef<Path>>(path: P, basedir: Option<&str>) -> CargoResult<String> {
+    let path = path.as_ref();
+    let relpath = match basedir {
+        None => path,
+        Some(base) => match path.strip_prefix(base) {
+            Ok(relpath) => relpath,
+            _ => path,
+        }
+    };
+    relpath.to_str().ok_or_else(|| internal("path not utf-8")).map(|f| f.replace(" ", "\\ "))
+}
+
+fn add_deps_for_unit<'a, 'b>(
+    deps: &mut HashSet<PathBuf>,
+    context: &mut Context<'a, 'b>,
+    unit: &Unit<'a>,
+    visited: &mut HashSet<Unit<'a>>,
+)
+    -> CargoResult<()>
+{
+    if !visited.insert(*unit) {
+        return Ok(());
+    }
+
+    // units representing the execution of a build script don't actually
+    // generate a dep info file, so we just keep on going below
+    if !unit.profile.run_custom_build {
+        // Add dependencies from rustc dep-info output (stored in fingerprint directory)
+        let dep_info_loc = fingerprint::dep_info_loc(context, unit);
+        if let Some(paths) = fingerprint::parse_dep_info(&dep_info_loc)? {
+            for path in paths {
+                deps.insert(path);
+            }
+        } else {
+            debug!("can't find dep_info for {:?} {:?}",
+                   unit.pkg.package_id(), unit.profile);
+            return Err(internal("dep_info missing"));
+        }
+    }
+
+    // Add rerun-if-changed dependencies
+    let key = (unit.pkg.package_id().clone(), unit.kind);
+    if let Some(output) = context.build_state.outputs.lock().unwrap().get(&key) {
+        for path in &output.rerun_if_changed {
+            deps.insert(path.into());
+        }
+    }
+
+    // Recursively traverse all transitive dependencies
+    for dep_unit in &context.dep_targets(unit)? {
+        let source_id = dep_unit.pkg.package_id().source_id();
+        if source_id.is_path() {
+            add_deps_for_unit(deps, context, dep_unit, visited)?;
+        }
+    }
+    Ok(())
+}
+
+pub fn output_depinfo<'a, 'b>(context: &mut Context<'a, 'b>, unit: &Unit<'a>) -> CargoResult<()> {
+    let mut deps = HashSet::new();
+    let mut visited = HashSet::new();
+    let success = add_deps_for_unit(&mut deps, context, unit, &mut visited).is_ok();
+    let basedir = None; // TODO
+    for &(_, ref link_dst, _) in context.target_filenames(unit)?.iter() {
+        if let Some(ref link_dst) = *link_dst {
+            let output_path = link_dst.with_extension("d");
+            if success {
+                let mut outfile = BufWriter::new(File::create(output_path)?);
+                let target_fn = render_filename(link_dst, basedir)?;
+                write!(outfile, "{}:", target_fn)?;
+                for dep in &deps {
+                    write!(outfile, " {}", render_filename(dep, basedir)?)?;
+                }
+                writeln!(outfile, "")?;
+            } else if let Err(err) = fs::remove_file(output_path) {
+                // dep-info generation failed, so delete output file. This will usually
+                // cause the build system to always rerun the build rule, which is correct
+                // if inefficient.
+                if err.kind() != ErrorKind::NotFound {
+                    return Err(err.into());
+                }
+            }
+        }
+    }
+    Ok(())
+}
diff --git a/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_test.rs b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_test.rs
new file mode 100644
index 000000000..f808ff5e1
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/ops/cargo_test.rs
@@ -0,0 +1,214 @@
+use std::ffi::{OsString, OsStr};
+
+use ops::{self, Compilation};
+use util::{self, CargoTestError, Test, ProcessError};
+use util::errors::{CargoResult, CargoErrorKind, CargoError};
+use core::Workspace;
+
+pub struct TestOptions<'a> {
+    pub compile_opts: ops::CompileOptions<'a>,
+    pub no_run: bool,
+    pub no_fail_fast: bool,
+    pub only_doc: bool,
+}
+
+pub fn run_tests(ws: &Workspace,
+                 options: &TestOptions,
+                 test_args: &[String]) -> CargoResult<Option<CargoTestError>> {
+    let compilation = compile_tests(ws, options)?;
+
+    if options.no_run {
+        return Ok(None)
+    }
+    let (test, mut errors) = if options.only_doc {
+        assert!(options.compile_opts.filter.is_specific());
+        run_doc_tests(options, test_args, &compilation)?
+    } else {
+        run_unit_tests(options, test_args, &compilation)?
+    };
+
+    // If we have an error and want to fail fast, return
+    if !errors.is_empty() && !options.no_fail_fast {
+        return Ok(Some(CargoTestError::new(test, errors)))
+    }
+
+    // If a specific test was requested or we're not running any tests at all,
+    // don't run any doc tests.
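+    // (A filter is "specific" when flags like `--lib`, `--bin` or `--test`
+    // were passed on the command line.)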
+    if options.compile_opts.filter.is_specific() {
+        match errors.len() {
+            0 => return Ok(None),
+            _ => return Ok(Some(CargoTestError::new(test, errors)))
+        }
+    }
+
+    let (doctest, docerrors) = run_doc_tests(options, test_args, &compilation)?;
+    let test = if docerrors.is_empty() { test } else { doctest };
+    errors.extend(docerrors);
+    if errors.is_empty() {
+        Ok(None)
+    } else {
+        Ok(Some(CargoTestError::new(test, errors)))
+    }
+}
+
+pub fn run_benches(ws: &Workspace,
+                   options: &TestOptions,
+                   args: &[String]) -> CargoResult<Option<CargoTestError>> {
+    let mut args = args.to_vec();
+    args.push("--bench".to_string());
+    let compilation = compile_tests(ws, options)?;
+
+    if options.no_run {
+        return Ok(None)
+    }
+    let (test, errors) = run_unit_tests(options, &args, &compilation)?;
+    match errors.len() {
+        0 => Ok(None),
+        _ => Ok(Some(CargoTestError::new(test, errors))),
+    }
+}
+
+fn compile_tests<'a>(ws: &Workspace<'a>,
+                     options: &TestOptions<'a>)
+                     -> CargoResult<Compilation<'a>> {
+    let mut compilation = ops::compile(ws, &options.compile_opts)?;
+    compilation.tests.sort_by(|a, b| {
+        (a.0.package_id(), &a.1, &a.2).cmp(&(b.0.package_id(), &b.1, &b.2))
+    });
+    Ok(compilation)
+}
+
+/// Run the unit and integration tests of a project.
+fn run_unit_tests(options: &TestOptions,
+                  test_args: &[String],
+                  compilation: &Compilation)
+                  -> CargoResult<(Test, Vec<ProcessError>)> {
+    let config = options.compile_opts.config;
+    let cwd = options.compile_opts.config.cwd();
+
+    let mut errors = Vec::new();
+
+    for &(ref pkg, ref kind, ref test, ref exe) in &compilation.tests {
+        let to_display = match util::without_prefix(exe, cwd) {
+            Some(path) => path,
+            None => &**exe,
+        };
+        let mut cmd = compilation.target_process(exe, pkg)?;
+        cmd.args(test_args);
+        config.shell().concise(|shell| {
+            shell.status("Running", to_display.display().to_string())
+        })?;
+        config.shell().verbose(|shell| {
+            shell.status("Running", cmd.to_string())
+        })?;
+
+        let result = cmd.exec();
+
+        match result {
+            Err(CargoError(CargoErrorKind::ProcessErrorKind(e), ..)) => {
+                errors.push((kind.clone(), test.clone(), e));
+                if !options.no_fail_fast {
+                    break;
+                }
+            }
+            Err(e) => {
+                // This is an unexpected Cargo error rather than a test failure
+                return Err(e)
+            }
+            Ok(()) => {}
+        }
+    }
+
+    if errors.len() == 1 {
+        let (kind, test, e) = errors.pop().unwrap();
+        Ok((Test::UnitTest(kind, test), vec![e]))
+    } else {
+        Ok((Test::Multiple, errors.into_iter().map(|(_, _, e)| e).collect()))
+    }
+}
+
+fn run_doc_tests(options: &TestOptions,
+                 test_args: &[String],
+                 compilation: &Compilation)
+                 -> CargoResult<(Test, Vec<ProcessError>)> {
+    let mut errors = Vec::new();
+    let config = options.compile_opts.config;
+
+    // We don't build/run doctests if target != host
+    if config.rustc()?.host != compilation.target {
+        return Ok((Test::Doc, errors));
+    }
+
+    let libs = compilation.to_doc_test.iter().map(|package| {
+        (package, package.targets().iter().filter(|t| t.doctested())
+                         .map(|t| (t.src_path(), t.name(), t.crate_name())))
+    });
+
+    for (package, tests) in libs {
+        for (lib, name, crate_name) in tests {
+            config.shell().status("Doc-tests", name)?;
+            let mut p = compilation.rustdoc_process(package)?;
+            p.arg("--test").arg(lib)
+             .arg("--crate-name").arg(&crate_name);
+
+            for &rust_dep in &[&compilation.deps_output] {
+                let mut arg = OsString::from("dependency=");
+                arg.push(rust_dep);
+                p.arg("-L").arg(arg);
+            }
+
+            for native_dep in compilation.native_dirs.iter() {
+                p.arg("-L").arg(native_dep);
+            }
+
+            for &host_rust_dep in &[&compilation.host_deps_output] {
+                let mut arg = OsString::from("dependency=");
+                arg.push(host_rust_dep);
+                p.arg("-L").arg(arg);
+            }
+
+            for arg in test_args {
+                p.arg("--test-args").arg(arg);
+            }
+
+            if let Some(cfgs) = compilation.cfgs.get(package.package_id()) {
+                for cfg in cfgs.iter() {
+                    p.arg("--cfg").arg(cfg);
+                }
+            }
+
+            let libs = &compilation.libraries[package.package_id()];
+            for &(ref target, ref lib) in libs.iter() {
+                // Note that we can *only* doctest rlib outputs here. A
+                // staticlib output cannot be linked by the compiler (it just
+                // doesn't do that). A dylib output, however, can be linked by
+                // the compiler, but will always fail. Currently all dylibs are
+                // built as "static dylibs" where the standard library is
+                // statically linked into the dylib. The doc tests fail,
+                // however, for now as they try to link the standard library
+                // dynamically as well, causing problems. As a result we only
+                // pass `--extern` for rlib deps and skip out on all other
+                // artifacts.
+                if lib.extension() != Some(OsStr::new("rlib")) &&
+                   !target.for_host() {
+                    continue
+                }
+                let mut arg = OsString::from(target.crate_name());
+                arg.push("=");
+                arg.push(lib);
+                p.arg("--extern").arg(&arg);
+            }
+
+            config.shell().verbose(|shell| {
+                shell.status("Running", p.to_string())
+            })?;
+            if let Err(CargoError(CargoErrorKind::ProcessErrorKind(e), ..)) = p.exec() {
+                errors.push(e);
+                if !options.no_fail_fast {
+                    return Ok((Test::Doc, errors));
+                }
+            }
+        }
+    }
+    Ok((Test::Doc, errors))
+}
diff --git a/collector/compile-benchmarks/cargo/src/cargo/ops/lockfile.rs b/collector/compile-benchmarks/cargo/src/cargo/ops/lockfile.rs
new file mode 100644
index 000000000..7368bbf8a
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/ops/lockfile.rs
@@ -0,0 +1,149 @@
+use std::io::prelude::*;
+
+use toml;
+
+use core::{Resolve, resolver, Workspace};
+use core::resolver::WorkspaceResolve;
+use util::Filesystem;
+use util::errors::{CargoResult, CargoResultExt};
+use util::toml as cargo_toml;
+
+pub fn load_pkg_lockfile(ws: &Workspace) -> CargoResult<Option<Resolve>> {
+    if !ws.root().join("Cargo.lock").exists() {
+        return Ok(None)
+    }
+
+    let root = Filesystem::new(ws.root().to_path_buf());
+    let mut f = root.open_ro("Cargo.lock", ws.config(), "Cargo.lock file")?;
+
+    let mut s = String::new();
+    f.read_to_string(&mut s).chain_err(|| {
+        format!("failed to read file: {}", f.path().display())
+    })?;
+
+    (|| -> CargoResult<Option<Resolve>> {
+        let resolve: toml::Value = cargo_toml::parse(&s, f.path(), ws.config())?;
+        let v: resolver::EncodableResolve = resolve.try_into()?;
+        Ok(Some(v.into_resolve(ws)?))
+    })().chain_err(|| {
+        format!("failed to parse lock file at: {}", f.path().display())
+    })
+}
+
+pub fn write_pkg_lockfile(ws: &Workspace, resolve: &Resolve) -> CargoResult<()> {
+    // Load the original lockfile if it exists.
+    let ws_root = Filesystem::new(ws.root().to_path_buf());
+    let orig = ws_root.open_ro("Cargo.lock", ws.config(), "Cargo.lock file");
+    let orig = orig.and_then(|mut f| {
+        let mut s = String::new();
+        f.read_to_string(&mut s)?;
+        Ok(s)
+    });
+
+    let toml = toml::Value::try_from(WorkspaceResolve { ws, resolve }).unwrap();
+
+    let mut out = String::new();
+
+    let deps = toml["package"].as_array().unwrap();
+    for dep in deps.iter() {
+        let dep = dep.as_table().unwrap();
+
+        out.push_str("[[package]]\n");
+        emit_package(dep, &mut out);
+    }
+
+    if let Some(patch) = toml.get("patch") {
+        let list = patch["unused"].as_array().unwrap();
+        for entry in list {
+            out.push_str("[[patch.unused]]\n");
+            emit_package(entry.as_table().unwrap(), &mut out);
+            out.push_str("\n");
+        }
+    }
+
+    if let Some(meta) = toml.get("metadata") {
+        out.push_str("[metadata]\n");
+        out.push_str(&meta.to_string());
+    }
+
+    // If the lockfile contents haven't changed, don't rewrite it. This is
+    // helpful on read-only filesystems.
+    if let Ok(orig) = orig {
+        if are_equal_lockfiles(orig, &out, ws) {
+            return Ok(())
+        }
+    }
+
+    if !ws.config().lock_update_allowed() {
+        let flag = if ws.config().network_allowed() {"--locked"} else {"--frozen"};
+        bail!("the lock file needs to be updated but {} was passed to \
+               prevent this", flag);
+    }
+
+    // Ok, if that didn't work just write it out
+    ws_root.open_rw("Cargo.lock", ws.config(), "Cargo.lock file").and_then(|mut f| {
+        f.file().set_len(0)?;
+        f.write_all(out.as_bytes())?;
+        Ok(())
+    }).chain_err(|| {
+        format!("failed to write {}",
+                ws.root().join("Cargo.lock").display())
+    })
+}
+
+fn are_equal_lockfiles(mut orig: String, current: &str, ws: &Workspace) -> bool {
+    if has_crlf_line_endings(&orig) {
+        orig = orig.replace("\r\n", "\n");
+    }
+
+    // If we want to try and avoid updating the lockfile, parse both and
+    // compare them; since this is somewhat expensive, don't do it in the
+    // common case where we can update lockfiles.
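+    // (Updates are disallowed when `--locked` or `--frozen` is in effect.)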
+    if !ws.config().lock_update_allowed() {
+        let res: CargoResult<bool> = (|| {
+            let old: resolver::EncodableResolve = toml::from_str(&orig)?;
+            let new: resolver::EncodableResolve = toml::from_str(current)?;
+            Ok(old.into_resolve(ws)? == new.into_resolve(ws)?)
+        })();
+        if let Ok(true) = res {
+            return true;
+        }
+    }
+
+    current == orig
+}
+
+fn has_crlf_line_endings(s: &str) -> bool {
+    // Only check the first line.
+    if let Some(lf) = s.find('\n') {
+        s[..lf].ends_with('\r')
+    } else {
+        false
+    }
+}
+
+fn emit_package(dep: &toml::value::Table, out: &mut String) {
+    out.push_str(&format!("name = {}\n", &dep["name"]));
+    out.push_str(&format!("version = {}\n", &dep["version"]));
+
+    if dep.contains_key("source") {
+        out.push_str(&format!("source = {}\n", &dep["source"]));
+    }
+
+    if let Some(s) = dep.get("dependencies") {
+        let slice = s.as_array().unwrap();
+
+        if !slice.is_empty() {
+            out.push_str("dependencies = [\n");
+
+            for child in slice.iter() {
+                out.push_str(&format!(" {},\n", child));
+            }
+
+            out.push_str("]\n");
+        }
+        out.push_str("\n");
+    } else if dep.contains_key("replace") {
+        out.push_str(&format!("replace = {}\n\n", &dep["replace"]));
+    }
+}
diff --git a/collector/compile-benchmarks/cargo/src/cargo/ops/mod.rs b/collector/compile-benchmarks/cargo/src/cargo/ops/mod.rs
new file mode 100644
index 000000000..0cd1ec718
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/ops/mod.rs
@@ -0,0 +1,43 @@
+pub use self::cargo_clean::{clean, CleanOptions};
+pub use self::cargo_compile::{compile, compile_with_exec, compile_ws, CompileOptions};
+pub use self::cargo_compile::{CompileFilter, CompileMode, FilterRule, MessageFormat, Packages};
+pub use self::cargo_read_manifest::{read_package, read_packages};
+pub use self::cargo_rustc::{compile_targets, Compilation, Kind, Unit};
+pub use self::cargo_rustc::{Context, is_bad_artifact_name};
+pub use self::cargo_rustc::{BuildOutput, BuildConfig, TargetConfig};
+pub use self::cargo_rustc::{Executor, DefaultExecutor};
+pub use self::cargo_run::run;
+pub use self::cargo_install::{install, install_list, uninstall};
+pub use self::cargo_new::{new, init, NewOptions, VersionControl};
+pub use self::cargo_doc::{doc, DocOptions};
+pub use self::cargo_generate_lockfile::{generate_lockfile};
+pub use self::cargo_generate_lockfile::{update_lockfile};
+pub use self::cargo_generate_lockfile::UpdateOptions;
+pub use self::lockfile::{load_pkg_lockfile, write_pkg_lockfile};
+pub use self::cargo_test::{run_tests, run_benches, TestOptions};
+pub use self::cargo_package::{package, PackageOpts};
+pub use self::registry::{publish, registry_configuration, RegistryConfig};
+pub use self::registry::{registry_login, search, http_proxy_exists, http_handle};
+pub use self::registry::{modify_owners, yank, OwnersOptions, PublishOpts};
+pub use self::cargo_fetch::fetch;
+pub use self::cargo_pkgid::pkgid;
+pub use self::resolve::{resolve_ws, resolve_ws_precisely, resolve_with_previous};
+pub use self::cargo_output_metadata::{output_metadata, OutputMetadataOptions, ExportInfo};
+
+mod cargo_clean;
+mod cargo_compile;
+mod cargo_doc;
+mod cargo_fetch;
+mod cargo_generate_lockfile;
+mod cargo_install;
+mod cargo_new;
+mod cargo_output_metadata;
+mod cargo_package;
+mod cargo_pkgid;
+mod cargo_read_manifest;
+mod cargo_run;
+mod cargo_rustc;
+mod cargo_test;
+mod lockfile;
+mod registry;
+mod resolve;
diff --git a/collector/compile-benchmarks/cargo/src/cargo/ops/registry.rs b/collector/compile-benchmarks/cargo/src/cargo/ops/registry.rs
new file mode 100644
index 000000000..42ff2f872
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/ops/registry.rs
@@ -0,0 +1,445 @@
+use std::env;
+use std::fs::{self, File};
+use std::iter::repeat;
+use std::time::Duration;
+
+use curl::easy::{Easy, SslOpt};
+use git2;
+use registry::{Registry, NewCrate, NewCrateDependency};
+
+use url::percent_encoding::{percent_encode, QUERY_ENCODE_SET};
+
+use version;
+use core::source::Source;
+use core::{Package, SourceId, Workspace};
+use core::dependency::Kind;
+use core::manifest::ManifestMetadata;
+use ops;
+use sources::{RegistrySource};
+use util::config::{self, Config};
+use util::paths;
+use util::ToUrl;
+use util::errors::{CargoError, CargoResult, CargoResultExt};
+use util::important_paths::find_root_manifest_for_wd;
+
+pub struct RegistryConfig {
+    pub index: Option<String>,
+    pub token: Option<String>,
+}
+
+pub struct PublishOpts<'cfg> {
+    pub config: &'cfg Config,
+    pub token: Option<String>,
+    pub index: Option<String>,
+    pub verify: bool,
+    pub allow_dirty: bool,
+    pub jobs: Option<u32>,
+    pub target: Option<&'cfg str>,
+    pub dry_run: bool,
+}
+
+pub fn publish(ws: &Workspace, opts: &PublishOpts) -> CargoResult<()> {
+    let pkg = ws.current()?;
+
+    if !pkg.publish() {
+        bail!("some crates cannot be published.\n\
+               `{}` is marked as unpublishable", pkg.name());
+    }
+    if !pkg.manifest().patch().is_empty() {
+        bail!("published crates cannot contain [patch] sections");
+    }
+
+    let (mut registry, reg_id) = registry(opts.config,
+                                          opts.token.clone(),
+                                          opts.index.clone())?;
+    verify_dependencies(pkg, &reg_id)?;
+
+    // Prepare a tarball, with a non-suppressable warning if metadata
+    // is missing since this is being put online.
+    let tarball = ops::package(ws, &ops::PackageOpts {
+        config: opts.config,
+        verify: opts.verify,
+        list: false,
+        check_metadata: true,
+        allow_dirty: opts.allow_dirty,
+        target: opts.target,
+        jobs: opts.jobs,
+    })?.unwrap();
+
+    // Upload said tarball to the specified destination
+    opts.config.shell().status("Uploading", pkg.package_id().to_string())?;
+    transmit(opts.config, pkg, tarball.file(), &mut registry, opts.dry_run)?;
+
+    Ok(())
+}
+
+fn verify_dependencies(pkg: &Package, registry_src: &SourceId)
+                       -> CargoResult<()> {
+    for dep in pkg.dependencies().iter() {
+        if dep.source_id().is_path() {
+            if !dep.specified_req() {
+                bail!("all path dependencies must have a version specified \
+                       when publishing.\ndependency `{}` does not specify \
+                       a version", dep.name())
+            }
+        } else if dep.source_id() != registry_src {
+            bail!("crates cannot be published to crates.io with dependencies sourced from \
+                   a repository\neither publish `{}` as its own crate on crates.io and \
+                   specify a crates.io version as a dependency or pull it into this \
+                   repository and specify it with a path and version\n(crate `{}` has \
+                   repository path `{}`)", dep.name(), dep.name(), dep.source_id());
+        }
+    }
+    Ok(())
+}
+
+fn transmit(config: &Config,
+            pkg: &Package,
+            tarball: &File,
+            registry: &mut Registry,
+            dry_run: bool) -> CargoResult<()> {
+    let deps = pkg.dependencies().iter().map(|dep| {
+        NewCrateDependency {
+            optional: dep.is_optional(),
+            default_features: dep.uses_default_features(),
+            name: dep.name().to_string(),
+            features: dep.features().to_vec(),
+            version_req: dep.version_req().to_string(),
+            target: dep.platform().map(|s| s.to_string()),
+            kind: match dep.kind() {
+                Kind::Normal => "normal",
+                Kind::Build => "build",
+                Kind::Development => "dev",
+            }.to_string(),
+        }
+    }).collect::<Vec<NewCrateDependency>>();
+    let manifest = pkg.manifest();
+    let ManifestMetadata {
+        ref authors, ref description, ref homepage, ref documentation,
+        ref keywords, ref readme, ref repository, ref license, ref license_file,
+        ref categories, ref badges,
+    } = *manifest.metadata();
+    let readme = match *readme {
+        Some(ref readme) => Some(paths::read(&pkg.root().join(readme))?),
+        None => None,
+    };
+    if let Some(ref file) = *license_file {
+        if fs::metadata(&pkg.root().join(file)).is_err() {
+            bail!("the license file `{}` does not exist", file)
+        }
+    }
+
+    // Do not upload if performing a dry run
+    if dry_run {
+        config.shell().warn("aborting upload due to dry run")?;
+        return Ok(());
+    }
+
+    let publish = registry.publish(&NewCrate {
+        name: pkg.name().to_string(),
+        vers: pkg.version().to_string(),
+        deps: deps,
+        features: pkg.summary().features().clone(),
+        authors: authors.clone(),
+        description: description.clone(),
+        homepage: homepage.clone(),
+        documentation: documentation.clone(),
+        keywords: keywords.clone(),
+        categories: categories.clone(),
+        readme: readme,
+        repository: repository.clone(),
+        license: license.clone(),
+        license_file: license_file.clone(),
+        badges: badges.clone(),
+    }, tarball);
+
+    match publish {
+        Ok(warnings) => {
+            if !warnings.invalid_categories.is_empty() {
+                let msg = format!("\
+                    the following are not valid category slugs and were \
+                    ignored: {}. Please see https://crates.io/category_slugs \
+                    for the list of all category slugs. \
+                    ", warnings.invalid_categories.join(", "));
+                config.shell().warn(&msg)?;
+            }
+
+            if !warnings.invalid_badges.is_empty() {
+                let msg = format!("\
+                    the following are not valid badges and were ignored: {}. \
+                    Either the badge type specified is unknown or a required \
+                    attribute is missing. Please see \
+                    http://doc.crates.io/manifest.html#package-metadata \
+                    for valid badge types and their required attributes.",
+                    warnings.invalid_badges.join(", "));
+                config.shell().warn(&msg)?;
+            }
+
+            Ok(())
+        },
+        Err(e) => Err(e.into()),
+    }
+}
+
+pub fn registry_configuration(config: &Config) -> CargoResult<RegistryConfig> {
+    let index = config.get_string("registry.index")?.map(|p| p.val);
+    let token = config.get_string("registry.token")?.map(|p| p.val);
+    Ok(RegistryConfig { index: index, token: token })
+}
+
+pub fn registry(config: &Config,
+                token: Option<String>,
+                index: Option<String>) -> CargoResult<(Registry, SourceId)> {
+    // Parse all configuration options
+    let RegistryConfig {
+        token: token_config,
+        index: _index_config,
+    } = registry_configuration(config)?;
+    let token = token.or(token_config);
+    let sid = match index {
+        Some(index) => SourceId::for_registry(&index.to_url()?)?,
+        None => SourceId::crates_io(config)?,
+    };
+    let api_host = {
+        let mut src = RegistrySource::remote(&sid, config);
+        src.update().chain_err(|| {
+            format!("failed to update {}", sid)
+        })?;
+        (src.config()?).unwrap().api
+    };
+    let handle = http_handle(config)?;
+    Ok((Registry::new_handle(api_host, token, handle), sid))
+}
+
+/// Create a new HTTP handle with appropriate global configuration for cargo.
+pub fn http_handle(config: &Config) -> CargoResult<Easy> {
+    if !config.network_allowed() {
+        bail!("attempting to make an HTTP request, but --frozen was \
+               specified")
+    }
+
+    // The timeout option for libcurl by default times out the entire transfer,
+    // but we probably don't want this. Instead we only set timeouts for the
+    // connect phase as well as a "low speed" timeout so if we don't receive
+    // many bytes in a large-ish period of time then we time out.
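+    // With the values below, a transfer that stays under 10 bytes/s for 30
+    // seconds is considered timed out.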
+    let mut handle = Easy::new();
+    handle.connect_timeout(Duration::new(30, 0))?;
+    handle.low_speed_limit(10 /* bytes per second */)?;
+    handle.low_speed_time(Duration::new(30, 0))?;
+    handle.useragent(&version().to_string())?;
+    if let Some(proxy) = http_proxy(config)? {
+        handle.proxy(&proxy)?;
+    }
+    if let Some(cainfo) = config.get_path("http.cainfo")? {
+        handle.cainfo(&cainfo.val)?;
+    }
+    if let Some(check) = config.get_bool("http.check-revoke")? {
+        handle.ssl_options(SslOpt::new().no_revoke(!check.val))?;
+    }
+    if let Some(timeout) = http_timeout(config)? {
+        handle.connect_timeout(Duration::new(timeout as u64, 0))?;
+        handle.low_speed_time(Duration::new(timeout as u64, 0))?;
+    }
+    Ok(handle)
+}
+
+/// Find an explicit HTTP proxy if one is available.
+///
+/// Favor cargo's `http.proxy`, then git's `http.proxy`. Proxies specified
+/// via environment variables are picked up by libcurl.
+fn http_proxy(config: &Config) -> CargoResult<Option<String>> {
+    if let Some(s) = config.get_string("http.proxy")? {
+        return Ok(Some(s.val))
+    }
+    if let Ok(cfg) = git2::Config::open_default() {
+        if let Ok(s) = cfg.get_str("http.proxy") {
+            return Ok(Some(s.to_string()))
+        }
+    }
+    Ok(None)
+}
+
+/// Determine if an http proxy exists.
+///
+/// Checks the following for existence, in order:
+///
+/// * cargo's `http.proxy`
+/// * git's `http.proxy`
+/// * `http_proxy` env var
+/// * `HTTP_PROXY` env var
+/// * `https_proxy` env var
+/// * `HTTPS_PROXY` env var
+pub fn http_proxy_exists(config: &Config) -> CargoResult<bool> {
+    if http_proxy(config)?.is_some() {
+        Ok(true)
+    } else {
+        Ok(["http_proxy", "HTTP_PROXY",
+            "https_proxy", "HTTPS_PROXY"].iter().any(|v| env::var(v).is_ok()))
+    }
+}
+
+pub fn http_timeout(config: &Config) -> CargoResult<Option<i64>> {
+    if let Some(s) = config.get_i64("http.timeout")? {
+        return Ok(Some(s.val))
+    }
+    Ok(env::var("HTTP_TIMEOUT").ok().and_then(|s| s.parse().ok()))
+}
+
+pub fn registry_login(config: &Config, token: String) -> CargoResult<()> {
+    let RegistryConfig { token: old_token, .. } = registry_configuration(config)?;
+    if let Some(old_token) = old_token {
+        if old_token == token {
+            return Ok(());
+        }
+    }
+
+    config::save_credentials(config, token)
+}
+
+pub struct OwnersOptions {
+    pub krate: Option<String>,
+    pub token: Option<String>,
+    pub index: Option<String>,
+    pub to_add: Option<Vec<String>>,
+    pub to_remove: Option<Vec<String>>,
+    pub list: bool,
+}
+
+pub fn modify_owners(config: &Config, opts: &OwnersOptions) -> CargoResult<()> {
+    let name = match opts.krate {
+        Some(ref name) => name.clone(),
+        None => {
+            let manifest_path = find_root_manifest_for_wd(None, config.cwd())?;
+            let pkg = Package::for_path(&manifest_path, config)?;
+            pkg.name().to_string()
+        }
+    };
+
+    let (mut registry, _) = registry(config, opts.token.clone(),
+                                     opts.index.clone())?;
+
+    if let Some(ref v) = opts.to_add {
+        let v = v.iter().map(|s| &s[..]).collect::<Vec<_>>();
+        let msg = registry.add_owners(&name, &v).map_err(|e| {
+            CargoError::from(format!("failed to invite owners to crate {}: {}", name, e))
+        })?;
+
+        config.shell().status("Owner", msg)?;
+    }
+
+    if let Some(ref v) = opts.to_remove {
+        let v = v.iter().map(|s| &s[..]).collect::<Vec<_>>();
+        config.shell().status("Owner", format!("removing {:?} from crate {}",
+                                               v, name))?;
+        registry.remove_owners(&name, &v).map_err(|e| {
+            CargoError::from(format!("failed to remove owners from crate {}: {}", name, e))
+        })?;
+    }
+
+    if opts.list {
+        let owners = registry.list_owners(&name).map_err(|e| {
+            CargoError::from(format!("failed to list owners of crate {}: {}", name, e))
+        })?;
+        for owner in owners.iter() {
+            print!("{}", owner.login);
+            match (owner.name.as_ref(), owner.email.as_ref()) {
+                (Some(name), Some(email)) => println!(" ({} <{}>)", name, email),
+                (Some(s), None) |
+                (None, Some(s)) => println!(" ({})", s),
+                (None, None) => println!(""),
+            }
+        }
+    }
+
+    Ok(())
+}
+
+pub fn yank(config: &Config,
+            krate: Option<String>,
+            version: Option<String>,
+            token: Option<String>,
+            index: Option<String>,
+            undo: bool) -> CargoResult<()> {
+    let name = match krate {
+        Some(name) => name,
+        None => {
+            let manifest_path = find_root_manifest_for_wd(None, config.cwd())?;
+            let pkg = Package::for_path(&manifest_path, config)?;
+            pkg.name().to_string()
+        }
+    };
+    let version = match version {
+        Some(v) => v,
+        None => bail!("a version must be specified to yank")
+    };
+
+    let (mut registry, _) = registry(config, token, index)?;
+
+    if undo {
+        config.shell().status("Unyank", format!("{}:{}", name, version))?;
+        registry.unyank(&name, &version).map_err(|e| {
+            CargoError::from(format!("failed to undo a yank: {}", e))
+        })?;
+    } else {
+        config.shell().status("Yank", format!("{}:{}", name, version))?;
+        registry.yank(&name, &version).map_err(|e| {
+            CargoError::from(format!("failed to yank: {}", e))
+        })?;
+    }
+
+    Ok(())
+}
+
+pub fn search(query: &str,
+              config: &Config,
+              index: Option<String>,
+              limit: u8) -> CargoResult<()> {
+    fn truncate_with_ellipsis(s: &str, max_length: usize) -> String {
+        if s.len() < max_length {
+            s.to_string()
+        } else {
+            format!("{}…", &s[..max_length - 1])
+        }
+    }
+
+    let (mut registry, _) = registry(config, None, index)?;
+    let (crates, total_crates) = registry.search(query, limit).map_err(|e| {
+        CargoError::from(format!("failed to retrieve search results from the registry: {}", e))
+    })?;
+
+    let list_items = crates.iter()
+        .map(|krate| (
+            format!("{} = \"{}\"", krate.name, krate.max_version),
+            krate.description.as_ref().map(|desc|
+                truncate_with_ellipsis(&desc.replace("\n", " "), 128))
+        ))
+        .collect::<Vec<_>>();
+    let description_margin = list_items.iter()
+        .map(|&(ref left, _)| left.len() + 4)
+        .max()
+        .unwrap_or(0);
+
+    for (name, description) in list_items.into_iter() {
+        let line = match description {
+            Some(desc) => {
+                let space = repeat(' ').take(description_margin - name.len())
+                                       .collect::<String>();
+                name + &space + "# " + &desc
+            }
+            None => name
+        };
+        println!("{}", line);
+    }
+
+    let search_max_limit = 100;
+    if total_crates > u32::from(limit) && limit < search_max_limit {
+        println!("... and {} crates more (use --limit N to see more)",
+                 total_crates - u32::from(limit));
+    } else if total_crates > u32::from(limit) && limit >= search_max_limit {
+        println!("... and {} crates more (go to http://crates.io/search?q={} to see more)",
+                 total_crates - u32::from(limit),
+                 percent_encode(query.as_bytes(), QUERY_ENCODE_SET));
+    }
+
+    Ok(())
+}
diff --git a/collector/compile-benchmarks/cargo/src/cargo/ops/resolve.rs b/collector/compile-benchmarks/cargo/src/cargo/ops/resolve.rs
new file mode 100644
index 000000000..f26eb8e97
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/ops/resolve.rs
@@ -0,0 +1,311 @@
+use std::collections::HashSet;
+
+use core::{PackageId, PackageIdSpec, PackageSet, Source, SourceId, Workspace};
+use core::registry::PackageRegistry;
+use core::resolver::{self, Resolve, Method};
+use sources::PathSource;
+use ops;
+use util::profile;
+use util::errors::{CargoResult, CargoResultExt};
+
+/// Resolve all dependencies for the workspace using the previous
+/// lockfile as a guide if present.
+///
+/// This function will also write the result of resolution as a new
+/// lockfile.
+pub fn resolve_ws<'a>(ws: &Workspace<'a>) -> CargoResult<(PackageSet<'a>, Resolve)> {
+    let mut registry = PackageRegistry::new(ws.config())?;
+    let resolve = resolve_with_registry(ws, &mut registry, true)?;
+    let packages = get_resolved_packages(&resolve, registry);
+    Ok((packages, resolve))
+}
+
+/// Resolves dependencies for some packages of the workspace,
+/// taking into account `paths` overrides and activated features.
+pub fn resolve_ws_precisely<'a>(ws: &Workspace<'a>,
+                                source: Option<Box<Source + 'a>>,
+                                features: &[String],
+                                all_features: bool,
+                                no_default_features: bool,
+                                specs: &[PackageIdSpec])
+                                -> CargoResult<(PackageSet<'a>, Resolve)> {
+    let features = features.iter()
+        .flat_map(|s| s.split_whitespace())
+        .flat_map(|s| s.split(','))
+        .filter(|s| !s.is_empty())
+        .map(|s| s.to_string())
+        .collect::<Vec<String>>();
+
+    let mut registry = PackageRegistry::new(ws.config())?;
+    if let Some(source) = source {
+        registry.add_preloaded(source);
+    }
+
+    let resolve = if ws.require_optional_deps() {
+        // First, resolve the root_package's *listed* dependencies, as well as
+        // downloading and updating all remotes and such.
+        let resolve = resolve_with_registry(ws, &mut registry, false)?;
+
+        // Second, resolve with precisely what we're doing. Filter out
+        // transitive dependencies if necessary, specify features, handle
+        // overrides, etc.
+        let _p = profile::start("resolving w/ overrides...");
+
+        add_overrides(&mut registry, ws)?;
+
+        for &(ref replace_spec, ref dep) in ws.root_replace() {
+            if !resolve.iter().any(|r| replace_spec.matches(r) && !dep.matches_id(r)) {
+                ws.config().shell().warn(
+                    format!("package replacement is not used: {}", replace_spec)
+                )?
+            }
+        }
+
+        Some(resolve)
+    } else {
+        None
+    };
+
+    let method = if all_features {
+        Method::Everything
+    } else {
+        Method::Required {
+            dev_deps: true, // TODO: remove this option?
+            features: &features,
+            uses_default_features: !no_default_features,
+        }
+    };
+
+    let resolved_with_overrides =
+        ops::resolve_with_previous(&mut registry, ws,
+                                   method, resolve.as_ref(), None,
+                                   specs, true)?;
+
+    let packages = get_resolved_packages(&resolved_with_overrides, registry);
+
+    Ok((packages, resolved_with_overrides))
+}
+
+fn resolve_with_registry(ws: &Workspace, registry: &mut PackageRegistry, warn: bool)
+                         -> CargoResult<Resolve> {
+    let prev = ops::load_pkg_lockfile(ws)?;
+    let resolve = resolve_with_previous(registry, ws,
+                                        Method::Everything,
+                                        prev.as_ref(), None, &[], warn)?;
+
+    if !ws.is_ephemeral() {
+        ops::write_pkg_lockfile(ws, &resolve)?;
+    }
+    Ok(resolve)
+}
+
+
+/// Resolve all dependencies for a package using an optional previous instance
+/// of resolve to guide the resolution process.
+///
+/// This also takes an optional hash set, `to_avoid`, which is a list of package
+/// ids that should be avoided when consulting the previous instance of resolve
+/// (often used in pairings with updates).
+///
+/// The previous resolve normally comes from a lockfile. This function does not
+/// read or write lockfiles from the filesystem.
+pub fn resolve_with_previous<'a>(registry: &mut PackageRegistry,
+                                 ws: &Workspace,
+                                 method: Method,
+                                 previous: Option<&'a Resolve>,
+                                 to_avoid: Option<&HashSet<&'a PackageId>>,
+                                 specs: &[PackageIdSpec],
+                                 warn: bool)
+                                 -> CargoResult<Resolve> {
+    // Here we place an artificial limitation that all non-registry sources
+    // cannot be locked at more than one revision. This means that if a git
+    // repository provides more than one package, they must all be updated in
+    // step when any of them are updated.
+    //
+    // TODO: This seems like a hokey reason to single out the registry as being
+    // different
+    let mut to_avoid_sources = HashSet::new();
+    if let Some(to_avoid) = to_avoid {
+        to_avoid_sources.extend(to_avoid.iter()
+                                        .map(|p| p.source_id())
+                                        .filter(|s| !s.is_registry()));
+    }
+
+    let ref keep = |p: &&'a PackageId| {
+        !to_avoid_sources.contains(&p.source_id()) && match to_avoid {
+            Some(set) => !set.contains(p),
+            None => true,
+        }
+    };
+
+    // In the case where a previous instance of resolve is available, we
+    // want to lock as many packages as possible to the previous version
+    // without disturbing the graph structure. To this end we perform
+    // two actions here:
+    //
+    // 1. We inform the package registry of all locked packages. This
+    //    involves informing it of both the locked package's id as well
+    //    as the versions of all locked dependencies. The registry will
+    //    then take this information into account when it is queried.
+    //
+    // 2. The specified package's summary will have its dependencies
+    //    modified to their precise variants. This will instruct the
+    //    first step of the resolution process to not query for ranges
+    //    but rather for precise dependency versions.
+    //
+    // This process must handle altered dependencies, however, as
+    // it's possible for a manifest to change over time to have
+    // dependencies added, removed, or modified to different version
+    // ranges. To deal with this, we only actually lock a dependency
+    // to the previously resolved version if the dependency listed
+    // still matches the locked version.
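+    // (For example, a lock on `foo 0.1.2` is honored while the manifest still
+    // asks for `0.1`, but dropped once the requirement moves to `0.2`.)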
+    if let Some(r) = previous {
+        trace!("previous: {:?}", r);
+        for node in r.iter().filter(keep) {
+            let deps = r.deps_not_replaced(node)
+                        .filter(keep)
+                        .cloned().collect();
+            registry.register_lock(node.clone(), deps);
+        }
+    }
+
+    for (url, patches) in ws.root_patch() {
+        let previous = match previous {
+            Some(r) => r,
+            None => {
+                registry.patch(url, patches)?;
+                continue
+            }
+        };
+        let patches = patches.iter().map(|dep| {
+            let unused = previous.unused_patches();
+            let candidates = previous.iter().chain(unused);
+            match candidates.filter(keep).find(|id| dep.matches_id(id)) {
+                Some(id) => {
+                    let mut dep = dep.clone();
+                    dep.lock_to(id);
+                    dep
+                }
+                None => dep.clone(),
+            }
+        }).collect::<Vec<_>>();
+        registry.patch(url, &patches)?;
+    }
+
+    let mut summaries = Vec::new();
+    for member in ws.members() {
+        registry.add_sources(&[member.package_id().source_id().clone()])?;
+        let method_to_resolve = match method {
+            // When resolving everything for a workspace we want to be sure to
+            // resolve all members in the workspace, so propagate the
+            // `Method::Everything`.
+            Method::Everything => Method::Everything,
+
+            // If we're not resolving everything though then we're constructing the
+            // exact crate graph we're going to build. Here we don't necessarily
+            // want to keep around all workspace crates as they may not all be
+            // built/tested.
+            //
+            // Additionally, the `method` specified represents command line
+            // flags, which really only matters for the current package
+            // (determined by the cwd). If other packages are specified (via
+            // `-p`) then the command line flags like features don't apply to
+            // them.
+            //
+            // As a result, if this `member` is the current member of the
+            // workspace, then we use `method` specified. Otherwise we use a
+            // base method with no features specified but using default features
+            // for any other packages specified with `-p`.
+            Method::Required { dev_deps, .. } => {
+                let base = Method::Required {
+                    dev_deps: dev_deps,
+                    features: &[],
+                    uses_default_features: true,
+                };
+                let member_id = member.package_id();
+                match ws.current_opt() {
+                    Some(current) if member_id == current.package_id() => method,
+                    _ => {
+                        if specs.iter().any(|spec| spec.matches(member_id)) {
+                            base
+                        } else {
+                            continue
+                        }
+                    }
+                }
+            }
+        };
+
+        let summary = registry.lock(member.summary().clone());
+        summaries.push((summary, method_to_resolve));
+    }
+
+    let root_replace = ws.root_replace();
+
+    let replace = match previous {
+        Some(r) => {
+            root_replace.iter().map(|&(ref spec, ref dep)| {
+                for (key, val) in r.replacements().iter() {
+                    if spec.matches(key) && dep.matches_id(val) && keep(&val) {
+                        let mut dep = dep.clone();
+                        dep.lock_to(val);
+                        return (spec.clone(), dep)
+                    }
+                }
+                (spec.clone(), dep.clone())
+            }).collect::<Vec<_>>()
+        }
+        None => root_replace.to_vec(),
+    };
+
+    let config = if warn {
+        Some(ws.config())
+    } else {
+        None
+    };
+    let mut resolved = resolver::resolve(&summaries,
+                                         &replace,
+                                         registry,
+                                         config)?;
+    resolved.register_used_patches(registry.patches());
+    if let Some(previous) = previous {
+        resolved.merge_from(previous)?;
+    }
+    Ok(resolved)
+}
+
+/// Read the `paths` configuration variable to discover all path overrides that
+/// have been configured.
+fn add_overrides<'a>(registry: &mut PackageRegistry<'a>,
+                     ws: &Workspace<'a>) -> CargoResult<()> {
+    let paths = match ws.config().get_list("paths")? {
{ + Some(list) => list, + None => return Ok(()) + }; + + let paths = paths.val.iter().map(|&(ref s, ref p)| { + // The path listed next to the string is the config file in which the + // key was located, so we want to pop off the `.cargo/config` component + // to get the directory containing the `.cargo` folder. + (p.parent().unwrap().parent().unwrap().join(s), p) + }); + + for (path, definition) in paths { + let id = SourceId::for_path(&path)?; + let mut source = PathSource::new_recursive(&path, &id, ws.config()); + source.update().chain_err(|| { + format!("failed to update path override `{}` \ + (defined in `{}`)", path.display(), + definition.display()) + })?; + registry.add_override(Box::new(source)); + } + Ok(()) +} + +fn get_resolved_packages<'a>(resolve: &Resolve, + registry: PackageRegistry<'a>) + -> PackageSet<'a> { + let ids: Vec = resolve.iter().cloned().collect(); + registry.get(&ids) +} + diff --git a/collector/compile-benchmarks/cargo/src/cargo/sources/config.rs b/collector/compile-benchmarks/cargo/src/cargo/sources/config.rs new file mode 100644 index 000000000..5aa44110a --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/sources/config.rs @@ -0,0 +1,226 @@ +//! Implementation of configuration for various sources +//! +//! This module will parse the various `source.*` TOML configuration keys into a +//! structure usable by Cargo itself. Currently this is primarily used to map +//! sources to one another via the `replace-with` key in `.cargo/config`. + +use std::collections::HashMap; +use std::path::{Path, PathBuf}; + +use url::Url; + +use core::{Source, SourceId, GitReference}; +use sources::ReplacedSource; +use util::{Config, ToUrl}; +use util::config::ConfigValue; +use util::errors::{CargoError, CargoResult, CargoResultExt}; + +#[derive(Clone)] +pub struct SourceConfigMap<'cfg> { + cfgs: HashMap, + id2name: HashMap, + config: &'cfg Config, +} + +/// Configuration for a particular source, found in TOML looking like: +/// +/// ```toml +/// [source.crates-io] +/// registry = 'https://github.com/rust-lang/crates.io-index' +/// replace-with = 'foo' # optional +/// ``` +#[derive(Clone)] +struct SourceConfig { + // id this source corresponds to, inferred from the various defined keys in + // the configuration + id: SourceId, + + // Name of the source that this source should be replaced with. This field + // is a tuple of (name, path) where path is where this configuration key was + // defined (the literal `.cargo/config` file). + replace_with: Option<(String, PathBuf)>, +} + +impl<'cfg> SourceConfigMap<'cfg> { + pub fn new(config: &'cfg Config) -> CargoResult> { + let mut base = SourceConfigMap::empty(config)?; + if let Some(table) = config.get_table("source")? 
{ + for (key, value) in table.val.iter() { + base.add_config(key, value)?; + } + } + Ok(base) + } + + pub fn empty(config: &'cfg Config) -> CargoResult> { + let mut base = SourceConfigMap { + cfgs: HashMap::new(), + id2name: HashMap::new(), + config: config, + }; + base.add("crates-io", SourceConfig { + id: SourceId::crates_io(config)?, + replace_with: None, + }); + Ok(base) + } + + pub fn config(&self) -> &'cfg Config { + self.config + } + + pub fn load(&self, id: &SourceId) -> CargoResult> { + debug!("loading: {}", id); + let mut name = match self.id2name.get(id) { + Some(name) => name, + None => return Ok(id.load(self.config)?), + }; + let mut path = Path::new("/"); + let orig_name = name; + let new_id; + loop { + let cfg = match self.cfgs.get(name) { + Some(cfg) => cfg, + None => bail!("could not find a configured source with the \ + name `{}` when attempting to lookup `{}` \ + (configuration in `{}`)", + name, orig_name, path.display()), + }; + match cfg.replace_with { + Some((ref s, ref p)) => { + name = s; + path = p; + } + None if *id == cfg.id => return Ok(id.load(self.config)?), + None => { + new_id = cfg.id.with_precise(id.precise() + .map(|s| s.to_string())); + break + } + } + debug!("following pointer to {}", name); + if name == orig_name { + bail!("detected a cycle of `replace-with` sources, the source \ + `{}` is eventually replaced with itself \ + (configuration in `{}`)", name, path.display()) + } + } + let new_src = new_id.load(self.config)?; + let old_src = id.load(self.config)?; + if !new_src.supports_checksums() && old_src.supports_checksums() { + bail!("\ +cannot replace `{orig}` with `{name}`, the source `{orig}` supports \ +checksums, but `{name}` does not + +a lock file compatible with `{orig}` cannot be generated in this situation +", orig = orig_name, name = name); + } + + if old_src.requires_precise() && id.precise().is_none() { + bail!("\ +the source {orig} requires a lock file to be present first before it can be +used against vendored source code + +remove the source replacement configuration, generate a lock file, and then +restore the source replacement configuration to continue the build +", orig = orig_name); + } + + Ok(Box::new(ReplacedSource::new(id, &new_id, new_src))) + } + + fn add(&mut self, name: &str, cfg: SourceConfig) { + self.id2name.insert(cfg.id.clone(), name.to_string()); + self.cfgs.insert(name.to_string(), cfg); + } + + fn add_config(&mut self, name: &str, cfg: &ConfigValue) -> CargoResult<()> { + let (table, _path) = cfg.table(&format!("source.{}", name))?; + let mut srcs = Vec::new(); + if let Some(val) = table.get("registry") { + let url = url(val, &format!("source.{}.registry", name))?; + srcs.push(SourceId::for_registry(&url)?); + } + if let Some(val) = table.get("local-registry") { + let (s, path) = val.string(&format!("source.{}.local-registry", + name))?; + let mut path = path.to_path_buf(); + path.pop(); + path.pop(); + path.push(s); + srcs.push(SourceId::for_local_registry(&path)?); + } + if let Some(val) = table.get("directory") { + let (s, path) = val.string(&format!("source.{}.directory", + name))?; + let mut path = path.to_path_buf(); + path.pop(); + path.pop(); + path.push(s); + srcs.push(SourceId::for_directory(&path)?); + } + if let Some(val) = table.get("git") { + let url = url(val, &format!("source.{}.git", name))?; + let try = |s: &str| { + let val = match table.get(s) { + Some(s) => s, + None => return Ok(None), + }; + let key = format!("source.{}.{}", name, s); + val.string(&key).map(Some) + }; + let reference = match 
try("branch")? { + Some(b) => GitReference::Branch(b.0.to_string()), + None => { + match try("tag")? { + Some(b) => GitReference::Tag(b.0.to_string()), + None => { + match try("rev")? { + Some(b) => GitReference::Rev(b.0.to_string()), + None => GitReference::Branch("master".to_string()), + } + } + } + } + }; + srcs.push(SourceId::for_git(&url, reference)?); + } + if name == "crates-io" && srcs.is_empty() { + srcs.push(SourceId::crates_io(self.config)?); + } + + let mut srcs = srcs.into_iter(); + let src = srcs.next().ok_or_else(|| { + CargoError::from(format!("no source URL specified for `source.{}`, need \ + either `registry` or `local-registry` defined", + name)) + })?; + if srcs.next().is_some() { + return Err(format!("more than one source URL specified for \ + `source.{}`", name).into()) + } + + let mut replace_with = None; + if let Some(val) = table.get("replace-with") { + let (s, path) = val.string(&format!("source.{}.replace-with", + name))?; + replace_with = Some((s.to_string(), path.to_path_buf())); + } + + self.add(name, SourceConfig { + id: src, + replace_with: replace_with, + }); + + return Ok(()); + + fn url(cfg: &ConfigValue, key: &str) -> CargoResult { + let (url, path) = cfg.string(key)?; + url.to_url().chain_err(|| { + format!("configuration key `{}` specified an invalid \ + URL (in {})", key, path.display()) + + }) + } + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/sources/directory.rs b/collector/compile-benchmarks/cargo/src/cargo/sources/directory.rs new file mode 100644 index 000000000..902d64d22 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/sources/directory.rs @@ -0,0 +1,204 @@ +use std::collections::HashMap; +use std::fmt::{self, Debug, Formatter}; +use std::fs::File; +use std::io::Read; +use std::path::{Path, PathBuf}; + +use hex::ToHex; + +use serde_json; + +use core::{Package, PackageId, Summary, SourceId, Source, Dependency, Registry}; +use sources::PathSource; +use util::{Config, Sha256}; +use util::errors::{CargoResult, CargoResultExt}; +use util::paths; + +pub struct DirectorySource<'cfg> { + source_id: SourceId, + root: PathBuf, + packages: HashMap, + config: &'cfg Config, +} + +#[derive(Deserialize)] +struct Checksum { + package: Option, + files: HashMap, +} + +impl<'cfg> DirectorySource<'cfg> { + pub fn new(path: &Path, id: &SourceId, config: &'cfg Config) + -> DirectorySource<'cfg> { + DirectorySource { + source_id: id.clone(), + root: path.to_path_buf(), + config: config, + packages: HashMap::new(), + } + } +} + +impl<'cfg> Debug for DirectorySource<'cfg> { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "DirectorySource {{ root: {:?} }}", self.root) + } +} + +impl<'cfg> Registry for DirectorySource<'cfg> { + fn query(&mut self, + dep: &Dependency, + f: &mut FnMut(Summary)) -> CargoResult<()> { + let packages = self.packages.values().map(|p| &p.0); + let matches = packages.filter(|pkg| dep.matches(pkg.summary())); + for summary in matches.map(|pkg| pkg.summary().clone()) { + f(summary); + } + Ok(()) + } + + fn supports_checksums(&self) -> bool { + true + } + + fn requires_precise(&self) -> bool { + true + } +} + +impl<'cfg> Source for DirectorySource<'cfg> { + fn source_id(&self) -> &SourceId { + &self.source_id + } + + fn update(&mut self) -> CargoResult<()> { + self.packages.clear(); + let entries = self.root.read_dir().chain_err(|| { + format!("failed to read root of directory source: {}", + self.root.display()) + })?; + + for entry in entries { + let entry = entry?; + let path = entry.path(); + + 
+            // Ignore hidden/dot directories as they typically don't contain
+            // crates and otherwise may conflict with a VCS
+            // (rust-lang/cargo#3414).
+            if let Some(s) = path.file_name().and_then(|s| s.to_str()) {
+                if s.starts_with('.') {
+                    continue
+                }
+            }
+
+            // Vendor directories are often checked into a VCS, but throughout
+            // the lifetime of a vendor dir crates are often added and deleted.
+            // Some VCS implementations don't always fully delete the directory
+            // when a dir is removed from a different checkout. Sometimes a
+            // mostly-empty dir is left behind.
+            //
+            // To help Cargo work by default in more cases we try to
+            // handle this case by default. If the directory looks like it only
+            // has dotfiles in it (or no files at all) then we skip it.
+            //
+            // In general we don't want to skip completely malformed directories
+            // to help with debugging, so we don't just ignore errors in
+            // `update` below.
+            let mut only_dotfile = true;
+            for entry in path.read_dir()?.filter_map(|e| e.ok()) {
+                if let Some(s) = entry.file_name().to_str() {
+                    if s.starts_with('.') {
+                        continue
+                    }
+                }
+                only_dotfile = false;
+            }
+            if only_dotfile {
+                continue
+            }
+
+            let mut src = PathSource::new(&path, &self.source_id, self.config);
+            src.update()?;
+            let pkg = src.root_package()?;
+
+            let cksum_file = path.join(".cargo-checksum.json");
+            let cksum = paths::read(&path.join(cksum_file)).chain_err(|| {
+                format!("failed to load checksum `.cargo-checksum.json` \
+                         of {} v{}",
+                        pkg.package_id().name(),
+                        pkg.package_id().version())
+
+            })?;
+            let cksum: Checksum = serde_json::from_str(&cksum).chain_err(|| {
+                format!("failed to decode `.cargo-checksum.json` of \
+                         {} v{}",
+                        pkg.package_id().name(),
+                        pkg.package_id().version())
+            })?;
+
+            let mut manifest = pkg.manifest().clone();
+            let mut summary = manifest.summary().clone();
+            if let Some(ref package) = cksum.package {
+                summary = summary.set_checksum(package.clone());
+            }
+            manifest.set_summary(summary);
+            let pkg = Package::new(manifest, pkg.manifest_path());
+            self.packages.insert(pkg.package_id().clone(), (pkg, cksum));
+        }
+
+        Ok(())
+    }
+
+    fn download(&mut self, id: &PackageId) -> CargoResult<Package> {
+        self.packages.get(id).map(|p| &p.0).cloned().ok_or_else(|| {
+            format!("failed to find package with id: {}", id).into()
+        })
+    }
+
+    fn fingerprint(&self, pkg: &Package) -> CargoResult<String> {
+        Ok(pkg.package_id().version().to_string())
+    }
+
+    fn verify(&self, id: &PackageId) -> CargoResult<()> {
+        let (pkg, cksum) = match self.packages.get(id) {
+            Some(&(ref pkg, ref cksum)) => (pkg, cksum),
+            None => bail!("failed to find entry for `{}` in directory source",
+                          id),
+        };
+
+        let mut buf = [0; 16 * 1024];
+        for (file, cksum) in cksum.files.iter() {
+            let mut h = Sha256::new();
+            let file = pkg.root().join(file);
+
+            (|| -> CargoResult<()> {
+                let mut f = File::open(&file)?;
+                loop {
+                    match f.read(&mut buf)?
{ + 0 => return Ok(()), + n => h.update(&buf[..n]), + } + } + })().chain_err(|| { + format!("failed to calculate checksum of: {}", + file.display()) + })?; + + let actual = h.finish().to_hex(); + if &*actual != cksum { + bail!("\ + the listed checksum of `{}` has changed:\n\ + expected: {}\n\ + actual: {}\n\ + \n\ + directory sources are not intended to be edited, if \ + modifications are required then it is recommended \ + that [replace] is used with a forked copy of the \ + source\ + ", file.display(), cksum, actual); + } + } + + Ok(()) + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/sources/git/mod.rs b/collector/compile-benchmarks/cargo/src/cargo/sources/git/mod.rs new file mode 100644 index 000000000..0ef4db4d6 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/sources/git/mod.rs @@ -0,0 +1,4 @@ +pub use self::utils::{GitRemote, GitDatabase, GitCheckout, GitRevision, fetch}; +pub use self::source::{GitSource, canonicalize_url}; +mod utils; +mod source; diff --git a/collector/compile-benchmarks/cargo/src/cargo/sources/git/source.rs b/collector/compile-benchmarks/cargo/src/cargo/sources/git/source.rs new file mode 100644 index 000000000..13e266b04 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/sources/git/source.rs @@ -0,0 +1,268 @@ +use std::fmt::{self, Debug, Formatter}; + +use url::Url; + +use core::source::{Source, SourceId}; +use core::GitReference; +use core::{Package, PackageId, Summary, Registry, Dependency}; +use util::Config; +use util::errors::{CargoError, CargoResult}; +use util::hex::short_hash; +use sources::PathSource; +use sources::git::utils::{GitRemote, GitRevision}; + +/* TODO: Refactor GitSource to delegate to a PathSource + */ +pub struct GitSource<'cfg> { + remote: GitRemote, + reference: GitReference, + source_id: SourceId, + path_source: Option>, + rev: Option, + ident: String, + config: &'cfg Config, +} + +impl<'cfg> GitSource<'cfg> { + pub fn new(source_id: &SourceId, + config: &'cfg Config) -> CargoResult> { + assert!(source_id.is_git(), "id is not git, id={}", source_id); + + let remote = GitRemote::new(source_id.url()); + let ident = ident(source_id.url())?; + + let reference = match source_id.precise() { + Some(s) => GitReference::Rev(s.to_string()), + None => source_id.git_reference().unwrap().clone(), + }; + + let source = GitSource { + remote: remote, + reference: reference, + source_id: source_id.clone(), + path_source: None, + rev: None, + ident: ident, + config: config, + }; + + Ok(source) + } + + pub fn url(&self) -> &Url { self.remote.url() } + + pub fn read_packages(&mut self) -> CargoResult> { + if self.path_source.is_none() { + self.update()?; + } + self.path_source.as_mut().unwrap().read_packages() + } +} + +fn ident(url: &Url) -> CargoResult { + let url = canonicalize_url(url)?; + let ident = url.path_segments().and_then(|mut s| s.next_back()).unwrap_or(""); + + let ident = if ident == "" { + "_empty" + } else { + ident + }; + + Ok(format!("{}-{}", ident, short_hash(&url))) +} + +// Some hacks and heuristics for making equivalent URLs hash the same +pub fn canonicalize_url(url: &Url) -> CargoResult { + let mut url = url.clone(); + + // cannot-be-a-base-urls are not supported + // eg. 
github.com:rust-lang-nursery/rustfmt.git + if url.cannot_be_a_base() { + return Err(format!("invalid url `{}`: cannot-be-a-base-URLs are not supported", url).into()); + } + + // Strip a trailing slash + if url.path().ends_with('/') { + url.path_segments_mut().unwrap().pop_if_empty(); + } + + // HACKHACK: For github URL's specifically just lowercase + // everything. GitHub treats both the same, but they hash + // differently, and we're gonna be hashing them. This wants a more + // general solution, and also we're almost certainly not using the + // same case conversion rules that GitHub does. (#84) + if url.host_str() == Some("github.com") { + url.set_scheme("https").unwrap(); + let path = url.path().to_lowercase(); + url.set_path(&path); + } + + // Repos generally can be accessed with or w/o '.git' + let needs_chopping = url.path().ends_with(".git"); + if needs_chopping { + let last = { + let last = url.path_segments().unwrap().next_back().unwrap(); + last[..last.len() - 4].to_owned() + }; + url.path_segments_mut().unwrap().pop().push(&last); + } + + Ok(url) +} + +impl<'cfg> Debug for GitSource<'cfg> { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "git repo at {}", self.remote.url())?; + + match self.reference.pretty_ref() { + Some(s) => write!(f, " ({})", s), + None => Ok(()) + } + } +} + +impl<'cfg> Registry for GitSource<'cfg> { + fn query(&mut self, + dep: &Dependency, + f: &mut FnMut(Summary)) -> CargoResult<()> { + let src = self.path_source.as_mut() + .expect("BUG: update() must be called before query()"); + src.query(dep, f) + } + + fn supports_checksums(&self) -> bool { + false + } + + fn requires_precise(&self) -> bool { + true + } +} + +impl<'cfg> Source for GitSource<'cfg> { + fn source_id(&self) -> &SourceId { + &self.source_id + } + + fn update(&mut self) -> CargoResult<()> { + let lock = self.config.git_path() + .open_rw(".cargo-lock-git", self.config, "the git checkouts")?; + + let db_path = lock.parent().join("db").join(&self.ident); + + // Resolve our reference to an actual revision, and check if the + // database already has that revision. If it does, we just load a + // database pinned at that revision, and if we don't we issue an update + // to try to find the revision. + let actual_rev = self.remote.rev_for(&db_path, &self.reference); + let should_update = actual_rev.is_err() || + self.source_id.precise().is_none(); + + let (repo, actual_rev) = if should_update { + self.config.shell().status("Updating", + format!("git repository `{}`", self.remote.url()))?; + + trace!("updating git source `{:?}`", self.remote); + + let repo = self.remote.checkout(&db_path, self.config)?; + let rev = repo.rev_for(&self.reference).map_err(CargoError::into_internal)?; + (repo, rev) + } else { + (self.remote.db_at(&db_path)?, actual_rev.unwrap()) + }; + + // Don’t use the full hash, + // to contribute less to reaching the path length limit on Windows: + // https://github.com/servo/servo/pull/14397 + let short_id = repo.to_short_id(actual_rev.clone()).unwrap(); + + let checkout_path = lock.parent().join("checkouts") + .join(&self.ident).join(short_id.as_str()); + + // Copy the database to the checkout location. After this we could drop + // the lock on the database as we no longer needed it, but we leave it + // in scope so the destructors here won't tamper with too much. + // Checkout is immutable, so we don't need to protect it with a lock once + // it is created. 
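+        // Rough picture of the on-disk layout the paths above refer to
+        // (reconstructed from this function; shown for orientation only):
+        //
+        //     <git dir>/db/<ident>/                    # shared bare clone
+        //     <git dir>/checkouts/<ident>/<short-id>/  # per-revision checkout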
+ repo.copy_to(actual_rev.clone(), &checkout_path, self.config)?; + + let source_id = self.source_id.with_precise(Some(actual_rev.to_string())); + let path_source = PathSource::new_recursive(&checkout_path, + &source_id, + self.config); + + self.path_source = Some(path_source); + self.rev = Some(actual_rev); + self.path_source.as_mut().unwrap().update() + } + + fn download(&mut self, id: &PackageId) -> CargoResult { + trace!("getting packages for package id `{}` from `{:?}`", id, + self.remote); + self.path_source.as_mut() + .expect("BUG: update() must be called before get()") + .download(id) + } + + fn fingerprint(&self, _pkg: &Package) -> CargoResult { + Ok(self.rev.as_ref().unwrap().to_string()) + } +} + +#[cfg(test)] +mod test { + use url::Url; + use super::ident; + use util::ToUrl; + + #[test] + pub fn test_url_to_path_ident_with_path() { + let ident = ident(&url("https://github.com/carlhuda/cargo")).unwrap(); + assert!(ident.starts_with("cargo-")); + } + + #[test] + pub fn test_url_to_path_ident_without_path() { + let ident = ident(&url("https://github.com")).unwrap(); + assert!(ident.starts_with("_empty-")); + } + + #[test] + fn test_canonicalize_idents_by_stripping_trailing_url_slash() { + let ident1 = ident(&url("https://github.com/PistonDevelopers/piston/")).unwrap(); + let ident2 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap(); + assert_eq!(ident1, ident2); + } + + #[test] + fn test_canonicalize_idents_by_lowercasing_github_urls() { + let ident1 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap(); + let ident2 = ident(&url("https://github.com/pistondevelopers/piston")).unwrap(); + assert_eq!(ident1, ident2); + } + + #[test] + fn test_canonicalize_idents_by_stripping_dot_git() { + let ident1 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap(); + let ident2 = ident(&url("https://github.com/PistonDevelopers/piston.git")).unwrap(); + assert_eq!(ident1, ident2); + } + + #[test] + fn test_canonicalize_idents_different_protocols() { + let ident1 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap(); + let ident2 = ident(&url("git://github.com/PistonDevelopers/piston")).unwrap(); + assert_eq!(ident1, ident2); + } + + #[test] + fn test_canonicalize_cannot_be_a_base_urls() { + assert!(ident(&url("github.com:PistonDevelopers/piston")).is_err()); + assert!(ident(&url("google.com:PistonDevelopers/piston")).is_err()); + } + + fn url(s: &str) -> Url { + s.to_url().unwrap() + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/sources/git/utils.rs b/collector/compile-benchmarks/cargo/src/cargo/sources/git/utils.rs new file mode 100644 index 000000000..505fd24a8 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/sources/git/utils.rs @@ -0,0 +1,733 @@ +use std::env; +use std::fmt; +use std::fs::{self, File}; +use std::mem; +use std::path::{Path, PathBuf}; +use std::process::Command; + +use curl::easy::{Easy, List}; +use git2::{self, ObjectType}; +use serde::ser::{self, Serialize}; +use url::Url; + +use core::GitReference; +use util::{ToUrl, internal, Config, network}; +use util::errors::{CargoResult, CargoResultExt, CargoError}; + +#[derive(PartialEq, Clone, Debug)] +pub struct GitRevision(git2::Oid); + +impl ser::Serialize for GitRevision { + fn serialize(&self, s: S) -> Result { + serialize_str(self, s) + } +} + +fn 
serialize_str(t: &T, s: S) -> Result + where T: fmt::Display, + S: ser::Serializer, +{ + t.to_string().serialize(s) +} + +impl fmt::Display for GitRevision { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&self.0, f) + } +} + +pub struct GitShortID(git2::Buf); + +impl GitShortID { + pub fn as_str(&self) -> &str { + self.0.as_str().unwrap() + } +} + +/// `GitRemote` represents a remote repository. It gets cloned into a local +/// `GitDatabase`. +#[derive(PartialEq, Clone, Debug, Serialize)] +pub struct GitRemote { + #[serde(serialize_with = "serialize_str")] + url: Url, +} + +/// `GitDatabase` is a local clone of a remote repository's database. Multiple +/// `GitCheckouts` can be cloned from this `GitDatabase`. +#[derive(Serialize)] +pub struct GitDatabase { + remote: GitRemote, + path: PathBuf, + #[serde(skip_serializing)] + repo: git2::Repository, +} + +/// `GitCheckout` is a local checkout of a particular revision. Calling +/// `clone_into` with a reference will resolve the reference into a revision, +/// and return a `CargoError` if no revision for that reference was found. +#[derive(Serialize)] +pub struct GitCheckout<'a> { + database: &'a GitDatabase, + location: PathBuf, + revision: GitRevision, + #[serde(skip_serializing)] + repo: git2::Repository, +} + +// Implementations + +impl GitRemote { + pub fn new(url: &Url) -> GitRemote { + GitRemote { url: url.clone() } + } + + pub fn url(&self) -> &Url { + &self.url + } + + pub fn rev_for(&self, path: &Path, reference: &GitReference) + -> CargoResult { + let db = self.db_at(path)?; + db.rev_for(reference) + } + + pub fn checkout(&self, into: &Path, cargo_config: &Config) -> CargoResult { + let repo = match git2::Repository::open(into) { + Ok(mut repo) => { + self.fetch_into(&mut repo, cargo_config).chain_err(|| { + format!("failed to fetch into {}", into.display()) + })?; + repo + } + Err(..) => { + self.clone_into(into, cargo_config).chain_err(|| { + format!("failed to clone into: {}", into.display()) + })? + } + }; + + Ok(GitDatabase { + remote: self.clone(), + path: into.to_path_buf(), + repo: repo, + }) + } + + pub fn db_at(&self, db_path: &Path) -> CargoResult { + let repo = git2::Repository::open(db_path)?; + Ok(GitDatabase { + remote: self.clone(), + path: db_path.to_path_buf(), + repo: repo, + }) + } + + fn fetch_into(&self, dst: &mut git2::Repository, cargo_config: &Config) -> CargoResult<()> { + // Create a local anonymous remote in the repository to fetch the url + let refspec = "refs/heads/*:refs/heads/*"; + fetch(dst, &self.url, refspec, cargo_config) + } + + fn clone_into(&self, dst: &Path, cargo_config: &Config) -> CargoResult { + if fs::metadata(&dst).is_ok() { + fs::remove_dir_all(dst)?; + } + fs::create_dir_all(dst)?; + let mut repo = git2::Repository::init_bare(dst)?; + fetch(&mut repo, &self.url, "refs/heads/*:refs/heads/*", cargo_config)?; + Ok(repo) + } +} + +impl GitDatabase { + fn path(&self) -> &Path { + &self.path + } + + pub fn copy_to(&self, rev: GitRevision, dest: &Path, cargo_config: &Config) + -> CargoResult { + let checkout = match git2::Repository::open(dest) { + Ok(repo) => { + let mut checkout = GitCheckout::new(dest, self, rev, repo); + if !checkout.is_fresh() { + checkout.fetch(cargo_config)?; + checkout.reset()?; + assert!(checkout.is_fresh()); + } + checkout + } + Err(..) 
=> GitCheckout::clone_into(dest, self, rev)?, + }; + checkout.update_submodules(cargo_config)?; + Ok(checkout) + } + + pub fn rev_for(&self, reference: &GitReference) -> CargoResult { + let id = match *reference { + GitReference::Tag(ref s) => { + (|| -> CargoResult { + let refname = format!("refs/tags/{}", s); + let id = self.repo.refname_to_id(&refname)?; + let obj = self.repo.find_object(id, None)?; + let obj = obj.peel(ObjectType::Commit)?; + Ok(obj.id()) + })().chain_err(|| { + format!("failed to find tag `{}`", s) + })? + } + GitReference::Branch(ref s) => { + (|| { + let b = self.repo.find_branch(s, git2::BranchType::Local)?; + b.get().target().ok_or_else(|| { + CargoError::from(format!("branch `{}` did not have a target", s)) + }) + })().chain_err(|| { + format!("failed to find branch `{}`", s) + })? + } + GitReference::Rev(ref s) => { + let obj = self.repo.revparse_single(s)?; + match obj.as_tag() { + Some(tag) => tag.target_id(), + None => obj.id(), + } + } + }; + Ok(GitRevision(id)) + } + + pub fn to_short_id(&self, revision: GitRevision) -> CargoResult { + let obj = self.repo.find_object(revision.0, None)?; + Ok(GitShortID(obj.short_id()?)) + } + + pub fn has_ref(&self, reference: &str) -> CargoResult<()> { + self.repo.revparse_single(reference)?; + Ok(()) + } +} + +impl<'a> GitCheckout<'a> { + fn new(path: &Path, database: &'a GitDatabase, revision: GitRevision, + repo: git2::Repository) + -> GitCheckout<'a> + { + GitCheckout { + location: path.to_path_buf(), + database: database, + revision: revision, + repo: repo, + } + } + + fn clone_into(into: &Path, database: &'a GitDatabase, + revision: GitRevision) + -> CargoResult> + { + let repo = GitCheckout::clone_repo(database.path(), into)?; + let checkout = GitCheckout::new(into, database, revision, repo); + checkout.reset()?; + Ok(checkout) + } + + fn clone_repo(source: &Path, into: &Path) -> CargoResult { + let dirname = into.parent().unwrap(); + + fs::create_dir_all(&dirname).chain_err(|| { + format!("Couldn't mkdir {}", dirname.display()) + })?; + + if fs::metadata(&into).is_ok() { + fs::remove_dir_all(into).chain_err(|| { + format!("Couldn't rmdir {}", into.display()) + })?; + } + + let url = source.to_url()?; + let url = url.to_string(); + let repo = git2::Repository::clone(&url, into) + .chain_err(|| { + internal(format!("failed to clone {} into {}", source.display(), + into.display())) + })?; + Ok(repo) + } + + fn is_fresh(&self) -> bool { + match self.repo.revparse_single("HEAD") { + Ok(ref head) if head.id() == self.revision.0 => { + // See comments in reset() for why we check this + fs::metadata(self.location.join(".cargo-ok")).is_ok() + } + _ => false, + } + } + + fn fetch(&mut self, cargo_config: &Config) -> CargoResult<()> { + info!("fetch {}", self.repo.path().display()); + let url = self.database.path.to_url()?; + let refspec = "refs/heads/*:refs/heads/*"; + fetch(&mut self.repo, &url, refspec, cargo_config)?; + Ok(()) + } + + fn reset(&self) -> CargoResult<()> { + // If we're interrupted while performing this reset (e.g. we die because + // of a signal) Cargo needs to be sure to try to check out this repo + // again on the next go-round. + // + // To enable this we have a dummy file in our checkout, .cargo-ok, which + // if present means that the repo has been successfully reset and is + // ready to go. Hence if we start to do a reset, we make sure this file + // *doesn't* exist, and then once we're done we create the file. 
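+        // The ordering that makes this safe, spelled out (illustrative):
+        //
+        //     1. remove `.cargo-ok`  -> is_fresh() now reports false
+        //     2. hard-reset to the revision (may be interrupted at any point)
+        //     3. create `.cargo-ok`  -> only reached once the reset succeeded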
+ let ok_file = self.location.join(".cargo-ok"); + let _ = fs::remove_file(&ok_file); + info!("reset {} to {}", self.repo.path().display(), self.revision); + let object = self.repo.find_object(self.revision.0, None)?; + self.repo.reset(&object, git2::ResetType::Hard, None)?; + File::create(ok_file)?; + Ok(()) + } + + fn update_submodules(&self, cargo_config: &Config) -> CargoResult<()> { + return update_submodules(&self.repo, cargo_config); + + fn update_submodules(repo: &git2::Repository, cargo_config: &Config) -> CargoResult<()> { + info!("update submodules for: {:?}", repo.workdir().unwrap()); + + for mut child in repo.submodules()? { + update_submodule(repo, &mut child, cargo_config) + .map_err(CargoError::into_internal) + .chain_err(|| { + format!("failed to update submodule `{}`", + child.name().unwrap_or("")) + })?; + } + Ok(()) + } + + fn update_submodule(parent: &git2::Repository, + child: &mut git2::Submodule, + cargo_config: &Config) -> CargoResult<()> { + child.init(false)?; + let url = child.url().ok_or_else(|| { + internal("non-utf8 url for submodule") + })?; + + // A submodule which is listed in .gitmodules but not actually + // checked out will not have a head id, so we should ignore it. + let head = match child.head_id() { + Some(head) => head, + None => return Ok(()), + }; + + // If the submodule hasn't been checked out yet, we need to + // clone it. If it has been checked out and the head is the same + // as the submodule's head, then we can bail out and go to the + // next submodule. + let head_and_repo = child.open().and_then(|repo| { + let target = repo.head()?.target(); + Ok((target, repo)) + }); + let mut repo = match head_and_repo { + Ok((head, repo)) => { + if child.head_id() == head { + return Ok(()) + } + repo + } + Err(..) => { + let path = parent.workdir().unwrap().join(child.path()); + let _ = fs::remove_dir_all(&path); + git2::Repository::clone(url, &path)? + } + }; + + // Fetch data from origin and reset to the head commit + let refspec = "refs/heads/*:refs/heads/*"; + let url = url.to_url()?; + fetch(&mut repo, &url, refspec, cargo_config).chain_err(|| { + internal(format!("failed to fetch submodule `{}` from {}", + child.name().unwrap_or(""), url)) + })?; + + repo.find_object(head, None) + .and_then(|obj| { repo.reset(&obj, git2::ResetType::Hard, None)})?; + update_submodules(&repo, cargo_config) + } + } +} + +/// Prepare the authentication callbacks for cloning a git repository. +/// +/// The main purpose of this function is to construct the "authentication +/// callback" which is used to clone a repository. This callback will attempt to +/// find the right authentication on the system (without user input) and will +/// guide libgit2 in doing so. +/// +/// The callback is provided `allowed` types of credentials, and we try to do as +/// much as possible based on that: +/// +/// * Prioritize SSH keys from the local ssh agent as they're likely the most +/// reliable. The username here is prioritized from the credential +/// callback, then from whatever is configured in git itself, and finally +/// we fall back to the generic user of `git`. +/// +/// * If a username/password is allowed, then we fallback to git2-rs's +/// implementation of the credential helper. This is what is configured +/// with `credential.helper` in git, and is the interface for the OSX +/// keychain, for example. +/// +/// * After the above two have failed, we just kinda grapple attempting to +/// return *something*. 
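+///
+/// As a sketch of how a caller drives this function (it mirrors the usage in
+/// `fetch` below; not a doc test since it needs a live repository):
+///
+/// ```ignore
+/// with_authentication(url.as_str(), &repo.config()?, |f| {
+///     let mut cb = git2::RemoteCallbacks::new();
+///     cb.credentials(f);
+///     // hand `cb` to the fetch/clone operation here
+///     Ok(())
+/// })
+/// ```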
+/// +/// If any form of authentication fails, libgit2 will repeatedly ask us for +/// credentials until we give it a reason to not do so. To ensure we don't +/// just sit here looping forever we keep track of authentications we've +/// attempted and we don't try the same ones again. +fn with_authentication(url: &str, cfg: &git2::Config, mut f: F) + -> CargoResult + where F: FnMut(&mut git2::Credentials) -> CargoResult +{ + let mut cred_helper = git2::CredentialHelper::new(url); + cred_helper.config(cfg); + + let mut ssh_username_requested = false; + let mut cred_helper_bad = None; + let mut ssh_agent_attempts = Vec::new(); + let mut any_attempts = false; + let mut tried_sshkey = false; + + let mut res = f(&mut |url, username, allowed| { + any_attempts = true; + // libgit2's "USERNAME" authentication actually means that it's just + // asking us for a username to keep going. This is currently only really + // used for SSH authentication and isn't really an authentication type. + // The logic currently looks like: + // + // let user = ...; + // if (user.is_null()) + // user = callback(USERNAME, null, ...); + // + // callback(SSH_KEY, user, ...) + // + // So if we're being called here then we know that (a) we're using ssh + // authentication and (b) no username was specified in the URL that + // we're trying to clone. We need to guess an appropriate username here, + // but that may involve a few attempts. Unfortunately we can't switch + // usernames during one authentication session with libgit2, so to + // handle this we bail out of this authentication session after setting + // the flag `ssh_username_requested`, and then we handle this below. + if allowed.contains(git2::USERNAME) { + debug_assert!(username.is_none()); + ssh_username_requested = true; + return Err(git2::Error::from_str("gonna try usernames later")) + } + + // An "SSH_KEY" authentication indicates that we need some sort of SSH + // authentication. This can currently either come from the ssh-agent + // process or from a raw in-memory SSH key. Cargo only supports using + // ssh-agent currently. + // + // If we get called with this then the only way that should be possible + // is if a username is specified in the URL itself (e.g. `username` is + // Some), hence the unwrap() here. We try custom usernames down below. + if allowed.contains(git2::SSH_KEY) && !tried_sshkey { + // If ssh-agent authentication fails, libgit2 will keep + // calling this callback asking for other authentication + // methods to try. Make sure we only try ssh-agent once, + // to avoid looping forever. + tried_sshkey = true; + let username = username.unwrap(); + debug_assert!(!ssh_username_requested); + ssh_agent_attempts.push(username.to_string()); + return git2::Cred::ssh_key_from_agent(username) + } + + // Sometimes libgit2 will ask for a username/password in plaintext. This + // is where Cargo would have an interactive prompt if we supported it, + // but we currently don't! Right now the only way we support fetching a + // plaintext password is through the `credential.helper` support, so + // fetch that here. + if allowed.contains(git2::USER_PASS_PLAINTEXT) { + let r = git2::Cred::credential_helper(cfg, url, username); + cred_helper_bad = Some(r.is_err()); + return r + } + + // I'm... not sure what the DEFAULT kind of authentication is, but seems + // easy to support? 
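+        // Summary of the decision table in this closure (for orientation):
+        //
+        //     USERNAME            -> bail out; usernames are retried below
+        //     SSH_KEY             -> ssh-agent, at most once per username
+        //     USER_PASS_PLAINTEXT -> git's `credential.helper`
+        //     DEFAULT             -> libgit2's default credentials (below)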
+ if allowed.contains(git2::DEFAULT) { + return git2::Cred::default() + } + + // Whelp, we tried our best + Err(git2::Error::from_str("no authentication available")) + }); + + // Ok, so if it looks like we're going to be doing ssh authentication, we + // want to try a few different usernames as one wasn't specified in the URL + // for us to use. In order, we'll try: + // + // * A credential helper's username for this URL, if available. + // * This account's username. + // * "git" + // + // We have to restart the authentication session each time (due to + // constraints in libssh2 I guess? maybe this is inherent to ssh?), so we + // call our callback, `f`, in a loop here. + if ssh_username_requested { + debug_assert!(res.is_err()); + let mut attempts = Vec::new(); + attempts.push("git".to_string()); + if let Ok(s) = env::var("USER").or_else(|_| env::var("USERNAME")) { + attempts.push(s); + } + if let Some(ref s) = cred_helper.username { + attempts.push(s.clone()); + } + + while let Some(s) = attempts.pop() { + // We should get `USERNAME` first, where we just return our attempt, + // and then after that we should get `SSH_KEY`. If the first attempt + // fails we'll get called again, but we don't have another option so + // we bail out. + let mut attempts = 0; + res = f(&mut |_url, username, allowed| { + if allowed.contains(git2::USERNAME) { + return git2::Cred::username(&s); + } + if allowed.contains(git2::SSH_KEY) { + debug_assert_eq!(Some(&s[..]), username); + attempts += 1; + if attempts == 1 { + ssh_agent_attempts.push(s.to_string()); + return git2::Cred::ssh_key_from_agent(&s) + } + } + Err(git2::Error::from_str("no authentication available")) + }); + + // If we made two attempts then that means: + // + // 1. A username was requested, we returned `s`. + // 2. An ssh key was requested, we returned to look up `s` in the + // ssh agent. + // 3. For whatever reason that lookup failed, so we were asked again + // for another mode of authentication. + // + // Essentially, if `attempts == 2` then in theory the only error was + // that this username failed to authenticate (e.g. no other network + // errors happened). Otherwise something else is funny so we bail + // out. + if attempts != 2 { + break + } + } + } + + if res.is_ok() || !any_attempts { + return res.map_err(From::from) + } + + // In the case of an authentication failure (where we tried something) then + // we try to give a more helpful error message about precisely what we + // tried. 
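+    // Shape of the message assembled below, for a hypothetical failed run:
+    //
+    //     failed to authenticate when downloading repository
+    //     attempted ssh-agent authentication, but none of the usernames
+    //     `git`, `alice` succeeded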
+ res.map_err(CargoError::from).map_err(|e| e.into_internal()).chain_err(|| { + let mut msg = "failed to authenticate when downloading \ + repository".to_string(); + if !ssh_agent_attempts.is_empty() { + let names = ssh_agent_attempts.iter() + .map(|s| format!("`{}`", s)) + .collect::>() + .join(", "); + msg.push_str(&format!("\nattempted ssh-agent authentication, but \ + none of the usernames {} succeeded", names)); + } + if let Some(failed_cred_helper) = cred_helper_bad { + if failed_cred_helper { + msg.push_str("\nattempted to find username/password via \ + git's `credential.helper` support, but failed"); + } else { + msg.push_str("\nattempted to find username/password via \ + `credential.helper`, but maybe the found \ + credentials were incorrect"); + } + } + msg + }) +} + +pub fn fetch(repo: &mut git2::Repository, + url: &Url, + refspec: &str, + config: &Config) -> CargoResult<()> { + if !config.network_allowed() { + bail!("attempting to update a git repository, but --frozen \ + was specified") + } + + // If we're fetching from github, attempt github's special fast path for + // testing if we've already got an up-to-date copy of the repository + if url.host_str() == Some("github.com") { + if let Ok(oid) = repo.refname_to_id("refs/remotes/origin/master") { + let mut handle = config.http()?.borrow_mut(); + debug!("attempting github fast path for {}", url); + if github_up_to_date(&mut handle, url, &oid) { + return Ok(()) + } else { + debug!("fast path failed, falling back to a git fetch"); + } + } + } + + // We reuse repositories quite a lot, so before we go through and update the + // repo check to see if it's a little too old and could benefit from a gc. + // In theory this shouldn't be too too expensive compared to the network + // request we're about to issue. + maybe_gc_repo(repo)?; + + debug!("doing a fetch for {}", url); + with_authentication(url.as_str(), &repo.config()?, |f| { + let mut cb = git2::RemoteCallbacks::new(); + cb.credentials(f); + + // Create a local anonymous remote in the repository to fetch the url + let mut remote = repo.remote_anonymous(url.as_str())?; + let mut opts = git2::FetchOptions::new(); + opts.remote_callbacks(cb) + .download_tags(git2::AutotagOption::All); + + network::with_retry(config, || { + debug!("initiating fetch of {} from {}", refspec, url); + remote.fetch(&[refspec], Some(&mut opts), None) + .map_err(CargoError::from) + })?; + Ok(()) + }) +} + +/// Cargo has a bunch of long-lived git repositories in its global cache and +/// some, like the index, are updated very frequently. Right now each update +/// creates a new "pack file" inside the git database, and over time this can +/// cause bad performance and bad current behavior in libgit2. +/// +/// One pathological use case today is where libgit2 opens hundreds of file +/// descriptors, getting us dangerously close to blowing out the OS limits of +/// how many fds we can have open. This is detailed in #4403. +/// +/// To try to combat this problem we attempt a `git gc` here. Note, though, that +/// we may not even have `git` installed on the system! As a result we +/// opportunistically try a `git gc` when the pack directory looks too big, and +/// failing that we just blow away the repository and start over. +fn maybe_gc_repo(repo: &mut git2::Repository) -> CargoResult<()> { + // Here we arbitrarily declare that if you have more than 100 files in your + // `pack` folder that we need to do a gc. 
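+    // The threshold is overridable, which is primarily useful for testing,
+    // e.g. (hypothetical shell invocation):
+    //
+    //     __CARGO_PACKFILE_LIMIT=1 cargo fetch
+    //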
+ let entries = match repo.path().join("objects/pack").read_dir() { + Ok(e) => e.count(), + Err(_) => { + debug!("skipping gc as pack dir appears gone"); + return Ok(()) + } + }; + let max = env::var("__CARGO_PACKFILE_LIMIT").ok() + .and_then(|s| s.parse::().ok()) + .unwrap_or(100); + if entries < max { + debug!("skipping gc as there's only {} pack files", entries); + return Ok(()) + } + + // First up, try a literal `git gc` by shelling out to git. This is pretty + // likely to fail though as we may not have `git` installed. Note that + // libgit2 doesn't currently implement the gc operation, so there's no + // equivalent there. + match Command::new("git").arg("gc").current_dir(repo.path()).output() { + Ok(out) => { + debug!("git-gc status: {}\n\nstdout ---\n{}\nstderr ---\n{}", + out.status, + String::from_utf8_lossy(&out.stdout), + String::from_utf8_lossy(&out.stderr)); + if out.status.success() { + let new = git2::Repository::open(repo.path())?; + mem::replace(repo, new); + return Ok(()) + } + } + Err(e) => debug!("git-gc failed to spawn: {}", e), + } + + // Alright all else failed, let's start over. + // + // Here we want to drop the current repository object pointed to by `repo`, + // so we initialize temporary repository in a sub-folder, blow away the + // existing git folder, and then recreate the git repo. Finally we blow away + // the `tmp` folder we allocated. + let path = repo.path().to_path_buf(); + let tmp = path.join("tmp"); + mem::replace(repo, git2::Repository::init(&tmp)?); + for entry in path.read_dir()? { + let entry = entry?; + if entry.file_name().to_str() == Some("tmp") { + continue + } + let path = entry.path(); + drop(fs::remove_file(&path).or_else(|_| fs::remove_dir_all(&path))); + } + if repo.is_bare() { + mem::replace(repo, git2::Repository::init_bare(path)?); + } else { + mem::replace(repo, git2::Repository::init(path)?); + } + fs::remove_dir_all(&tmp).chain_err(|| { + format!("failed to remove {:?}", tmp) + })?; + Ok(()) +} + +/// Updating the index is done pretty regularly so we want it to be as fast as +/// possible. For registries hosted on github (like the crates.io index) there's +/// a fast path available to use [1] to tell us that there's no updates to be +/// made. +/// +/// This function will attempt to hit that fast path and verify that the `oid` +/// is actually the current `master` branch of the repository. If `true` is +/// returned then no update needs to be performed, but if `false` is returned +/// then the standard update logic still needs to happen. +/// +/// [1]: https://developer.github.com/v3/repos/commits/#get-the-sha-1-of-a-commit-reference +/// +/// Note that this function should never cause an actual failure because it's +/// just a fast path. As a result all errors are ignored in this function and we +/// just return a `bool`. Any real errors will be reported through the normal +/// update path above. +fn github_up_to_date(handle: &mut Easy, url: &Url, oid: &git2::Oid) -> bool { + macro_rules! 
try { + ($e:expr) => (match $e { + Some(e) => e, + None => return false, + }) + } + + // This expects github urls in the form `github.com/user/repo` and nothing + // else + let mut pieces = try!(url.path_segments()); + let username = try!(pieces.next()); + let repo = try!(pieces.next()); + if pieces.next().is_some() { + return false + } + + let url = format!("https://api.github.com/repos/{}/{}/commits/master", + username, repo); + try!(handle.get(true).ok()); + try!(handle.url(&url).ok()); + try!(handle.useragent("cargo").ok()); + let mut headers = List::new(); + try!(headers.append("Accept: application/vnd.github.3.sha").ok()); + try!(headers.append(&format!("If-None-Match: \"{}\"", oid)).ok()); + try!(handle.http_headers(headers).ok()); + try!(handle.perform().ok()); + + try!(handle.response_code().ok()) == 304 +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/sources/mod.rs b/collector/compile-benchmarks/cargo/src/cargo/sources/mod.rs new file mode 100644 index 000000000..ed784e95a --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/sources/mod.rs @@ -0,0 +1,13 @@ +pub use self::config::SourceConfigMap; +pub use self::directory::DirectorySource; +pub use self::git::GitSource; +pub use self::path::PathSource; +pub use self::registry::{RegistrySource, CRATES_IO}; +pub use self::replaced::ReplacedSource; + +pub mod config; +pub mod directory; +pub mod git; +pub mod path; +pub mod registry; +pub mod replaced; diff --git a/collector/compile-benchmarks/cargo/src/cargo/sources/path.rs b/collector/compile-benchmarks/cargo/src/cargo/sources/path.rs new file mode 100644 index 000000000..11760c29a --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/sources/path.rs @@ -0,0 +1,544 @@ +use std::fmt::{self, Debug, Formatter}; +use std::fs; +use std::path::{Path, PathBuf}; + +use filetime::FileTime; +use git2; +use glob::Pattern; +use ignore::Match; +use ignore::gitignore::GitignoreBuilder; + +use core::{Package, PackageId, Summary, SourceId, Source, Dependency, Registry}; +use ops; +use util::{self, CargoError, CargoResult, internal}; +use util::Config; + +pub struct PathSource<'cfg> { + source_id: SourceId, + path: PathBuf, + updated: bool, + packages: Vec, + config: &'cfg Config, + recursive: bool, +} + +impl<'cfg> PathSource<'cfg> { + /// Invoked with an absolute path to a directory that contains a Cargo.toml. + /// + /// This source will only return the package at precisely the `path` + /// specified, and it will be an error if there's not a package at `path`. + pub fn new(path: &Path, id: &SourceId, config: &'cfg Config) + -> PathSource<'cfg> { + PathSource { + source_id: id.clone(), + path: path.to_path_buf(), + updated: false, + packages: Vec::new(), + config: config, + recursive: false, + } + } + + /// Creates a new source which is walked recursively to discover packages. + /// + /// This is similar to the `new` method except that instead of requiring a + /// valid package to be present at `root` the folder is walked entirely to + /// crawl for packages. + /// + /// Note that this should be used with care and likely shouldn't be chosen + /// by default! + pub fn new_recursive(root: &Path, id: &SourceId, config: &'cfg Config) + -> PathSource<'cfg> { + PathSource { + recursive: true, + .. 
PathSource::new(root, id, config)
+        }
+    }
+
+    pub fn root_package(&mut self) -> CargoResult<Package> {
+        trace!("root_package; source={:?}", self);
+
+        self.update()?;
+
+        match self.packages.iter().find(|p| p.root() == &*self.path) {
+            Some(pkg) => Ok(pkg.clone()),
+            None => Err(internal("no package found in source"))
+        }
+    }
+
+    pub fn read_packages(&self) -> CargoResult<Vec<Package>> {
+        if self.updated {
+            Ok(self.packages.clone())
+        } else if self.recursive {
+            ops::read_packages(&self.path, &self.source_id, self.config)
+        } else {
+            let path = self.path.join("Cargo.toml");
+            let (pkg, _) = ops::read_package(&path, &self.source_id, self.config)?;
+            Ok(vec![pkg])
+        }
+    }
+
+    /// List all files relevant to building this package inside this source.
+    ///
+    /// This function will use the appropriate methods to determine the
+    /// set of files underneath this source's directory which are relevant for
+    /// building `pkg`.
+    ///
+    /// The basic assumption of this method is that all files in the directory
+    /// are relevant for building this package, but it also contains logic to
+    /// use other methods like .gitignore to filter the list of files.
+    ///
+    /// ## Pattern matching strategy
+    ///
+    /// Migrating from a glob-like pattern matching (using `glob` crate) to a
+    /// gitignore-like pattern matching (using `ignore` crate). The migration
+    /// stages are:
+    ///
+    /// 1) Only warn users about the future change iff their matching rules are
+    ///    affected. (CURRENT STAGE)
+    ///
+    /// 2) Switch to the new strategy and update documents. Still keep warning
+    ///    affected users.
+    ///
+    /// 3) Drop the old strategy and no more warnings.
+    ///
+    /// See <https://github.com/rust-lang/cargo/issues/4268> for more info.
+    pub fn list_files(&self, pkg: &Package) -> CargoResult<Vec<PathBuf>> {
+        let root = pkg.root();
+        let no_include_option = pkg.manifest().include().is_empty();
+
+        // glob-like matching rules
+
+        let glob_parse = |p: &String| {
+            let pattern: &str = if p.starts_with('/') {
+                &p[1..p.len()]
+            } else {
+                p
+            };
+            Pattern::new(pattern).map_err(|e| {
+                CargoError::from(format!("could not parse glob pattern `{}`: {}", p, e))
+            })
+        };
+
+        let glob_exclude = pkg.manifest()
+            .exclude()
+            .iter()
+            .map(|p| glob_parse(p))
+            .collect::<Result<Vec<_>, _>>()?;
+
+        let glob_include = pkg.manifest()
+            .include()
+            .iter()
+            .map(|p| glob_parse(p))
+            .collect::<Result<Vec<_>, _>>()?;
+
+        let glob_should_package = |relative_path: &Path| -> bool {
+            fn glob_match(patterns: &Vec<Pattern>, relative_path: &Path) -> bool {
+                patterns.iter().any(|pattern| pattern.matches_path(relative_path))
+            }
+
+            // include and exclude options are mutually exclusive.
+            if no_include_option {
+                !glob_match(&glob_exclude, relative_path)
+            } else {
+                glob_match(&glob_include, relative_path)
+            }
+        };
+
+        // ignore-like matching rules
+
+        let mut exclude_builder = GitignoreBuilder::new(root);
+        for rule in pkg.manifest().exclude() {
+            exclude_builder.add_line(None, rule)?;
+        }
+        let ignore_exclude = exclude_builder.build()?;
+
+        let mut include_builder = GitignoreBuilder::new(root);
+        for rule in pkg.manifest().include() {
+            include_builder.add_line(None, rule)?;
+        }
+        let ignore_include = include_builder.build()?;
+
+        let ignore_should_package = |relative_path: &Path| -> CargoResult<bool> {
+            // include and exclude options are mutually exclusive.
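+            // One illustrative divergence between the two strategies (example
+            // assumed, not taken from the original source): an exclude rule
+            // `tests` matches `tests/one.rs` under the gitignore semantics
+            // (a parent directory matches), while the old glob semantics only
+            // match the literal path `tests`.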
+ if no_include_option { + match ignore_exclude.matched_path_or_any_parents( + relative_path, + /* is_dir */ false, + ) { + Match::None => Ok(true), + Match::Ignore(_) => Ok(false), + Match::Whitelist(pattern) => Err(CargoError::from(format!( + "exclude rules cannot start with `!`: {}", + pattern.original() + ))), + } + } else { + match ignore_include.matched_path_or_any_parents( + relative_path, + /* is_dir */ false, + ) { + Match::None => Ok(false), + Match::Ignore(_) => Ok(true), + Match::Whitelist(pattern) => Err(CargoError::from(format!( + "include rules cannot start with `!`: {}", + pattern.original() + ))), + } + } + }; + + // matching to paths + + let mut filter = |path: &Path| -> CargoResult { + let relative_path = util::without_prefix(path, root).unwrap(); + let glob_should_package = glob_should_package(relative_path); + let ignore_should_package = ignore_should_package(relative_path)?; + + if glob_should_package != ignore_should_package { + if glob_should_package { + if no_include_option { + self.config + .shell() + .warn(format!( + "Pattern matching for Cargo's include/exclude fields is changing and \ + file `{}` WILL be excluded in a future Cargo version.\n\ + See https://github.com/rust-lang/cargo/issues/4268 for more info", + relative_path.display() + ))?; + } else { + self.config + .shell() + .warn(format!( + "Pattern matching for Cargo's include/exclude fields is changing and \ + file `{}` WILL NOT be included in a future Cargo version.\n\ + See https://github.com/rust-lang/cargo/issues/4268 for more info", + relative_path.display() + ))?; + } + } else if no_include_option { + self.config + .shell() + .warn(format!( + "Pattern matching for Cargo's include/exclude fields is changing and \ + file `{}` WILL NOT be excluded in a future Cargo version.\n\ + See https://github.com/rust-lang/cargo/issues/4268 for more info", + relative_path.display() + ))?; + } else { + self.config + .shell() + .warn(format!( + "Pattern matching for Cargo's include/exclude fields is changing and \ + file `{}` WILL be included in a future Cargo version.\n\ + See https://github.com/rust-lang/cargo/issues/4268 for more info", + relative_path.display() + ))?; + } + } + + // Update to ignore_should_package for Stage 2 + Ok(glob_should_package) + }; + + // attempt git-prepopulate only if no `include` (rust-lang/cargo#4135) + if no_include_option { + if let Some(result) = self.discover_git_and_list_files(pkg, root, &mut filter) { + return result; + } + } + self.list_files_walk(pkg, &mut filter) + } + + // Returns Some(_) if found sibling Cargo.toml and .git folder; + // otherwise caller should fall back on full file list. + fn discover_git_and_list_files(&self, + pkg: &Package, + root: &Path, + filter: &mut FnMut(&Path) -> CargoResult) + -> Option>> { + // If this package is in a git repository, then we really do want to + // query the git repository as it takes into account items such as + // .gitignore. We're not quite sure where the git repository is, + // however, so we do a bit of a probe. + // + // We walk this package's path upwards and look for a sibling + // Cargo.toml and .git folder. If we find one then we assume that we're + // part of that repository. + let mut cur = root; + loop { + if cur.join("Cargo.toml").is_file() { + // If we find a git repository next to this Cargo.toml, we still + // check to see if we are indeed part of the index. If not, then + // this is likely an unrelated git repo, so keep going. 
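+                // Probe illustrated with an assumed layout: for a package at
+                // `repo/crates/foo`, this loop inspects `repo/crates/foo`,
+                // then `repo/crates`, then `repo`, stopping at the first
+                // directory whose sibling `Cargo.toml` is actually tracked in
+                // a git index found at that level.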
+                if let Ok(repo) = git2::Repository::open(cur) {
+                    let index = match repo.index() {
+                        Ok(index) => index,
+                        Err(err) => return Some(Err(err.into())),
+                    };
+                    let path = util::without_prefix(root, cur)
+                                    .unwrap().join("Cargo.toml");
+                    if index.get_path(&path, 0).is_some() {
+                        return Some(self.list_files_git(pkg, repo, filter));
+                    }
+                }
+            }
+            // don't cross submodule boundaries
+            if cur.join(".git").is_dir() {
+                break
+            }
+            match cur.parent() {
+                Some(parent) => cur = parent,
+                None => break,
+            }
+        }
+        None
+    }
+
+    fn list_files_git(&self, pkg: &Package, repo: git2::Repository,
+                      filter: &mut FnMut(&Path) -> CargoResult<bool>)
+                      -> CargoResult<Vec<PathBuf>> {
+        warn!("list_files_git {}", pkg.package_id());
+        let index = repo.index()?;
+        let root = repo.workdir().ok_or_else(|| {
+            internal("Can't list files on a bare repository.")
+        })?;
+        let pkg_path = pkg.root();
+
+        let mut ret = Vec::<PathBuf>::new();
+
+        // We use information from the git repository to guide us in traversing
+        // its tree. The primary purpose of this is to take advantage of the
+        // .gitignore and auto-ignore files that don't matter.
+        //
+        // Here we're also careful to look at both tracked and untracked files as
+        // the untracked files are often part of a build and may become relevant
+        // as part of a future commit.
+        let index_files = index.iter().map(|entry| {
+            use libgit2_sys::GIT_FILEMODE_COMMIT;
+            let is_dir = entry.mode == GIT_FILEMODE_COMMIT as u32;
+            (join(root, &entry.path), Some(is_dir))
+        });
+        let mut opts = git2::StatusOptions::new();
+        opts.include_untracked(true);
+        if let Some(suffix) = util::without_prefix(pkg_path, root) {
+            opts.pathspec(suffix);
+        }
+        let statuses = repo.statuses(Some(&mut opts))?;
+        let untracked = statuses.iter().filter_map(|entry| {
+            match entry.status() {
+                git2::STATUS_WT_NEW => Some((join(root, entry.path_bytes()), None)),
+                _ => None,
+            }
+        });
+
+        let mut subpackages_found = Vec::new();
+
+        for (file_path, is_dir) in index_files.chain(untracked) {
+            let file_path = file_path?;
+
+            // Filter out files blatantly outside this package. This is helped a
+            // bit above via the `pathspec` function call, but we need to filter
+            // the entries in the index as well.
+            if !file_path.starts_with(pkg_path) {
+                continue
+            }
+
+            match file_path.file_name().and_then(|s| s.to_str()) {
+                // Filter out Cargo.lock and target always, we don't want to
+                // package a lock file no one will ever read and we also avoid
+                // build artifacts
+                Some("Cargo.lock") |
+                Some("target") => continue,
+
+                // Keep track of all sub-packages found and also strip out all
+                // matches we've found so far. Note, though, that if we find
+                // our own `Cargo.toml` we keep going.
+                Some("Cargo.toml") => {
+                    let path = file_path.parent().unwrap();
+                    if path != pkg_path {
+                        warn!("subpackage found: {}", path.display());
+                        ret.retain(|p| !p.starts_with(path));
+                        subpackages_found.push(path.to_path_buf());
+                        continue
+                    }
+                }
+
+                _ => {}
+            }
+
+            // If this file is part of any other sub-package we've found so far,
+            // skip it.
+            if subpackages_found.iter().any(|p| file_path.starts_with(p)) {
+                continue
+            }
+
+            if is_dir.unwrap_or_else(|| file_path.is_dir()) {
+                warn!("  found submodule {}", file_path.display());
+                let rel = util::without_prefix(&file_path, root).unwrap();
+                let rel = rel.to_str().ok_or_else(|| {
+                    CargoError::from(format!("invalid utf-8 filename: {}", rel.display()))
+                })?;
+                // Git submodules are currently only named through `/` path
+                // separators, explicitly not `\` which windows uses. Who knew?
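+                // e.g. (illustrative): a submodule checked out at
+                // `deps\winapi` on Windows must be looked up below as
+                // `deps/winapi`.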
+ let rel = rel.replace(r"\", "/"); + match repo.find_submodule(&rel).and_then(|s| s.open()) { + Ok(repo) => { + let files = self.list_files_git(pkg, repo, filter)?; + ret.extend(files.into_iter()); + } + Err(..) => { + PathSource::walk(&file_path, &mut ret, false, filter)?; + } + } + } else if (*filter)(&file_path)? { + // We found a file! + warn!(" found {}", file_path.display()); + ret.push(file_path); + } + } + return Ok(ret); + + #[cfg(unix)] + fn join(path: &Path, data: &[u8]) -> CargoResult { + use std::os::unix::prelude::*; + use std::ffi::OsStr; + Ok(path.join(::from_bytes(data))) + } + #[cfg(windows)] + fn join(path: &Path, data: &[u8]) -> CargoResult { + use std::str; + match str::from_utf8(data) { + Ok(s) => Ok(path.join(s)), + Err(..) => Err(internal("cannot process path in git with a non \ + unicode filename")), + } + } + } + + fn list_files_walk(&self, pkg: &Package, filter: &mut FnMut(&Path) -> CargoResult) + -> CargoResult> { + let mut ret = Vec::new(); + PathSource::walk(pkg.root(), &mut ret, true, filter)?; + Ok(ret) + } + + fn walk(path: &Path, ret: &mut Vec, + is_root: bool, filter: &mut FnMut(&Path) -> CargoResult) + -> CargoResult<()> + { + if !fs::metadata(&path).map(|m| m.is_dir()).unwrap_or(false) { + if (*filter)(path)? { + ret.push(path.to_path_buf()); + } + return Ok(()) + } + // Don't recurse into any sub-packages that we have + if !is_root && fs::metadata(&path.join("Cargo.toml")).is_ok() { + return Ok(()) + } + + // For package integration tests, we need to sort the paths in a deterministic order to + // be able to match stdout warnings in the same order. + // + // TODO: Drop collect and sort after transition period and dropping wraning tests. + // See + // and + let mut entries: Vec = fs::read_dir(path)?.map(|e| e.unwrap()).collect(); + entries.sort_by(|a, b| a.path().as_os_str().cmp(b.path().as_os_str())); + for entry in entries { + let path = entry.path(); + let name = path.file_name().and_then(|s| s.to_str()); + // Skip dotfile directories + if name.map(|s| s.starts_with('.')) == Some(true) { + continue + } else if is_root { + // Skip cargo artifacts + match name { + Some("target") | Some("Cargo.lock") => continue, + _ => {} + } + } + PathSource::walk(&path, ret, false, filter)?; + } + Ok(()) + } +} + +impl<'cfg> Debug for PathSource<'cfg> { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!(f, "the paths source") + } +} + +impl<'cfg> Registry for PathSource<'cfg> { + fn query(&mut self, + dep: &Dependency, + f: &mut FnMut(Summary)) -> CargoResult<()> { + for s in self.packages.iter().map(|p| p.summary()) { + if dep.matches(s) { + f(s.clone()) + } + } + Ok(()) + } + + fn supports_checksums(&self) -> bool { + false + } + + fn requires_precise(&self) -> bool { + false + } +} + +impl<'cfg> Source for PathSource<'cfg> { + fn source_id(&self) -> &SourceId { + &self.source_id + } + + fn update(&mut self) -> CargoResult<()> { + if !self.updated { + let packages = self.read_packages()?; + self.packages.extend(packages.into_iter()); + self.updated = true; + } + + Ok(()) + } + + fn download(&mut self, id: &PackageId) -> CargoResult { + trace!("getting packages; id={}", id); + + let pkg = self.packages.iter().find(|pkg| pkg.package_id() == id); + pkg.cloned().ok_or_else(|| { + internal(format!("failed to find {} in path source", id)) + }) + } + + fn fingerprint(&self, pkg: &Package) -> CargoResult { + if !self.updated { + return Err(internal("BUG: source was not updated")); + } + + let mut max = FileTime::zero(); + let mut max_path = PathBuf::from(""); + 
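The fingerprint loop starts right here, so a std-only sketch of its strategy may help: the whole package is reduced to the newest modification time across its listed files, meaning any edit bumps the fingerprint. This sketch uses `SystemTime` where the vendored code uses the `filetime` crate's `FileTime`; the function name is illustrative.

```rust
use std::fs;
use std::path::{Path, PathBuf};
use std::time::SystemTime;

/// Returns the newest mtime and the file that carries it. Metadata errors
/// (broken symlinks, files racily removed, permission problems) are treated
/// as "very old", matching the comment below.
fn max_mtime(files: &[PathBuf]) -> (SystemTime, Option<&Path>) {
    let mut max = SystemTime::UNIX_EPOCH;
    let mut max_path = None;
    for file in files {
        let mtime = fs::metadata(file)
            .and_then(|meta| meta.modified())
            .unwrap_or(SystemTime::UNIX_EPOCH);
        if mtime > max {
            max = mtime;
            max_path = Some(file.as_path());
        }
    }
    (max, max_path)
}
```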
for file in self.list_files(pkg)? { + // An fs::stat error here is either because path is a + // broken symlink, a permissions error, or a race + // condition where this path was rm'ed - either way, + // we can ignore the error and treat the path's mtime + // as 0. + let mtime = fs::metadata(&file).map(|meta| { + FileTime::from_last_modification_time(&meta) + }).unwrap_or(FileTime::zero()); + warn!("{} {}", mtime, file.display()); + if mtime > max { + max = mtime; + max_path = file; + } + } + trace!("fingerprint {}: {}", self.path.display(), max); + Ok(format!("{} ({})", max, max_path.display())) + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/sources/registry/index.rs b/collector/compile-benchmarks/cargo/src/cargo/sources/registry/index.rs new file mode 100644 index 000000000..14c8ab7d6 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/sources/registry/index.rs @@ -0,0 +1,192 @@ +use std::collections::HashMap; +use std::path::Path; +use std::str; + +use serde_json; +use semver::Version; + +use core::dependency::Dependency; +use core::{SourceId, Summary, PackageId}; +use sources::registry::{RegistryPackage, INDEX_LOCK}; +use sources::registry::RegistryData; +use util::{CargoError, CargoResult, internal, Filesystem, Config}; + +pub struct RegistryIndex<'cfg> { + source_id: SourceId, + path: Filesystem, + cache: HashMap>, + hashes: HashMap>, // (name, vers) => cksum + config: &'cfg Config, + locked: bool, +} + +impl<'cfg> RegistryIndex<'cfg> { + pub fn new(id: &SourceId, + path: &Filesystem, + config: &'cfg Config, + locked: bool) + -> RegistryIndex<'cfg> { + RegistryIndex { + source_id: id.clone(), + path: path.clone(), + cache: HashMap::new(), + hashes: HashMap::new(), + config: config, + locked: locked, + } + } + + /// Return the hash listed for a specified PackageId. + pub fn hash(&mut self, + pkg: &PackageId, + load: &mut RegistryData) + -> CargoResult { + let name = pkg.name(); + let version = pkg.version(); + if let Some(s) = self.hashes.get(name).and_then(|v| v.get(version)) { + return Ok(s.clone()) + } + // Ok, we're missing the key, so parse the index file to load it. + self.summaries(name, load)?; + self.hashes.get(name).and_then(|v| v.get(version)).ok_or_else(|| { + internal(format!("no hash listed for {}", pkg)) + }).map(|s| s.clone()) + } + + /// Parse the on-disk metadata for the package provided + /// + /// Returns a list of pairs of (summary, yanked) for the package name + /// specified. 
+ pub fn summaries(&mut self, + name: &str, + load: &mut RegistryData) + -> CargoResult<&Vec<(Summary, bool)>> { + if self.cache.contains_key(name) { + return Ok(&self.cache[name]); + } + let summaries = self.load_summaries(name, load)?; + self.cache.insert(name.to_string(), summaries); + Ok(&self.cache[name]) + } + + fn load_summaries(&mut self, + name: &str, + load: &mut RegistryData) + -> CargoResult> { + let (root, _lock) = if self.locked { + let lock = self.path.open_ro(Path::new(INDEX_LOCK), + self.config, + "the registry index"); + match lock { + Ok(lock) => { + (lock.path().parent().unwrap().to_path_buf(), Some(lock)) + } + Err(_) => return Ok(Vec::new()), + } + } else { + (self.path.clone().into_path_unlocked(), None) + }; + + let fs_name = name.chars().flat_map(|c| { + c.to_lowercase() + }).collect::(); + + // see module comment for why this is structured the way it is + let path = match fs_name.len() { + 1 => format!("1/{}", fs_name), + 2 => format!("2/{}", fs_name), + 3 => format!("3/{}/{}", &fs_name[..1], fs_name), + _ => format!("{}/{}/{}", &fs_name[0..2], &fs_name[2..4], fs_name), + }; + let mut ret = Vec::new(); + let mut hit_closure = false; + let err = load.load(&root, Path::new(&path), &mut |contents| { + hit_closure = true; + let contents = str::from_utf8(contents).map_err(|_| { + CargoError::from("registry index file was not valid utf-8") + })?; + ret.reserve(contents.lines().count()); + let lines = contents.lines() + .map(|s| s.trim()) + .filter(|l| !l.is_empty()); + + // Attempt forwards-compatibility on the index by ignoring + // everything that we ourselves don't understand, that should + // allow future cargo implementations to break the + // interpretation of each line here and older cargo will simply + // ignore the new lines. + ret.extend(lines.filter_map(|line| { + self.parse_registry_package(line).ok() + })); + + Ok(()) + }); + + // We ignore lookup failures as those are just crates which don't exist + // or we haven't updated the registry yet. If we actually ran the + // closure though then we care about those errors. + if hit_closure { + err?; + } + + Ok(ret) + } + + /// Parse a line from the registry's index file into a Summary for a + /// package. + /// + /// The returned boolean is whether or not the summary has been yanked. + fn parse_registry_package(&mut self, line: &str) + -> CargoResult<(Summary, bool)> { + let RegistryPackage { + name, vers, cksum, deps, features, yanked + } = super::DEFAULT_ID.set(&self.source_id, || { + serde_json::from_str::(line) + })?; + let pkgid = PackageId::new(&name, &vers, &self.source_id)?; + let summary = Summary::new(pkgid, deps.inner, features)?; + let summary = summary.set_checksum(cksum.clone()); + if self.hashes.contains_key(&name[..]) { + self.hashes.get_mut(&name[..]).unwrap().insert(vers, cksum); + } else { + self.hashes.entry(name.into_owned()) + .or_insert_with(HashMap::new) + .insert(vers, cksum); + } + Ok((summary, yanked.unwrap_or(false))) + } + + pub fn query(&mut self, + dep: &Dependency, + load: &mut RegistryData, + f: &mut FnMut(Summary)) + -> CargoResult<()> { + let source_id = self.source_id.clone(); + let summaries = self.summaries(dep.name(), load)?; + let summaries = summaries.iter().filter(|&&(_, yanked)| { + dep.source_id().precise().is_some() || !yanked + }).map(|s| s.0.clone()); + + // Handle `cargo update --precise` here. 
If specified, our own source + // will have a precise version listed of the form `=` where + // `` is the name of a crate on this source and `` is the + // version requested (agument to `--precise`). + let summaries = summaries.filter(|s| { + match source_id.precise() { + Some(p) if p.starts_with(dep.name()) && + p[dep.name().len()..].starts_with('=') => { + let vers = &p[dep.name().len() + 1..]; + s.version().to_string() == vers + } + _ => true, + } + }); + + for summary in summaries { + if dep.matches(&summary) { + f(summary); + } + } + Ok(()) + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/sources/registry/local.rs b/collector/compile-benchmarks/cargo/src/cargo/sources/registry/local.rs new file mode 100644 index 000000000..5803fd77d --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/sources/registry/local.rs @@ -0,0 +1,105 @@ +use std::io::SeekFrom; +use std::io::prelude::*; +use std::path::Path; + +use core::PackageId; +use hex::ToHex; +use sources::registry::{RegistryData, RegistryConfig}; +use util::FileLock; +use util::paths; +use util::{Config, Sha256, Filesystem}; +use util::errors::{CargoResult, CargoResultExt}; + +pub struct LocalRegistry<'cfg> { + index_path: Filesystem, + root: Filesystem, + src_path: Filesystem, + config: &'cfg Config, +} + +impl<'cfg> LocalRegistry<'cfg> { + pub fn new(root: &Path, + config: &'cfg Config, + name: &str) -> LocalRegistry<'cfg> { + LocalRegistry { + src_path: config.registry_source_path().join(name), + index_path: Filesystem::new(root.join("index")), + root: Filesystem::new(root.to_path_buf()), + config: config, + } + } +} + +impl<'cfg> RegistryData for LocalRegistry<'cfg> { + fn index_path(&self) -> &Filesystem { + &self.index_path + } + + fn load(&self, + root: &Path, + path: &Path, + data: &mut FnMut(&[u8]) -> CargoResult<()>) -> CargoResult<()> { + data(&paths::read_bytes(&root.join(path))?) + } + + fn config(&mut self) -> CargoResult> { + // Local registries don't have configuration for remote APIs or anything + // like that + Ok(None) + } + + fn update_index(&mut self) -> CargoResult<()> { + // Nothing to update, we just use what's on disk. Verify it actually + // exists though. We don't use any locks as we're just checking whether + // these directories exist. + let root = self.root.clone().into_path_unlocked(); + if !root.is_dir() { + bail!("local registry path is not a directory: {}", + root.display()) + } + let index_path = self.index_path.clone().into_path_unlocked(); + if !index_path.is_dir() { + bail!("local registry index path is not a directory: {}", + index_path.display()) + } + Ok(()) + } + + fn download(&mut self, pkg: &PackageId, checksum: &str) + -> CargoResult { + let crate_file = format!("{}-{}.crate", pkg.name(), pkg.version()); + let mut crate_file = self.root.open_ro(&crate_file, + self.config, + "crate file")?; + + // If we've already got an unpacked version of this crate, then skip the + // checksum below as it is in theory already verified. + let dst = format!("{}-{}", pkg.name(), pkg.version()); + if self.src_path.join(dst).into_path_unlocked().exists() { + return Ok(crate_file) + } + + self.config.shell().status("Unpacking", pkg)?; + + // We don't actually need to download anything per-se, we just need to + // verify the checksum matches the .crate file itself. 
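A self-contained sketch of the streaming verification performed just below, assuming the `sha2` and `hex` crates in place of cargo's internal `Sha256` wrapper (the vendored code does not use these crates; this is only an illustration of the same technique).

```rust
use std::fs::File;
use std::io::Read;
use std::path::Path;

use sha2::{Digest, Sha256};

/// Hash the .crate file in 64 KiB chunks so large tarballs never have to
/// fit in memory, then compare against the hex checksum from the index.
fn verify_checksum(path: &Path, expected_hex: &str) -> std::io::Result<bool> {
    let mut file = File::open(path)?;
    let mut hasher = Sha256::new();
    let mut buf = [0u8; 64 * 1024];
    loop {
        let n = file.read(&mut buf)?;
        if n == 0 {
            break; // end of file reached
        }
        hasher.update(&buf[..n]);
    }
    Ok(hex::encode(hasher.finalize()) == expected_hex)
}
```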
+ let mut state = Sha256::new(); + let mut buf = [0; 64 * 1024]; + loop { + let n = crate_file.read(&mut buf).chain_err(|| { + format!("failed to read `{}`", crate_file.path().display()) + })?; + if n == 0 { + break + } + state.update(&buf[..n]); + } + if state.finish().to_hex() != checksum { + bail!("failed to verify the checksum of `{}`", pkg) + } + + crate_file.seek(SeekFrom::Start(0))?; + + Ok(crate_file) + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/sources/registry/mod.rs b/collector/compile-benchmarks/cargo/src/cargo/sources/registry/mod.rs new file mode 100644 index 000000000..c967e2ebc --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/sources/registry/mod.rs @@ -0,0 +1,516 @@ +//! A `Source` for registry-based packages. +//! +//! # What's a Registry? +//! +//! Registries are central locations where packages can be uploaded to, +//! discovered, and searched for. The purpose of a registry is to have a +//! location that serves as permanent storage for versions of a crate over time. +//! +//! Compared to git sources, a registry provides many packages as well as many +//! versions simultaneously. Git sources can also have commits deleted through +//! rebasings where registries cannot have their versions deleted. +//! +//! # The Index of a Registry +//! +//! One of the major difficulties with a registry is that hosting so many +//! packages may quickly run into performance problems when dealing with +//! dependency graphs. It's infeasible for cargo to download the entire contents +//! of the registry just to resolve one package's dependencies, for example. As +//! a result, cargo needs some efficient method of querying what packages are +//! available on a registry, what versions are available, and what the +//! dependencies for each version is. +//! +//! One method of doing so would be having the registry expose an HTTP endpoint +//! which can be queried with a list of packages and a response of their +//! dependencies and versions is returned. This is somewhat inefficient however +//! as we may have to hit the endpoint many times and we may have already +//! queried for much of the data locally already (for other packages, for +//! example). This also involves inventing a transport format between the +//! registry and Cargo itself, so this route was not taken. +//! +//! Instead, Cargo communicates with registries through a git repository +//! referred to as the Index. The Index of a registry is essentially an easily +//! query-able version of the registry's database for a list of versions of a +//! package as well as a list of dependencies for each version. +//! +//! Using git to host this index provides a number of benefits: +//! +//! * The entire index can be stored efficiently locally on disk. This means +//! that all queries of a registry can happen locally and don't need to touch +//! the network. +//! +//! * Updates of the index are quite efficient. Using git buys incremental +//! updates, compressed transmission, etc for free. The index must be updated +//! each time we need fresh information from a registry, but this is one +//! update of a git repository that probably hasn't changed a whole lot so +//! it shouldn't be too expensive. +//! +//! Additionally, each modification to the index is just appending a line at +//! the end of a file (the exact format is described later). This means that +//! the commits for an index are quite small and easily applied/compressable. +//! +//! ## The format of the Index +//! +//! 
The index is a store for the list of versions for all packages known, so its +//! format on disk is optimized slightly to ensure that `ls registry` doesn't +//! produce a list of all packages ever known. The index also wants to ensure +//! that there's not a million files which may actually end up hitting +//! filesystem limits at some point. To this end, a few decisions were made +//! about the format of the registry: +//! +//! 1. Each crate will have one file corresponding to it. Each version for a +//! crate will just be a line in this file. +//! 2. There will be two tiers of directories for crate names, under which +//! crates corresponding to those tiers will be located. +//! +//! As an example, this is an example hierarchy of an index: +//! +//! ```notrust +//! . +//! ├── 3 +//! │   └── u +//! │   └── url +//! ├── bz +//! │   └── ip +//! │   └── bzip2 +//! ├── config.json +//! ├── en +//! │   └── co +//! │   └── encoding +//! └── li +//!    ├── bg +//!    │   └── libgit2 +//!    └── nk +//!    └── link-config +//! ``` +//! +//! The root of the index contains a `config.json` file with a few entries +//! corresponding to the registry (see `RegistryConfig` below). +//! +//! Otherwise, there are three numbered directories (1, 2, 3) for crates with +//! names 1, 2, and 3 characters in length. The 1/2 directories simply have the +//! crate files underneath them, while the 3 directory is sharded by the first +//! letter of the crate name. +//! +//! Otherwise the top-level directory contains many two-letter directory names, +//! each of which has many sub-folders with two letters. At the end of all these +//! are the actual crate files themselves. +//! +//! The purpose of this layout is to hopefully cut down on `ls` sizes as well as +//! efficient lookup based on the crate name itself. +//! +//! ## Crate files +//! +//! Each file in the index is the history of one crate over time. Each line in +//! the file corresponds to one version of a crate, stored in JSON format (see +//! the `RegistryPackage` structure below). +//! +//! As new versions are published, new lines are appended to this file. The only +//! modifications to this file that should happen over time are yanks of a +//! particular version. +//! +//! # Downloading Packages +//! +//! The purpose of the Index was to provide an efficient method to resolve the +//! dependency graph for a package. So far we only required one network +//! interaction to update the registry's repository (yay!). After resolution has +//! been performed, however we need to download the contents of packages so we +//! can read the full manifest and build the source code. +//! +//! To accomplish this, this source's `download` method will make an HTTP +//! request per-package requested to download tarballs into a local cache. These +//! tarballs will then be unpacked into a destination folder. +//! +//! Note that because versions uploaded to the registry are frozen forever that +//! the HTTP download and unpacking can all be skipped if the version has +//! already been downloaded and unpacked. This caching allows us to only +//! download a package when absolutely necessary. +//! +//! # Filesystem Hierarchy +//! +//! Overall, the `$HOME/.cargo` looks like this when talking about the registry: +//! +//! ```notrust +//! # A folder under which all registry metadata is hosted (similar to +//! # $HOME/.cargo/git) +//! $HOME/.cargo/registry/ +//! +//! # For each registry that cargo knows about (keyed by hostname + hash) +//! 
# there is a folder which is the checked out version of the index for +//! # the registry in this location. Note that this is done so cargo can +//! # support multiple registries simultaneously +//! index/ +//! registry1-/ +//! registry2-/ +//! ... +//! +//! # This folder is a cache for all downloaded tarballs from a registry. +//! # Once downloaded and verified, a tarball never changes. +//! cache/ +//! registry1-/-.crate +//! ... +//! +//! # Location in which all tarballs are unpacked. Each tarball is known to +//! # be frozen after downloading, so transitively this folder is also +//! # frozen once its unpacked (it's never unpacked again) +//! src/ +//! registry1-/-/... +//! ... +//! ``` + +use std::borrow::Cow; +use std::collections::BTreeMap; +use std::fmt; +use std::fs::File; +use std::path::{PathBuf, Path}; + +use flate2::read::GzDecoder; +use semver::Version; +use serde::de; +use tar::Archive; + +use core::{Source, SourceId, PackageId, Package, Summary, Registry}; +use core::dependency::{Dependency, Kind}; +use sources::PathSource; +use util::{CargoResult, Config, internal, FileLock, Filesystem}; +use util::errors::CargoResultExt; +use util::hex; + +const INDEX_LOCK: &'static str = ".cargo-index-lock"; +pub static CRATES_IO: &'static str = "https://github.com/rust-lang/crates.io-index"; + +pub struct RegistrySource<'cfg> { + source_id: SourceId, + src_path: Filesystem, + config: &'cfg Config, + updated: bool, + ops: Box, + index: index::RegistryIndex<'cfg>, + index_locked: bool, +} + +#[derive(Deserialize)] +pub struct RegistryConfig { + /// Download endpoint for all crates. This will be appended with + /// `///download` and then will be hit with an HTTP GET + /// request to download the tarball for a crate. + pub dl: String, + + /// API endpoint for the registry. This is what's actually hit to perform + /// operations like yanks, owner modifications, publish new crates, etc. 
+ pub api: String, +} + +#[derive(Deserialize)] +struct RegistryPackage<'a> { + name: Cow<'a, str>, + vers: Version, + deps: DependencyList, + features: BTreeMap>, + cksum: String, + yanked: Option, +} + +struct DependencyList { + inner: Vec, +} + +#[derive(Deserialize)] +struct RegistryDependency<'a> { + name: Cow<'a, str>, + req: Cow<'a, str>, + features: Vec, + optional: bool, + default_features: bool, + target: Option>, + kind: Option>, +} + +pub trait RegistryData { + fn index_path(&self) -> &Filesystem; + fn load(&self, + _root: &Path, + path: &Path, + data: &mut FnMut(&[u8]) -> CargoResult<()>) -> CargoResult<()>; + fn config(&mut self) -> CargoResult>; + fn update_index(&mut self) -> CargoResult<()>; + fn download(&mut self, + pkg: &PackageId, + checksum: &str) -> CargoResult; +} + +mod index; +mod remote; +mod local; + +fn short_name(id: &SourceId) -> String { + let hash = hex::short_hash(id); + let ident = id.url().host_str().unwrap_or("").to_string(); + format!("{}-{}", ident, hash) +} + +impl<'cfg> RegistrySource<'cfg> { + pub fn remote(source_id: &SourceId, + config: &'cfg Config) -> RegistrySource<'cfg> { + let name = short_name(source_id); + let ops = remote::RemoteRegistry::new(source_id, config, &name); + RegistrySource::new(source_id, config, &name, Box::new(ops), true) + } + + pub fn local(source_id: &SourceId, + path: &Path, + config: &'cfg Config) -> RegistrySource<'cfg> { + let name = short_name(source_id); + let ops = local::LocalRegistry::new(path, config, &name); + RegistrySource::new(source_id, config, &name, Box::new(ops), false) + } + + fn new(source_id: &SourceId, + config: &'cfg Config, + name: &str, + ops: Box, + index_locked: bool) -> RegistrySource<'cfg> { + RegistrySource { + src_path: config.registry_source_path().join(name), + config: config, + source_id: source_id.clone(), + updated: false, + index: index::RegistryIndex::new(source_id, + ops.index_path(), + config, + index_locked), + index_locked: index_locked, + ops: ops, + } + } + + /// Decode the configuration stored within the registry. + /// + /// This requires that the index has been at least checked out. + pub fn config(&mut self) -> CargoResult> { + self.ops.config() + } + + /// Unpacks a downloaded package into a location where it's ready to be + /// compiled. + /// + /// No action is taken if the source looks like it's already unpacked. + fn unpack_package(&self, + pkg: &PackageId, + tarball: &FileLock) + -> CargoResult { + let dst = self.src_path.join(&format!("{}-{}", pkg.name(), + pkg.version())); + dst.create_dir()?; + // Note that we've already got the `tarball` locked above, and that + // implies a lock on the unpacked destination as well, so this access + // via `into_path_unlocked` should be ok. + let dst = dst.into_path_unlocked(); + let ok = dst.join(".cargo-ok"); + if ok.exists() { + return Ok(dst) + } + + let gz = GzDecoder::new(tarball.file())?; + let mut tar = Archive::new(gz); + let prefix = dst.file_name().unwrap(); + let parent = dst.parent().unwrap(); + for entry in tar.entries()? { + let mut entry = entry.chain_err(|| "failed to iterate over archive")?; + let entry_path = entry.path() + .chain_err(|| "failed to read entry path")? + .into_owned(); + + // We're going to unpack this tarball into the global source + // directory, but we want to make sure that it doesn't accidentally + // (or maliciously) overwrite source code from other crates. 
Cargo + // itself should never generate a tarball that hits this error, and + // crates.io should also block uploads with these sorts of tarballs, + // but be extra sure by adding a check here as well. + if !entry_path.starts_with(prefix) { + return Err(format!("invalid tarball downloaded, contains \ + a file at {:?} which isn't under {:?}", + entry_path, prefix).into()) + } + + // Once that's verified, unpack the entry as usual. + entry.unpack_in(parent).chain_err(|| { + format!("failed to unpack entry at `{}`", entry_path.display()) + })?; + } + File::create(&ok)?; + Ok(dst.clone()) + } + + fn do_update(&mut self) -> CargoResult<()> { + self.ops.update_index()?; + let path = self.ops.index_path(); + self.index = index::RegistryIndex::new(&self.source_id, + path, + self.config, + self.index_locked); + Ok(()) + } +} + +impl<'cfg> Registry for RegistrySource<'cfg> { + fn query(&mut self, + dep: &Dependency, + f: &mut FnMut(Summary)) -> CargoResult<()> { + // If this is a precise dependency, then it came from a lockfile and in + // theory the registry is known to contain this version. If, however, we + // come back with no summaries, then our registry may need to be + // updated, so we fall back to performing a lazy update. + if dep.source_id().precise().is_some() && !self.updated { + let mut called = false; + self.index.query(dep, &mut *self.ops, &mut |s| { + called = true; + f(s); + })?; + if called { + return Ok(()) + } else { + self.do_update()?; + } + } + + self.index.query(dep, &mut *self.ops, f) + } + + fn supports_checksums(&self) -> bool { + true + } + + fn requires_precise(&self) -> bool { + false + } +} + +impl<'cfg> Source for RegistrySource<'cfg> { + fn source_id(&self) -> &SourceId { + &self.source_id + } + + fn update(&mut self) -> CargoResult<()> { + // If we have an imprecise version then we don't know what we're going + // to look for, so we always attempt to perform an update here. + // + // If we have a precise version, then we'll update lazily during the + // querying phase. Note that precise in this case is only + // `Some("locked")` as other `Some` values indicate a `cargo update + // --precise` request + if self.source_id.precise() != Some("locked") { + self.do_update()?; + } + Ok(()) + } + + fn download(&mut self, package: &PackageId) -> CargoResult { + let hash = self.index.hash(package, &mut *self.ops)?; + let path = self.ops.download(package, &hash)?; + let path = self.unpack_package(package, &path).chain_err(|| { + internal(format!("failed to unpack package `{}`", package)) + })?; + let mut src = PathSource::new(&path, &self.source_id, self.config); + src.update()?; + let pkg = src.download(package)?; + + // Unfortunately the index and the actual Cargo.toml in the index can + // differ due to historical Cargo bugs. To paper over these we trash the + // *summary* loaded from the Cargo.toml we just downloaded with the one + // we loaded from the index. 
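Stepping back to the `query` implementation earlier in this impl: for a lockfile-pinned (precise) dependency it first consults the already-checked-out index and only touches the network when that lookup comes back empty. A shape-only sketch of that control flow; the `String`/`Vec` types are illustrative stand-ins for cargo's `Summary` and `CargoResult`.

```rust
/// Query the cached index first; update and retry only if nothing matched.
fn query_with_lazy_update<Q, U>(mut query: Q, mut update: U) -> Result<Vec<String>, String>
where
    Q: FnMut() -> Result<Vec<String>, String>,
    U: FnMut() -> Result<(), String>,
{
    let cached = query()?;
    if !cached.is_empty() {
        return Ok(cached); // the local index already knows this version
    }
    update()?; // the lockfile says it exists, so refresh the index and retry
    query()
}
```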
+ let summaries = self.index.summaries(package.name(), &mut *self.ops)?; + let summary = summaries.iter().map(|s| &s.0).find(|s| { + s.package_id() == package + }).expect("summary not found"); + let mut manifest = pkg.manifest().clone(); + manifest.set_summary(summary.clone()); + Ok(Package::new(manifest, pkg.manifest_path())) + } + + fn fingerprint(&self, pkg: &Package) -> CargoResult { + Ok(pkg.package_id().version().to_string()) + } +} + +// TODO: this is pretty unfortunate, ideally we'd use `DeserializeSeed` which +// is intended for "deserializing with context" but that means we couldn't +// use `#[derive(Deserialize)]` on `RegistryPackage` unfortunately. +// +// I'm told, however, that https://github.com/serde-rs/serde/pull/909 will solve +// all our problems here. Until that lands this thread local is just a +// workaround in the meantime. +// +// If you're reading this and find this thread local funny, check to see if that +// PR is merged. If it is then let's ditch this thread local! +scoped_thread_local!(static DEFAULT_ID: SourceId); + +impl<'de> de::Deserialize<'de> for DependencyList { + fn deserialize(deserializer: D) -> Result + where D: de::Deserializer<'de>, + { + return deserializer.deserialize_seq(Visitor); + + struct Visitor; + + impl<'de> de::Visitor<'de> for Visitor { + type Value = DependencyList; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a list of dependencies") + } + + fn visit_seq(self, mut seq: A) -> Result + where A: de::SeqAccess<'de>, + { + let mut ret = Vec::new(); + if let Some(size) = seq.size_hint() { + ret.reserve(size); + } + while let Some(element) = seq.next_element::()? { + ret.push(parse_registry_dependency(element).map_err(|e| { + de::Error::custom(e) + })?); + } + + Ok(DependencyList { inner: ret }) + } + } + } +} + +/// Converts an encoded dependency in the registry to a cargo dependency +fn parse_registry_dependency(dep: RegistryDependency) + -> CargoResult { + let RegistryDependency { + name, req, features, optional, default_features, target, kind + } = dep; + + let mut dep = DEFAULT_ID.with(|id| { + Dependency::parse_no_deprecated(&name, Some(&req), id) + })?; + let kind = match kind.as_ref().map(|s| &s[..]).unwrap_or("") { + "dev" => Kind::Development, + "build" => Kind::Build, + _ => Kind::Normal, + }; + + let platform = match target { + Some(target) => Some(target.parse()?), + None => None, + }; + + // Unfortunately older versions of cargo and/or the registry ended up + // publishing lots of entries where the features array contained the + // empty feature, "", inside. This confuses the resolution process much + // later on and these features aren't actually valid, so filter them all + // out here. 
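For context on where the `RegistryDependency` values converted below originate, here is a miniature, runnable stand-in for the index-line format: one JSON object per published version, one object per line of an index file. The `IndexLine` struct is a trimmed hypothetical, not the vendored `RegistryPackage`, and it assumes the `serde`/`serde_json` crates.

```rust
use serde::Deserialize;

/// A trimmed stand-in for `RegistryPackage` (no deps/features plumbing).
#[derive(Deserialize, Debug)]
struct IndexLine {
    name: String,
    vers: String,
    cksum: String,
    yanked: Option<bool>,
}

fn main() {
    // Unknown fields are ignored by serde by default, which is the
    // forwards-compatibility property the index parsing relies on.
    let line = r#"{"name":"demo","vers":"0.1.0","cksum":"00ff","yanked":false,"links":"z"}"#;
    let pkg: IndexLine = serde_json::from_str(line).unwrap();
    assert_eq!(pkg.name, "demo");
    assert_eq!(pkg.yanked, Some(false));
}
```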
+ let features = features.into_iter().filter(|s| !s.is_empty()).collect(); + + dep.set_optional(optional) + .set_default_features(default_features) + .set_features(features) + .set_platform(platform) + .set_kind(kind); + Ok(dep) +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/sources/registry/remote.rs b/collector/compile-benchmarks/cargo/src/cargo/sources/registry/remote.rs new file mode 100644 index 000000000..6704282b8 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/sources/registry/remote.rs @@ -0,0 +1,260 @@ +use std::cell::{RefCell, Ref, Cell}; +use std::io::SeekFrom; +use std::io::prelude::*; +use std::mem; +use std::path::Path; + +use git2; +use hex::ToHex; +use serde_json; + +use core::{PackageId, SourceId}; +use ops; +use sources::git; +use sources::registry::{RegistryData, RegistryConfig, INDEX_LOCK}; +use util::network; +use util::{FileLock, Filesystem, LazyCell}; +use util::{Config, Sha256, ToUrl}; +use util::errors::{CargoErrorKind, CargoResult, CargoResultExt}; + +pub struct RemoteRegistry<'cfg> { + index_path: Filesystem, + cache_path: Filesystem, + source_id: SourceId, + config: &'cfg Config, + tree: RefCell>>, + repo: LazyCell, + head: Cell>, +} + +impl<'cfg> RemoteRegistry<'cfg> { + pub fn new(source_id: &SourceId, config: &'cfg Config, name: &str) + -> RemoteRegistry<'cfg> { + RemoteRegistry { + index_path: config.registry_index_path().join(name), + cache_path: config.registry_cache_path().join(name), + source_id: source_id.clone(), + config: config, + tree: RefCell::new(None), + repo: LazyCell::new(), + head: Cell::new(None), + } + } + + fn repo(&self) -> CargoResult<&git2::Repository> { + self.repo.get_or_try_init(|| { + let path = self.index_path.clone().into_path_unlocked(); + + // Fast path without a lock + if let Ok(repo) = git2::Repository::open(&path) { + return Ok(repo) + } + + // Ok, now we need to lock and try the whole thing over again. + let lock = self.index_path.open_rw(Path::new(INDEX_LOCK), + self.config, + "the registry index")?; + match git2::Repository::open(&path) { + Ok(repo) => Ok(repo), + Err(_) => { + let _ = lock.remove_siblings(); + + // Note that we'd actually prefer to use a bare repository + // here as we're not actually going to check anything out. + // All versions of Cargo, though, share the same CARGO_HOME, + // so for compatibility with older Cargo which *does* do + // checkouts we make sure to initialize a new full + // repository (not a bare one). + // + // We should change this to `init_bare` whenever we feel + // like enough time has passed or if we change the directory + // that the folder is located in, such as by changing the + // hash at the end of the directory. + Ok(git2::Repository::init(&path)?) + } + } + }) + } + + fn head(&self) -> CargoResult { + if self.head.get().is_none() { + let oid = self.repo()?.refname_to_id("refs/remotes/origin/master")?; + self.head.set(Some(oid)); + } + Ok(self.head.get().unwrap()) + } + + fn tree(&self) -> CargoResult> { + { + let tree = self.tree.borrow(); + if tree.is_some() { + return Ok(Ref::map(tree, |s| s.as_ref().unwrap())) + } + } + let repo = self.repo()?; + let commit = repo.find_commit(self.head()?)?; + let tree = commit.tree()?; + + // Unfortunately in libgit2 the tree objects look like they've got a + // reference to the repository object which means that a tree cannot + // outlive the repository that it came from. Here we want to cache this + // tree, though, so to accomplish this we transmute it to a static + // lifetime. 
+ // + // Note that we don't actually hand out the static lifetime, instead we + // only return a scoped one from this function. Additionally the repo + // we loaded from (above) lives as long as this object + // (`RemoteRegistry`) so we then just need to ensure that the tree is + // destroyed first in the destructor, hence the destructor on + // `RemoteRegistry` below. + let tree = unsafe { + mem::transmute::>(tree) + }; + *self.tree.borrow_mut() = Some(tree); + Ok(Ref::map(self.tree.borrow(), |s| s.as_ref().unwrap())) + } +} + +impl<'cfg> RegistryData for RemoteRegistry<'cfg> { + fn index_path(&self) -> &Filesystem { + &self.index_path + } + + fn load(&self, + _root: &Path, + path: &Path, + data: &mut FnMut(&[u8]) -> CargoResult<()>) -> CargoResult<()> { + // Note that the index calls this method and the filesystem is locked + // in the index, so we don't need to worry about an `update_index` + // happening in a different process. + let repo = self.repo()?; + let tree = self.tree()?; + let entry = tree.get_path(path)?; + let object = entry.to_object(repo)?; + let blob = match object.as_blob() { + Some(blob) => blob, + None => bail!("path `{}` is not a blob in the git repo", path.display()), + }; + data(blob.content()) + } + + fn config(&mut self) -> CargoResult> { + self.repo()?; // create intermediate dirs and initialize the repo + let _lock = self.index_path.open_ro(Path::new(INDEX_LOCK), + self.config, + "the registry index")?; + let mut config = None; + self.load(Path::new(""), Path::new("config.json"), &mut |json| { + config = Some(serde_json::from_slice(json)?); + Ok(()) + })?; + Ok(config) + } + + fn update_index(&mut self) -> CargoResult<()> { + // Ensure that we'll actually be able to acquire an HTTP handle later on + // once we start trying to download crates. This will weed out any + // problems with `.cargo/config` configuration related to HTTP. + // + // This way if there's a problem the error gets printed before we even + // hit the index, which may not actually read this configuration. + ops::http_handle(self.config)?; + + self.repo()?; + self.head.set(None); + *self.tree.borrow_mut() = None; + let _lock = self.index_path.open_rw(Path::new(INDEX_LOCK), + self.config, + "the registry index")?; + self.config.shell().status("Updating", + format!("registry `{}`", self.source_id.url()))?; + + // git fetch origin master + let url = self.source_id.url(); + let refspec = "refs/heads/master:refs/remotes/origin/master"; + let repo = self.repo.borrow_mut().unwrap(); + git::fetch(repo, url, refspec, self.config).chain_err(|| { + format!("failed to fetch `{}`", url) + })?; + Ok(()) + } + + fn download(&mut self, pkg: &PackageId, checksum: &str) + -> CargoResult { + let filename = format!("{}-{}.crate", pkg.name(), pkg.version()); + let path = Path::new(&filename); + + // Attempt to open an read-only copy first to avoid an exclusive write + // lock and also work with read-only filesystems. Note that we check the + // length of the file like below to handle interrupted downloads. + // + // If this fails then we fall through to the exclusive path where we may + // have to redownload the file. 
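A lock-free sketch of the cache probe described above, with plain `File::open` standing in for cargo's `Filesystem::open_ro`/`open_rw` file locks (those are elided here; the zero-length check is the same interrupted-download heuristic).

```rust
use std::fs::File;
use std::io;
use std::path::Path;

/// Try the cached .crate first; a missing or zero-length file means the
/// caller must (re)download it.
fn cached_crate(path: &Path) -> io::Result<Option<File>> {
    let file = match File::open(path) {
        Ok(f) => f,
        Err(ref e) if e.kind() == io::ErrorKind::NotFound => return Ok(None),
        Err(e) => return Err(e),
    };
    // A zero-length file means a previous download was interrupted; treat
    // it as absent so the caller re-fetches and overwrites it.
    if file.metadata()?.len() > 0 {
        Ok(Some(file))
    } else {
        Ok(None)
    }
}
```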
+ if let Ok(dst) = self.cache_path.open_ro(path, self.config, &filename) { + let meta = dst.file().metadata()?; + if meta.len() > 0 { + return Ok(dst) + } + } + let mut dst = self.cache_path.open_rw(path, self.config, &filename)?; + let meta = dst.file().metadata()?; + if meta.len() > 0 { + return Ok(dst) + } + self.config.shell().status("Downloading", pkg)?; + + let config = self.config()?.unwrap(); + let mut url = config.dl.to_url()?; + url.path_segments_mut().unwrap() + .push(pkg.name()) + .push(&pkg.version().to_string()) + .push("download"); + + // TODO: don't download into memory, but ensure that if we ctrl-c a + // download we should resume either from the start or the middle + // on the next time + let url = url.to_string(); + let mut handle = self.config.http()?.borrow_mut(); + handle.get(true)?; + handle.url(&url)?; + handle.follow_location(true)?; + let mut state = Sha256::new(); + let mut body = Vec::new(); + network::with_retry(self.config, || { + state = Sha256::new(); + body = Vec::new(); + { + let mut handle = handle.transfer(); + handle.write_function(|buf| { + state.update(buf); + body.extend_from_slice(buf); + Ok(buf.len()) + })?; + handle.perform()?; + } + let code = handle.response_code()?; + if code != 200 && code != 0 { + let url = handle.effective_url()?.unwrap_or(&url); + Err(CargoErrorKind::HttpNot200(code, url.to_string()).into()) + } else { + Ok(()) + } + })?; + + // Verify what we just downloaded + if state.finish().to_hex() != checksum { + bail!("failed to verify the checksum of `{}`", pkg) + } + + dst.write_all(&body)?; + dst.seek(SeekFrom::Start(0))?; + Ok(dst) + } +} + +impl<'cfg> Drop for RemoteRegistry<'cfg> { + fn drop(&mut self) { + // Just be sure to drop this before our other fields + self.tree.borrow_mut().take(); + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/sources/replaced.rs b/collector/compile-benchmarks/cargo/src/cargo/sources/replaced.rs new file mode 100644 index 000000000..5048f6186 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/sources/replaced.rs @@ -0,0 +1,75 @@ +use core::{Source, Registry, PackageId, Package, Dependency, Summary, SourceId}; +use util::errors::{CargoResult, CargoResultExt}; + +pub struct ReplacedSource<'cfg> { + to_replace: SourceId, + replace_with: SourceId, + inner: Box, +} + +impl<'cfg> ReplacedSource<'cfg> { + pub fn new(to_replace: &SourceId, + replace_with: &SourceId, + src: Box) -> ReplacedSource<'cfg> { + ReplacedSource { + to_replace: to_replace.clone(), + replace_with: replace_with.clone(), + inner: src, + } + } +} + +impl<'cfg> Registry for ReplacedSource<'cfg> { + fn query(&mut self, + dep: &Dependency, + f: &mut FnMut(Summary)) -> CargoResult<()> { + let (replace_with, to_replace) = (&self.replace_with, &self.to_replace); + let dep = dep.clone().map_source(to_replace, replace_with); + + self.inner.query(&dep, &mut |summary| { + f(summary.map_source(replace_with, to_replace)) + }).chain_err(|| { + format!("failed to query replaced source `{}`", + self.to_replace) + }) + } + + fn supports_checksums(&self) -> bool { + self.inner.supports_checksums() + } + + fn requires_precise(&self) -> bool { + self.inner.requires_precise() + } +} + +impl<'cfg> Source for ReplacedSource<'cfg> { + fn source_id(&self) -> &SourceId { + &self.to_replace + } + + fn update(&mut self) -> CargoResult<()> { + self.inner.update().chain_err(|| { + format!("failed to update replaced source `{}`", + self.to_replace) + }) + } + + fn download(&mut self, id: &PackageId) -> CargoResult { + let id = 
id.with_source_id(&self.replace_with); + let pkg = self.inner.download(&id).chain_err(|| { + format!("failed to download replaced source `{}`", + self.to_replace) + })?; + Ok(pkg.map_source(&self.replace_with, &self.to_replace)) + } + + fn fingerprint(&self, id: &Package) -> CargoResult { + self.inner.fingerprint(id) + } + + fn verify(&self, id: &PackageId) -> CargoResult<()> { + let id = id.with_source_id(&self.replace_with); + self.inner.verify(&id) + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/util/cfg.rs b/collector/compile-benchmarks/cargo/src/cargo/util/cfg.rs new file mode 100644 index 000000000..341b24d6d --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/util/cfg.rs @@ -0,0 +1,261 @@ +use std::str::{self, FromStr}; +use std::iter; +use std::fmt; + +use util::{CargoError, CargoResult}; + +#[derive(Clone, PartialEq, Debug)] +pub enum Cfg { + Name(String), + KeyPair(String, String), +} + +#[derive(Clone, PartialEq, Debug)] +pub enum CfgExpr { + Not(Box), + All(Vec), + Any(Vec), + Value(Cfg), +} + +#[derive(PartialEq)] +enum Token<'a> { + LeftParen, + RightParen, + Ident(&'a str), + Comma, + Equals, + String(&'a str), +} + +struct Tokenizer<'a> { + s: iter::Peekable>, + orig: &'a str, +} + +struct Parser<'a> { + t: iter::Peekable>, +} + +impl FromStr for Cfg { + type Err = CargoError; + + fn from_str(s: &str) -> CargoResult { + let mut p = Parser::new(s); + let e = p.cfg()?; + if p.t.next().is_some() { + bail!("malformed cfg value or key/value pair: `{}`", s) + } + Ok(e) + } +} + +impl fmt::Display for Cfg { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Cfg::Name(ref s) => s.fmt(f), + Cfg::KeyPair(ref k, ref v) => write!(f, "{} = \"{}\"", k, v), + } + } +} + +impl CfgExpr { + pub fn matches(&self, cfg: &[Cfg]) -> bool { + match *self { + CfgExpr::Not(ref e) => !e.matches(cfg), + CfgExpr::All(ref e) => e.iter().all(|e| e.matches(cfg)), + CfgExpr::Any(ref e) => e.iter().any(|e| e.matches(cfg)), + CfgExpr::Value(ref e) => cfg.contains(e), + } + } +} + +impl FromStr for CfgExpr { + type Err = CargoError; + + fn from_str(s: &str) -> CargoResult { + let mut p = Parser::new(s); + let e = p.expr()?; + if p.t.next().is_some() { + bail!("can only have one cfg-expression, consider using all() or \ + any() explicitly") + } + Ok(e) + } +} + +impl fmt::Display for CfgExpr { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + CfgExpr::Not(ref e) => write!(f, "not({})", e), + CfgExpr::All(ref e) => write!(f, "all({})", CommaSep(e)), + CfgExpr::Any(ref e) => write!(f, "any({})", CommaSep(e)), + CfgExpr::Value(ref e) => write!(f, "{}", e), + } + } +} + +struct CommaSep<'a, T: 'a>(&'a [T]); + +impl<'a, T: fmt::Display> fmt::Display for CommaSep<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + for (i, v) in self.0.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + write!(f, "{}", v)?; + } + Ok(()) + } +} + +impl<'a> Parser<'a> { + fn new(s: &'a str) -> Parser<'a> { + Parser { + t: Tokenizer { + s: s.char_indices().peekable(), + orig: s, + }.peekable(), + } + } + + fn expr(&mut self) -> CargoResult { + match self.t.peek() { + Some(&Ok(Token::Ident(op @ "all"))) | + Some(&Ok(Token::Ident(op @ "any"))) => { + self.t.next(); + let mut e = Vec::new(); + self.eat(Token::LeftParen)?; + while !self.try(Token::RightParen) { + e.push(self.expr()?); + if !self.try(Token::Comma) { + self.eat(Token::RightParen)?; + break + } + } + if op == "all" { + Ok(CfgExpr::All(e)) + } else { + Ok(CfgExpr::Any(e)) + } 
+ } + Some(&Ok(Token::Ident("not"))) => { + self.t.next(); + self.eat(Token::LeftParen)?; + let e = self.expr()?; + self.eat(Token::RightParen)?; + Ok(CfgExpr::Not(Box::new(e))) + } + Some(&Ok(..)) => self.cfg().map(CfgExpr::Value), + Some(&Err(..)) => { + Err(self.t.next().unwrap().err().unwrap()) + } + None => bail!("expected start of a cfg expression, \ + found nothing"), + } + } + + fn cfg(&mut self) -> CargoResult { + match self.t.next() { + Some(Ok(Token::Ident(name))) => { + let e = if self.try(Token::Equals) { + let val = match self.t.next() { + Some(Ok(Token::String(s))) => s, + Some(Ok(t)) => bail!("expected a string, found {}", + t.classify()), + Some(Err(e)) => return Err(e), + None => bail!("expected a string, found nothing"), + }; + Cfg::KeyPair(name.to_string(), val.to_string()) + } else { + Cfg::Name(name.to_string()) + }; + Ok(e) + } + Some(Ok(t)) => bail!("expected identifier, found {}", t.classify()), + Some(Err(e)) => Err(e), + None => bail!("expected identifier, found nothing"), + } + } + + fn try(&mut self, token: Token<'a>) -> bool { + match self.t.peek() { + Some(&Ok(ref t)) if token == *t => {} + _ => return false, + } + self.t.next(); + true + } + + fn eat(&mut self, token: Token<'a>) -> CargoResult<()> { + match self.t.next() { + Some(Ok(ref t)) if token == *t => Ok(()), + Some(Ok(t)) => bail!("expected {}, found {}", token.classify(), + t.classify()), + Some(Err(e)) => Err(e), + None => bail!("expected {}, but cfg expr ended", token.classify()), + } + } +} + +impl<'a> Iterator for Tokenizer<'a> { + type Item = CargoResult>; + + fn next(&mut self) -> Option>> { + loop { + match self.s.next() { + Some((_, ' ')) => {} + Some((_, '(')) => return Some(Ok(Token::LeftParen)), + Some((_, ')')) => return Some(Ok(Token::RightParen)), + Some((_, ',')) => return Some(Ok(Token::Comma)), + Some((_, '=')) => return Some(Ok(Token::Equals)), + Some((start, '"')) => { + while let Some((end, ch)) = self.s.next() { + if ch == '"' { + return Some(Ok(Token::String(&self.orig[start+1..end]))) + } + } + return Some(Err("unterminated string in cfg".into())) + } + Some((start, ch)) if is_ident_start(ch) => { + while let Some(&(end, ch)) = self.s.peek() { + if !is_ident_rest(ch) { + return Some(Ok(Token::Ident(&self.orig[start..end]))) + } else { + self.s.next(); + } + } + return Some(Ok(Token::Ident(&self.orig[start..]))) + } + Some((_, ch)) => { + return Some(Err(format!("unexpected character in \ + cfg `{}`, expected parens, \ + a comma, an identifier, or \ + a string", ch).into())) + } + None => return None + } + } + } +} + +fn is_ident_start(ch: char) -> bool { + ch == '_' || ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') +} + +fn is_ident_rest(ch: char) -> bool { + is_ident_start(ch) || ('0' <= ch && ch <= '9') +} + +impl<'a> Token<'a> { + fn classify(&self) -> &str { + match *self { + Token::LeftParen => "`(`", + Token::RightParen => "`)`", + Token::Ident(..) => "an identifier", + Token::Comma => "`,`", + Token::Equals => "`=`", + Token::String(..) 
=> "a string", + } + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/util/config.rs b/collector/compile-benchmarks/cargo/src/cargo/util/config.rs new file mode 100644 index 000000000..039993567 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/util/config.rs @@ -0,0 +1,882 @@ +use std::cell::{RefCell, RefMut}; +use std::collections::HashSet; +use std::collections::hash_map::Entry::{Occupied, Vacant}; +use std::collections::hash_map::HashMap; +use std::env; +use std::fmt; +use std::fs::{self, File}; +use std::io::SeekFrom; +use std::io::prelude::*; +use std::mem; +use std::path::{Path, PathBuf}; +use std::str::FromStr; +use std::sync::{Once, ONCE_INIT}; + +use curl::easy::Easy; +use jobserver; +use serde::{Serialize, Serializer}; +use toml; + +use core::shell::Verbosity; +use core::{Shell, CliUnstable}; +use ops; +use util::Rustc; +use util::errors::{CargoResult, CargoResultExt, CargoError, internal}; +use util::paths; +use util::toml as cargo_toml; +use util::{Filesystem, LazyCell}; + +use self::ConfigValue as CV; + +/// Configuration information for cargo. This is not specific to a build, it is information +/// relating to cargo itself. +/// +/// This struct implements `Default`: all fields can be inferred. +#[derive(Debug)] +pub struct Config { + /// The location of the users's 'home' directory. OS-dependent. + home_path: Filesystem, + /// Information about how to write messages to the shell + shell: RefCell, + /// Information on how to invoke the compiler (rustc) + rustc: LazyCell, + /// A collection of configuration options + values: LazyCell>, + /// The current working directory of cargo + cwd: PathBuf, + /// The location of the cargo executable (path to current process) + cargo_exe: LazyCell, + /// The location of the rustdoc executable + rustdoc: LazyCell, + /// Whether we are printing extra verbose messages + extra_verbose: bool, + /// `frozen` is set if we shouldn't access the network + frozen: bool, + /// `locked` is set if we should not update lock files + locked: bool, + /// A global static IPC control mechanism (used for managing parallel builds) + jobserver: Option, + /// Cli flags of the form "-Z something" + cli_flags: CliUnstable, + /// A handle on curl easy mode for http calls + easy: LazyCell>, +} + +impl Config { + pub fn new(shell: Shell, + cwd: PathBuf, + homedir: PathBuf) -> Config { + static mut GLOBAL_JOBSERVER: *mut jobserver::Client = 0 as *mut _; + static INIT: Once = ONCE_INIT; + + // This should be called early on in the process, so in theory the + // unsafety is ok here. (taken ownership of random fds) + INIT.call_once(|| unsafe { + if let Some(client) = jobserver::Client::from_env() { + GLOBAL_JOBSERVER = Box::into_raw(Box::new(client)); + } + }); + + Config { + home_path: Filesystem::new(homedir), + shell: RefCell::new(shell), + rustc: LazyCell::new(), + cwd: cwd, + values: LazyCell::new(), + cargo_exe: LazyCell::new(), + rustdoc: LazyCell::new(), + extra_verbose: false, + frozen: false, + locked: false, + jobserver: unsafe { + if GLOBAL_JOBSERVER.is_null() { + None + } else { + Some((*GLOBAL_JOBSERVER).clone()) + } + }, + cli_flags: CliUnstable::default(), + easy: LazyCell::new(), + } + } + + pub fn default() -> CargoResult { + let shell = Shell::new(); + let cwd = env::current_dir().chain_err(|| { + "couldn't get the current directory of the process" + })?; + let homedir = homedir(&cwd).ok_or_else(|| { + "Cargo couldn't find your home directory. \ + This probably means that $HOME was not set." 
+ })?; + Ok(Config::new(shell, cwd, homedir)) + } + + /// The user's cargo home directory (OS-dependent) + pub fn home(&self) -> &Filesystem { &self.home_path } + + /// The cargo git directory (`/git`) + pub fn git_path(&self) -> Filesystem { + self.home_path.join("git") + } + + /// The cargo registry index directory (`/registry/index`) + pub fn registry_index_path(&self) -> Filesystem { + self.home_path.join("registry").join("index") + } + + /// The cargo registry cache directory (`/registry/path`) + pub fn registry_cache_path(&self) -> Filesystem { + self.home_path.join("registry").join("cache") + } + + /// The cargo registry source directory (`/registry/src`) + pub fn registry_source_path(&self) -> Filesystem { + self.home_path.join("registry").join("src") + } + + /// Get a reference to the shell, for e.g. writing error messages + pub fn shell(&self) -> RefMut { + self.shell.borrow_mut() + } + + /// Get the path to the `rustdoc` executable + pub fn rustdoc(&self) -> CargoResult<&Path> { + self.rustdoc.get_or_try_init(|| self.get_tool("rustdoc")).map(AsRef::as_ref) + } + + /// Get the path to the `rustc` executable + pub fn rustc(&self) -> CargoResult<&Rustc> { + self.rustc.get_or_try_init(|| Rustc::new(self.get_tool("rustc")?, + self.maybe_get_tool("rustc_wrapper")?)) + } + + /// Get the path to the `cargo` executable + pub fn cargo_exe(&self) -> CargoResult<&Path> { + self.cargo_exe.get_or_try_init(|| + env::current_exe().and_then(|path| path.canonicalize()) + .chain_err(|| "couldn't get the path to cargo executable") + ).map(AsRef::as_ref) + } + + pub fn values(&self) -> CargoResult<&HashMap> { + self.values.get_or_try_init(|| self.load_values()) + } + + pub fn set_values(&self, values: HashMap) -> CargoResult<()> { + if self.values.borrow().is_some() { + return Err("Config values already found".into()); + } + match self.values.fill(values) { + Ok(()) => Ok(()), + Err(_) => Err("Could not fill values".into()), + } + } + + pub fn cwd(&self) -> &Path { &self.cwd } + + pub fn target_dir(&self) -> CargoResult> { + if let Some(dir) = env::var_os("CARGO_TARGET_DIR") { + Ok(Some(Filesystem::new(self.cwd.join(dir)))) + } else if let Some(val) = self.get_path("build.target-dir")? { + let val = self.cwd.join(val.val); + Ok(Some(Filesystem::new(val))) + } else { + Ok(None) + } + } + + fn get(&self, key: &str) -> CargoResult> { + let vals = self.values()?; + let mut parts = key.split('.').enumerate(); + let mut val = match vals.get(parts.next().unwrap().1) { + Some(val) => val, + None => return Ok(None), + }; + for (i, part) in parts { + match *val { + CV::Table(ref map, _) => { + val = match map.get(part) { + Some(val) => val, + None => return Ok(None), + } + } + CV::Integer(_, ref path) | + CV::String(_, ref path) | + CV::List(_, ref path) | + CV::Boolean(_, ref path) => { + let idx = key.split('.').take(i) + .fold(0, |n, s| n + s.len()) + i - 1; + let key_so_far = &key[..idx]; + bail!("expected table for configuration key `{}`, \ + but found {} in {}", + key_so_far, val.desc(), path.display()) + } + } + } + Ok(Some(val.clone())) + } + + fn get_env(&self, key: &str) -> CargoResult>> + where CargoError: From + { + let key = key.replace(".", "_") + .replace("-", "_") + .chars() + .flat_map(|c| c.to_uppercase()) + .collect::(); + match env::var(&format!("CARGO_{}", key)) { + Ok(value) => { + Ok(Some(Value { + val: value.parse()?, + definition: Definition::Environment, + })) + } + Err(..) 
=> Ok(None), + } + } + + pub fn get_string(&self, key: &str) -> CargoResult>> { + if let Some(v) = self.get_env(key)? { + return Ok(Some(v)) + } + match self.get(key)? { + Some(CV::String(i, path)) => { + Ok(Some(Value { + val: i, + definition: Definition::Path(path), + })) + } + Some(val) => self.expected("string", key, val), + None => Ok(None), + } + } + + pub fn get_bool(&self, key: &str) -> CargoResult>> { + if let Some(v) = self.get_env(key)? { + return Ok(Some(v)) + } + match self.get(key)? { + Some(CV::Boolean(b, path)) => { + Ok(Some(Value { + val: b, + definition: Definition::Path(path), + })) + } + Some(val) => self.expected("bool", key, val), + None => Ok(None), + } + } + + fn string_to_path(&self, value: String, definition: &Definition) -> PathBuf { + let is_path = value.contains('/') || + (cfg!(windows) && value.contains('\\')); + if is_path { + definition.root(self).join(value) + } else { + // A pathless name + PathBuf::from(value) + } + } + + pub fn get_path(&self, key: &str) -> CargoResult>> { + if let Some(val) = self.get_string(key)? { + Ok(Some(Value { + val: self.string_to_path(val.val, &val.definition), + definition: val.definition + })) + } else { + Ok(None) + } + } + + pub fn get_path_and_args(&self, key: &str) + -> CargoResult)>>> { + if let Some(mut val) = self.get_list_or_split_string(key)? { + if !val.val.is_empty() { + return Ok(Some(Value { + val: (self.string_to_path(val.val.remove(0), &val.definition), val.val), + definition: val.definition + })); + } + } + Ok(None) + } + + pub fn get_list(&self, key: &str) + -> CargoResult>>> { + match self.get(key)? { + Some(CV::List(i, path)) => { + Ok(Some(Value { + val: i, + definition: Definition::Path(path), + })) + } + Some(val) => self.expected("list", key, val), + None => Ok(None), + } + } + + pub fn get_list_or_split_string(&self, key: &str) + -> CargoResult>>> { + match self.get_env::(key) { + Ok(Some(value)) => + return Ok(Some(Value { + val: value.val.split(' ').map(str::to_string).collect(), + definition: value.definition + })), + Err(err) => return Err(err), + Ok(None) => (), + } + + match self.get(key)? { + Some(CV::List(i, path)) => { + Ok(Some(Value { + val: i.into_iter().map(|(s, _)| s).collect(), + definition: Definition::Path(path), + })) + } + Some(CV::String(i, path)) => { + Ok(Some(Value { + val: i.split(' ').map(str::to_string).collect(), + definition: Definition::Path(path), + })) + } + Some(val) => self.expected("list or string", key, val), + None => Ok(None), + } + } + + pub fn get_table(&self, key: &str) + -> CargoResult>>> { + match self.get(key)? { + Some(CV::Table(i, path)) => { + Ok(Some(Value { + val: i, + definition: Definition::Path(path), + })) + } + Some(val) => self.expected("table", key, val), + None => Ok(None), + } + } + + pub fn get_i64(&self, key: &str) -> CargoResult>> { + if let Some(v) = self.get_env(key)? { + return Ok(Some(v)) + } + match self.get(key)? { + Some(CV::Integer(i, path)) => { + Ok(Some(Value { + val: i, + definition: Definition::Path(path), + })) + } + Some(val) => self.expected("integer", key, val), + None => Ok(None), + } + } + + pub fn net_retry(&self) -> CargoResult { + match self.get_i64("net.retry")? 
{ + Some(v) => { + let value = v.val; + if value < 0 { + bail!("net.retry must be positive, but found {} in {}", + v.val, v.definition) + } else { + Ok(value) + } + } + None => Ok(2), + } + } + + pub fn expected(&self, ty: &str, key: &str, val: CV) -> CargoResult { + val.expected(ty, key).map_err(|e| { + format!("invalid configuration for key `{}`\n{}", key, e).into() + }) + } + + pub fn configure(&mut self, + verbose: u32, + quiet: Option, + color: &Option, + frozen: bool, + locked: bool, + unstable_flags: &[String]) -> CargoResult<()> { + let extra_verbose = verbose >= 2; + let verbose = if verbose == 0 {None} else {Some(true)}; + + // Ignore errors in the configuration files. + let cfg_verbose = self.get_bool("term.verbose").unwrap_or(None).map(|v| v.val); + let cfg_color = self.get_string("term.color").unwrap_or(None).map(|v| v.val); + + let color = color.as_ref().or_else(|| cfg_color.as_ref()); + + let verbosity = match (verbose, cfg_verbose, quiet) { + (Some(true), _, None) | + (None, Some(true), None) => Verbosity::Verbose, + + // command line takes precedence over configuration, so ignore the + // configuration. + (None, _, Some(true)) => Verbosity::Quiet, + + // Can't pass both at the same time on the command line regardless + // of configuration. + (Some(true), _, Some(true)) => { + bail!("cannot set both --verbose and --quiet"); + } + + // Can't actually get `Some(false)` as a value from the command + // line, so just ignore them here to appease exhaustiveness checking + // in match statements. + (Some(false), _, _) | + (_, _, Some(false)) | + + (None, Some(false), None) | + (None, None, None) => Verbosity::Normal, + }; + + self.shell().set_verbosity(verbosity); + self.shell().set_color_choice(color.map(|s| &s[..]))?; + self.extra_verbose = extra_verbose; + self.frozen = frozen; + self.locked = locked; + self.cli_flags.parse(unstable_flags)?; + + Ok(()) + } + + pub fn cli_unstable(&self) -> &CliUnstable { + &self.cli_flags + } + + pub fn extra_verbose(&self) -> bool { + self.extra_verbose + } + + pub fn network_allowed(&self) -> bool { + !self.frozen + } + + pub fn lock_update_allowed(&self) -> bool { + !self.frozen && !self.locked + } + + /// Loads configuration from the filesystem + pub fn load_values(&self) -> CargoResult> { + let mut cfg = CV::Table(HashMap::new(), PathBuf::from(".")); + + walk_tree(&self.cwd, |path| { + let mut contents = String::new(); + let mut file = File::open(&path)?; + file.read_to_string(&mut contents).chain_err(|| { + format!("failed to read configuration file `{}`", + path.display()) + })?; + let toml = cargo_toml::parse(&contents, + path, + self).chain_err(|| { + format!("could not parse TOML configuration in `{}`", + path.display()) + })?; + let value = CV::from_toml(path, toml).chain_err(|| { + format!("failed to load TOML configuration from `{}`", + path.display()) + })?; + cfg.merge(value).chain_err(|| { + format!("failed to merge configuration at `{}`", path.display()) + })?; + Ok(()) + }).chain_err(|| "Couldn't load Cargo configuration")?; + + self.load_credentials(&mut cfg)?; + match cfg { + CV::Table(map, _) => Ok(map), + _ => unreachable!(), + } + } + + /// Loads credentials config from the credentials file into the ConfigValue object, if present. 
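+    /// Values come from `$CARGO_HOME/credentials` and are merged into the
+    /// in-memory `registry` table, overriding keys of the same name loaded
+    /// from the regular configuration files.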
+ fn load_credentials(&self, cfg: &mut ConfigValue) -> CargoResult<()> { + let home_path = self.home_path.clone().into_path_unlocked(); + let credentials = home_path.join("credentials"); + if !fs::metadata(&credentials).is_ok() { + return Ok(()); + } + + let mut contents = String::new(); + let mut file = File::open(&credentials)?; + file.read_to_string(&mut contents).chain_err(|| { + format!("failed to read configuration file `{}`", credentials.display()) + })?; + + let toml = cargo_toml::parse(&contents, + &credentials, + self).chain_err(|| { + format!("could not parse TOML configuration in `{}`", credentials.display()) + })?; + + let value = CV::from_toml(&credentials, toml).chain_err(|| { + format!("failed to load TOML configuration from `{}`", credentials.display()) + })?; + + let cfg = match *cfg { + CV::Table(ref mut map, _) => map, + _ => unreachable!(), + }; + + let registry = cfg.entry("registry".into()) + .or_insert_with(|| CV::Table(HashMap::new(), PathBuf::from("."))); + + match (registry, value) { + (&mut CV::Table(ref mut old, _), CV::Table(ref mut new, _)) => { + // Take ownership of `new` by swapping it with an empty hashmap, so we can move + // into an iterator. + let new = mem::replace(new, HashMap::new()); + for (key, value) in new { + old.insert(key, value); + } + } + _ => unreachable!(), + } + + Ok(()) + } + + /// Look for a path for `tool` in an environment variable or config path, but return `None` + /// if it's not present. + fn maybe_get_tool(&self, tool: &str) -> CargoResult> { + let var = tool.chars().flat_map(|c| c.to_uppercase()).collect::(); + if let Some(tool_path) = env::var_os(&var) { + return Ok(Some(PathBuf::from(tool_path))); + } + + let var = format!("build.{}", tool); + if let Some(tool_path) = self.get_path(&var)? { + return Ok(Some(tool_path.val)); + } + + Ok(None) + } + + /// Look for a path for `tool` in an environment variable or config path, defaulting to `tool` + /// as a path. 
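+    /// For example, looking up the `rustc` tool consults the `RUSTC`
+    /// environment variable first, then the `build.rustc` configuration
+    /// key, and finally falls back to the literal path `rustc`.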
+ fn get_tool(&self, tool: &str) -> CargoResult { + self.maybe_get_tool(tool) + .map(|t| t.unwrap_or_else(|| PathBuf::from(tool))) + } + + pub fn jobserver_from_env(&self) -> Option<&jobserver::Client> { + self.jobserver.as_ref() + } + + pub fn http(&self) -> CargoResult<&RefCell> { + self.easy.get_or_try_init(|| { + ops::http_handle(self).map(RefCell::new) + }) + } +} + +#[derive(Eq, PartialEq, Clone, Copy)] +pub enum Location { + Project, + Global +} + +#[derive(Eq,PartialEq,Clone,Deserialize)] +pub enum ConfigValue { + Integer(i64, PathBuf), + String(String, PathBuf), + List(Vec<(String, PathBuf)>, PathBuf), + Table(HashMap, PathBuf), + Boolean(bool, PathBuf), +} + +pub struct Value { + pub val: T, + pub definition: Definition, +} + +pub enum Definition { + Path(PathBuf), + Environment, +} + +impl fmt::Debug for ConfigValue { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + CV::Integer(i, ref path) => write!(f, "{} (from {})", i, + path.display()), + CV::Boolean(b, ref path) => write!(f, "{} (from {})", b, + path.display()), + CV::String(ref s, ref path) => write!(f, "{} (from {})", s, + path.display()), + CV::List(ref list, ref path) => { + write!(f, "[")?; + for (i, &(ref s, ref path)) in list.iter().enumerate() { + if i > 0 { write!(f, ", ")?; } + write!(f, "{} (from {})", s, path.display())?; + } + write!(f, "] (from {})", path.display()) + } + CV::Table(ref table, _) => write!(f, "{:?}", table), + } + } +} + +impl Serialize for ConfigValue { + fn serialize(&self, s: S) -> Result { + match *self { + CV::String(ref string, _) => string.serialize(s), + CV::List(ref list, _) => { + let list: Vec<&String> = list.iter().map(|s| &s.0).collect(); + list.serialize(s) + } + CV::Table(ref table, _) => table.serialize(s), + CV::Boolean(b, _) => b.serialize(s), + CV::Integer(i, _) => i.serialize(s), + } + } +} + +impl ConfigValue { + fn from_toml(path: &Path, toml: toml::Value) -> CargoResult { + match toml { + toml::Value::String(val) => Ok(CV::String(val, path.to_path_buf())), + toml::Value::Boolean(b) => Ok(CV::Boolean(b, path.to_path_buf())), + toml::Value::Integer(i) => Ok(CV::Integer(i, path.to_path_buf())), + toml::Value::Array(val) => { + Ok(CV::List(val.into_iter().map(|toml| { + match toml { + toml::Value::String(val) => Ok((val, path.to_path_buf())), + v => Err(format!("expected string but found {} \ + in list", v.type_str()).into()), + } + }).collect::>()?, path.to_path_buf())) + } + toml::Value::Table(val) => { + Ok(CV::Table(val.into_iter().map(|(key, value)| { + let value = CV::from_toml(path, value).chain_err(|| { + format!("failed to parse key `{}`", key) + })?; + Ok((key, value)) + }).collect::>()?, path.to_path_buf())) + } + v => bail!("found TOML configuration value of unknown type `{}`", + v.type_str()), + } + } + + fn into_toml(self) -> toml::Value { + match self { + CV::Boolean(s, _) => toml::Value::Boolean(s), + CV::String(s, _) => toml::Value::String(s), + CV::Integer(i, _) => toml::Value::Integer(i), + CV::List(l, _) => toml::Value::Array(l + .into_iter() + .map(|(s, _)| toml::Value::String(s)) + .collect()), + CV::Table(l, _) => toml::Value::Table(l.into_iter() + .map(|(k, v)| (k, v.into_toml())) + .collect()), + } + } + + fn merge(&mut self, from: ConfigValue) -> CargoResult<()> { + match (self, from) { + (&mut CV::String(..), CV::String(..)) | + (&mut CV::Integer(..), CV::Integer(..)) | + (&mut CV::Boolean(..), CV::Boolean(..)) => {} + (&mut CV::List(ref mut old, _), CV::List(ref mut new, _)) => { + let new = mem::replace(new, Vec::new()); 
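+                // `new` was swapped out above so its elements can be moved
+                // here rather than cloned.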
+ old.extend(new.into_iter()); + } + (&mut CV::Table(ref mut old, _), CV::Table(ref mut new, _)) => { + let new = mem::replace(new, HashMap::new()); + for (key, value) in new { + match old.entry(key.clone()) { + Occupied(mut entry) => { + let path = value.definition_path().to_path_buf(); + let entry = entry.get_mut(); + entry.merge(value).chain_err(|| { + format!("failed to merge key `{}` between \ + files:\n \ + file 1: {}\n \ + file 2: {}", + key, + entry.definition_path().display(), + path.display()) + + })?; + } + Vacant(entry) => { entry.insert(value); } + }; + } + } + (expected, found) => { + return Err(internal(format!("expected {}, but found {}", + expected.desc(), found.desc()))) + } + } + + Ok(()) + } + + pub fn i64(&self, key: &str) -> CargoResult<(i64, &Path)> { + match *self { + CV::Integer(i, ref p) => Ok((i, p)), + _ => self.expected("integer", key), + } + } + + pub fn string(&self, key: &str) -> CargoResult<(&str, &Path)> { + match *self { + CV::String(ref s, ref p) => Ok((s, p)), + _ => self.expected("string", key), + } + } + + pub fn table(&self, key: &str) + -> CargoResult<(&HashMap, &Path)> { + match *self { + CV::Table(ref table, ref p) => Ok((table, p)), + _ => self.expected("table", key), + } + } + + pub fn list(&self, key: &str) -> CargoResult<&[(String, PathBuf)]> { + match *self { + CV::List(ref list, _) => Ok(list), + _ => self.expected("list", key), + } + } + + pub fn boolean(&self, key: &str) -> CargoResult<(bool, &Path)> { + match *self { + CV::Boolean(b, ref p) => Ok((b, p)), + _ => self.expected("bool", key), + } + } + + pub fn desc(&self) -> &'static str { + match *self { + CV::Table(..) => "table", + CV::List(..) => "array", + CV::String(..) => "string", + CV::Boolean(..) => "boolean", + CV::Integer(..) => "integer", + } + } + + pub fn definition_path(&self) -> &Path { + match *self { + CV::Boolean(_, ref p) | + CV::Integer(_, ref p) | + CV::String(_, ref p) | + CV::List(_, ref p) | + CV::Table(_, ref p) => p + } + } + + fn expected(&self, wanted: &str, key: &str) -> CargoResult { + Err(format!("expected a {}, but found a {} for `{}` in {}", + wanted, self.desc(), key, + self.definition_path().display()).into()) + } +} + +impl Definition { + pub fn root<'a>(&'a self, config: &'a Config) -> &'a Path { + match *self { + Definition::Path(ref p) => p.parent().unwrap().parent().unwrap(), + Definition::Environment => config.cwd(), + } + } +} + +impl fmt::Display for Definition { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Definition::Path(ref p) => p.display().fmt(f), + Definition::Environment => "the environment".fmt(f), + } + } +} + +pub fn homedir(cwd: &Path) -> Option { + ::home::cargo_home_with_cwd(cwd).ok() +} + +fn walk_tree(pwd: &Path, mut walk: F) -> CargoResult<()> + where F: FnMut(&Path) -> CargoResult<()> +{ + let mut stash: HashSet = HashSet::new(); + + for current in paths::ancestors(pwd) { + let possible = current.join(".cargo").join("config"); + if fs::metadata(&possible).is_ok() { + walk(&possible)?; + stash.insert(possible); + } + } + + // Once we're done, also be sure to walk the home directory even if it's not + // in our history to be sure we pick up that standard location for + // information. + let home = homedir(pwd).ok_or_else(|| { + CargoError::from("Cargo couldn't find your home directory. 
\ + This probably means that $HOME was not set.") + })?; + let config = home.join("config"); + if !stash.contains(&config) && fs::metadata(&config).is_ok() { + walk(&config)?; + } + + Ok(()) +} + +pub fn save_credentials(cfg: &Config, + token: String) -> CargoResult<()> { + let mut file = { + cfg.home_path.create_dir()?; + cfg.home_path.open_rw(Path::new("credentials"), cfg, + "credentials' config file")? + }; + + let mut contents = String::new(); + file.read_to_string(&mut contents).chain_err(|| { + format!("failed to read configuration file `{}`", + file.path().display()) + })?; + let mut toml = cargo_toml::parse(&contents, file.path(), cfg)?; + toml.as_table_mut() + .unwrap() + .insert("token".to_string(), + ConfigValue::String(token, file.path().to_path_buf()).into_toml()); + + let contents = toml.to_string(); + file.seek(SeekFrom::Start(0))?; + file.write_all(contents.as_bytes())?; + file.file().set_len(contents.len() as u64)?; + set_permissions(file.file(), 0o600)?; + + return Ok(()); + + #[cfg(unix)] + fn set_permissions(file: & File, mode: u32) -> CargoResult<()> { + use std::os::unix::fs::PermissionsExt; + + let mut perms = file.metadata()?.permissions(); + perms.set_mode(mode); + file.set_permissions(perms)?; + Ok(()) + } + + #[cfg(not(unix))] + #[allow(unused)] + fn set_permissions(file: & File, mode: u32) -> CargoResult<()> { + Ok(()) + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/util/dependency_queue.rs b/collector/compile-benchmarks/cargo/src/cargo/util/dependency_queue.rs new file mode 100644 index 000000000..efe3cba9d --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/util/dependency_queue.rs @@ -0,0 +1,144 @@ +//! A graph-like structure used to represent a set of dependencies and in what +//! order they should be built. +//! +//! This structure is used to store the dependency graph and dynamically update +//! it to figure out when a dependency should be built. + +use std::collections::hash_map::Entry::{Occupied, Vacant}; +use std::collections::{HashMap, HashSet}; +use std::hash::Hash; + +pub use self::Freshness::{Fresh, Dirty}; + +#[derive(Debug)] +pub struct DependencyQueue { + /// A list of all known keys to build. + /// + /// The value of the hash map is list of dependencies which still need to be + /// built before the package can be built. Note that the set is dynamically + /// updated as more dependencies are built. + dep_map: HashMap, V)>, + + /// A reverse mapping of a package to all packages that depend on that + /// package. + /// + /// This map is statically known and does not get updated throughout the + /// lifecycle of the DependencyQueue. + reverse_dep_map: HashMap>, + + /// A set of dirty packages. + /// + /// Packages may become dirty over time if their dependencies are rebuilt. + dirty: HashSet, + + /// The packages which are currently being built, waiting for a call to + /// `finish`. + pending: HashSet, +} + +/// Indication of the freshness of a package. +/// +/// A fresh package does not necessarily need to be rebuilt (unless a dependency +/// was also rebuilt), and a dirty package must always be rebuilt. +#[derive(PartialEq, Eq, Debug, Clone, Copy)] +pub enum Freshness { + Fresh, + Dirty, +} + +impl Freshness { + pub fn combine(&self, other: Freshness) -> Freshness { + match *self { Fresh => other, Dirty => Dirty } + } +} + +impl Default for DependencyQueue { + fn default() -> DependencyQueue { + DependencyQueue::new() + } +} + +impl DependencyQueue { + /// Creates a new dependency queue with 0 packages. 
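+    ///
+    /// A minimal sketch of the intended call pattern, using hypothetical
+    /// string keys in place of real build units:
+    ///
+    /// ```ignore
+    /// let mut queue = DependencyQueue::new();
+    /// queue.queue(Fresh, "a", (), &["b"]);
+    /// queue.queue(Fresh, "b", (), &[]);
+    /// let (_, key, _) = queue.dequeue().unwrap(); // `"b"` has no deps
+    /// queue.finish(&"b", Fresh);
+    /// let (_, key, _) = queue.dequeue().unwrap(); // `"a"` is now unblocked
+    /// ```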
+ pub fn new() -> DependencyQueue { + DependencyQueue { + dep_map: HashMap::new(), + reverse_dep_map: HashMap::new(), + dirty: HashSet::new(), + pending: HashSet::new(), + } + } + + /// Adds a new package to this dependency queue. + /// + /// It is assumed that any dependencies of this package will eventually also + /// be added to the dependency queue. + pub fn queue(&mut self, + fresh: Freshness, + key: K, + value: V, + dependencies: &[K]) -> &mut V { + let slot = match self.dep_map.entry(key.clone()) { + Occupied(v) => return &mut v.into_mut().1, + Vacant(v) => v, + }; + + if fresh == Dirty { + self.dirty.insert(key.clone()); + } + + let mut my_dependencies = HashSet::new(); + for dep in dependencies { + my_dependencies.insert(dep.clone()); + let rev = self.reverse_dep_map.entry(dep.clone()) + .or_insert_with(HashSet::new); + rev.insert(key.clone()); + } + &mut slot.insert((my_dependencies, value)).1 + } + + /// Dequeues a package that is ready to be built. + /// + /// A package is ready to be built when it has 0 un-built dependencies. If + /// `None` is returned then no packages are ready to be built. + pub fn dequeue(&mut self) -> Option<(Freshness, K, V)> { + let key = match self.dep_map.iter() + .find(|&(_, &(ref deps, _))| deps.is_empty()) + .map(|(key, _)| key.clone()) { + Some(key) => key, + None => return None + }; + let (_, data) = self.dep_map.remove(&key).unwrap(); + let fresh = if self.dirty.contains(&key) {Dirty} else {Fresh}; + self.pending.insert(key.clone()); + Some((fresh, key, data)) + } + + /// Returns whether there are remaining packages to be built. + pub fn is_empty(&self) -> bool { + self.dep_map.is_empty() && self.pending.is_empty() + } + + /// Returns the number of remaining packages to be built. + pub fn len(&self) -> usize { + self.dep_map.len() + self.pending.len() + } + + /// Indicate that a package has been built. + /// + /// This function will update the dependency queue with this information, + /// possibly allowing the next invocation of `dequeue` to return a package. + pub fn finish(&mut self, key: &K, fresh: Freshness) { + assert!(self.pending.remove(key)); + let reverse_deps = match self.reverse_dep_map.get(key) { + Some(deps) => deps, + None => return, + }; + for dep in reverse_deps.iter() { + if fresh == Dirty { + self.dirty.insert(dep.clone()); + } + assert!(self.dep_map.get_mut(dep).unwrap().0.remove(key)); + } + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/util/errors.rs b/collector/compile-benchmarks/cargo/src/cargo/util/errors.rs new file mode 100644 index 000000000..70c501719 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/util/errors.rs @@ -0,0 +1,288 @@ +#![allow(unknown_lints)] + +use std::error::Error; +use std::fmt; +use std::io; +use std::num; +use std::process::{Output, ExitStatus}; +use std::str; +use std::string; + +use core::TargetKind; + +use curl; +use git2; +use semver; +use serde_json; +use toml; +use registry; +use ignore; + +error_chain! 
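+// The `error_chain!` invocation below generates the `CargoError` type,
+// its `CargoErrorKind` enum, the `CargoResult` alias, and `From`
+// conversions for each foreign error type listed under `foreign_links`.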
{ + types { + CargoError, CargoErrorKind, CargoResultExt, CargoResult; + } + + links { + CrateRegistry(registry::Error, registry::ErrorKind); + } + + foreign_links { + ParseSemver(semver::ReqParseError); + Semver(semver::SemVerError); + Ignore(ignore::Error); + Io(io::Error); + SerdeJson(serde_json::Error); + TomlSer(toml::ser::Error); + TomlDe(toml::de::Error); + ParseInt(num::ParseIntError); + ParseBool(str::ParseBoolError); + Parse(string::ParseError); + Git(git2::Error); + Curl(curl::Error); + } + + errors { + Internal(err: Box) { + description(err.description()) + display("{}", *err) + } + ProcessErrorKind(proc_err: ProcessError) { + description(&proc_err.desc) + display("{}", &proc_err.desc) + } + CargoTestErrorKind(test_err: CargoTestError) { + description(&test_err.desc) + display("{}", &test_err.desc) + } + HttpNot200(code: u32, url: String) { + description("failed to get a 200 response") + display("failed to get 200 response from `{}`, got {}", url, code) + } + } +} + +impl CargoError { + pub fn into_internal(self) -> Self { + CargoError(CargoErrorKind::Internal(Box::new(self.0)), self.1) + } + + fn is_human(&self) -> bool { + match self.0 { + CargoErrorKind::Msg(_) | + CargoErrorKind::TomlSer(_) | + CargoErrorKind::TomlDe(_) | + CargoErrorKind::Curl(_) | + CargoErrorKind::HttpNot200(..) | + CargoErrorKind::ProcessErrorKind(_) | + CargoErrorKind::CrateRegistry(_) => true, + CargoErrorKind::ParseSemver(_) | + CargoErrorKind::Semver(_) | + CargoErrorKind::Ignore(_) | + CargoErrorKind::Io(_) | + CargoErrorKind::SerdeJson(_) | + CargoErrorKind::ParseInt(_) | + CargoErrorKind::ParseBool(_) | + CargoErrorKind::Parse(_) | + CargoErrorKind::Git(_) | + CargoErrorKind::Internal(_) | + CargoErrorKind::CargoTestErrorKind(_) | + CargoErrorKind::__Nonexhaustive { .. } => false + } + } +} + + +// ============================================================================= +// Process errors +#[derive(Debug)] +pub struct ProcessError { + pub desc: String, + pub exit: Option, + pub output: Option, +} + +// ============================================================================= +// Cargo test errors. 
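+// A failing test run is reported as a `CargoTestError`, which aggregates
+// the `ProcessError` of every failing test binary: their descriptions are
+// joined together and the exit status of the first failure is kept.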
+ +/// Error when testcases fail +#[derive(Debug)] +pub struct CargoTestError { + pub test: Test, + pub desc: String, + pub exit: Option, + pub causes: Vec, +} + +#[derive(Debug)] +pub enum Test { + Multiple, + Doc, + UnitTest(TargetKind, String) +} + +impl CargoTestError { + pub fn new(test: Test, errors: Vec) -> Self { + if errors.is_empty() { + panic!("Cannot create CargoTestError from empty Vec") + } + let desc = errors.iter().map(|error| error.desc.clone()) + .collect::>() + .join("\n"); + CargoTestError { + test: test, + desc: desc, + exit: errors[0].exit, + causes: errors, + } + } + + pub fn hint(&self) -> String { + match self.test { + Test::UnitTest(ref kind, ref name) => { + match *kind { + TargetKind::Bench => format!("test failed, to rerun pass '--bench {}'", name), + TargetKind::Bin => format!("test failed, to rerun pass '--bin {}'", name), + TargetKind::Lib(_) => "test failed, to rerun pass '--lib'".into(), + TargetKind::Test => format!("test failed, to rerun pass '--test {}'", name), + TargetKind::ExampleBin | TargetKind::ExampleLib(_) => + format!("test failed, to rerun pass '--example {}", name), + _ => "test failed.".into() + } + }, + Test::Doc => "test failed, to rerun pass '--doc'".into(), + _ => "test failed.".into() + } + } +} + +// ============================================================================= +// CLI errors + +pub type CliResult = Result<(), CliError>; + +#[derive(Debug)] +pub struct CliError { + pub error: Option, + pub unknown: bool, + pub exit_code: i32 +} + +impl Error for CliError { + fn description(&self) -> &str { + self.error.as_ref().map(|e| e.description()) + .unwrap_or("unknown cli error") + } + + fn cause(&self) -> Option<&Error> { + self.error.as_ref().and_then(|e| e.cause()) + } +} + +impl fmt::Display for CliError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + if let Some(ref error) = self.error { + error.fmt(f) + } else { + self.description().fmt(f) + } + } +} + +impl CliError { + pub fn new(error: CargoError, code: i32) -> CliError { + let human = &error.is_human(); + CliError { error: Some(error), exit_code: code, unknown: !human } + } + + pub fn code(code: i32) -> CliError { + CliError { error: None, exit_code: code, unknown: false } + } +} + +impl From for CliError { + fn from(err: CargoError) -> CliError { + CliError::new(err, 101) + } +} + + +// ============================================================================= +// Construction helpers + +pub fn process_error(msg: &str, + status: Option<&ExitStatus>, + output: Option<&Output>) -> ProcessError +{ + let exit = match status { + Some(s) => status_to_string(s), + None => "never executed".to_string(), + }; + let mut desc = format!("{} ({})", &msg, exit); + + if let Some(out) = output { + match str::from_utf8(&out.stdout) { + Ok(s) if !s.trim().is_empty() => { + desc.push_str("\n--- stdout\n"); + desc.push_str(s); + } + Ok(..) | Err(..) => {} + } + match str::from_utf8(&out.stderr) { + Ok(s) if !s.trim().is_empty() => { + desc.push_str("\n--- stderr\n"); + desc.push_str(s); + } + Ok(..) | Err(..) 
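+            // Empty and non-UTF-8 output is simply omitted from the
+            // description.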
=> {} + } + } + + return ProcessError { + desc: desc, + exit: status.cloned(), + output: output.cloned(), + }; + + #[cfg(unix)] + fn status_to_string(status: &ExitStatus) -> String { + use std::os::unix::process::*; + use libc; + + if let Some(signal) = status.signal() { + let name = match signal as libc::c_int { + libc::SIGABRT => ", SIGABRT: process abort signal", + libc::SIGALRM => ", SIGALRM: alarm clock", + libc::SIGFPE => ", SIGFPE: erroneous arithmetic operation", + libc::SIGHUP => ", SIGHUP: hangup", + libc::SIGILL => ", SIGILL: illegal instruction", + libc::SIGINT => ", SIGINT: terminal interrupt signal", + libc::SIGKILL => ", SIGKILL: kill", + libc::SIGPIPE => ", SIGPIPE: write on a pipe with no one to read", + libc::SIGQUIT => ", SIGQUIT: terminal quite signal", + libc::SIGSEGV => ", SIGSEGV: invalid memory reference", + libc::SIGTERM => ", SIGTERM: termination signal", + libc::SIGBUS => ", SIGBUS: access to undefined memory", + #[cfg(not(target_os = "haiku"))] + libc::SIGSYS => ", SIGSYS: bad system call", + libc::SIGTRAP => ", SIGTRAP: trace/breakpoint trap", + _ => "", + }; + format!("signal: {}{}", signal, name) + } else { + status.to_string() + } + } + + #[cfg(windows)] + fn status_to_string(status: &ExitStatus) -> String { + status.to_string() + } +} + +pub fn internal(error: S) -> CargoError { + _internal(&error) +} + +fn _internal(error: &fmt::Display) -> CargoError { + CargoError::from_kind(error.to_string().into()).into_internal() +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/util/flock.rs b/collector/compile-benchmarks/cargo/src/cargo/util/flock.rs new file mode 100644 index 000000000..9f6ae48ea --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/util/flock.rs @@ -0,0 +1,344 @@ +use std::fs::{self, File, OpenOptions}; +use std::io::*; +use std::io; +use std::path::{Path, PathBuf, Display}; + +use termcolor::Color::Cyan; +use fs2::{FileExt, lock_contended_error}; +#[allow(unused_imports)] +use libc; + +use util::Config; +use util::errors::{CargoResult, CargoResultExt}; + +pub struct FileLock { + f: Option, + path: PathBuf, + state: State, +} + +#[derive(PartialEq)] +enum State { + Unlocked, + Shared, + Exclusive, +} + +impl FileLock { + /// Returns the underlying file handle of this lock. + pub fn file(&self) -> &File { + self.f.as_ref().unwrap() + } + + /// Returns the underlying path that this lock points to. + /// + /// Note that special care must be taken to ensure that the path is not + /// referenced outside the lifetime of this lock. + pub fn path(&self) -> &Path { + assert!(self.state != State::Unlocked); + &self.path + } + + /// Returns the parent path containing this file + pub fn parent(&self) -> &Path { + assert!(self.state != State::Unlocked); + self.path.parent().unwrap() + } + + /// Removes all sibling files to this locked file. + /// + /// This can be useful if a directory is locked with a sentinel file but it + /// needs to be cleared out as it may be corrupt. + pub fn remove_siblings(&self) -> io::Result<()> { + let path = self.path(); + for entry in path.parent().unwrap().read_dir()? 
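+        // Walk the lock file's directory, deleting every entry except the
+        // lock file itself.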
{ + let entry = entry?; + if Some(&entry.file_name()[..]) == path.file_name() { + continue + } + let kind = entry.file_type()?; + if kind.is_dir() { + fs::remove_dir_all(entry.path())?; + } else { + fs::remove_file(entry.path())?; + } + } + Ok(()) + } +} + +impl Read for FileLock { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + self.file().read(buf) + } +} + +impl Seek for FileLock { + fn seek(&mut self, to: SeekFrom) -> io::Result { + self.file().seek(to) + } +} + +impl Write for FileLock { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.file().write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.file().flush() + } +} + +impl Drop for FileLock { + fn drop(&mut self) { + if self.state != State::Unlocked { + if let Some(f) = self.f.take() { + let _ = f.unlock(); + } + } + } +} + +/// A "filesystem" is intended to be a globally shared, hence locked, resource +/// in Cargo. +/// +/// The `Path` of a filesystem cannot be learned unless it's done in a locked +/// fashion, and otherwise functions on this structure are prepared to handle +/// concurrent invocations across multiple instances of Cargo. +#[derive(Clone, Debug)] +pub struct Filesystem { + root: PathBuf, +} + +impl Filesystem { + /// Creates a new filesystem to be rooted at the given path. + pub fn new(path: PathBuf) -> Filesystem { + Filesystem { root: path } + } + + /// Like `Path::join`, creates a new filesystem rooted at this filesystem + /// joined with the given path. + pub fn join>(&self, other: T) -> Filesystem { + Filesystem::new(self.root.join(other)) + } + + /// Like `Path::push`, pushes a new path component onto this filesystem. + pub fn push>(&mut self, other: T) { + self.root.push(other); + } + + /// Consumes this filesystem and returns the underlying `PathBuf`. + /// + /// Note that this is a relatively dangerous operation and should be used + /// with great caution!. + pub fn into_path_unlocked(self) -> PathBuf { + self.root + } + + /// Creates the directory pointed to by this filesystem. + /// + /// Handles errors where other Cargo processes are also attempting to + /// concurrently create this directory. + pub fn create_dir(&self) -> io::Result<()> { + create_dir_all(&self.root) + } + + /// Returns an adaptor that can be used to print the path of this + /// filesystem. + pub fn display(&self) -> Display { + self.root.display() + } + + /// Opens exclusive access to a file, returning the locked version of a + /// file. + /// + /// This function will create a file at `path` if it doesn't already exist + /// (including intermediate directories), and then it will acquire an + /// exclusive lock on `path`. If the process must block waiting for the + /// lock, the `msg` is printed to `config`. + /// + /// The returned file can be accessed to look at the path and also has + /// read/write access to the underlying file. + pub fn open_rw
<P>
(&self, + path: P, + config: &Config, + msg: &str) -> CargoResult + where P: AsRef + { + self.open(path.as_ref(), + OpenOptions::new().read(true).write(true).create(true), + State::Exclusive, + config, + msg) + } + + /// Opens shared access to a file, returning the locked version of a file. + /// + /// This function will fail if `path` doesn't already exist, but if it does + /// then it will acquire a shared lock on `path`. If the process must block + /// waiting for the lock, the `msg` is printed to `config`. + /// + /// The returned file can be accessed to look at the path and also has read + /// access to the underlying file. Any writes to the file will return an + /// error. + pub fn open_ro
<P>
(&self, + path: P, + config: &Config, + msg: &str) -> CargoResult + where P: AsRef + { + self.open(path.as_ref(), + OpenOptions::new().read(true), + State::Shared, + config, + msg) + } + + fn open(&self, + path: &Path, + opts: &OpenOptions, + state: State, + config: &Config, + msg: &str) -> CargoResult { + let path = self.root.join(path); + + // If we want an exclusive lock then if we fail because of NotFound it's + // likely because an intermediate directory didn't exist, so try to + // create the directory and then continue. + let f = opts.open(&path).or_else(|e| { + if e.kind() == io::ErrorKind::NotFound && state == State::Exclusive { + create_dir_all(path.parent().unwrap())?; + opts.open(&path) + } else { + Err(e) + } + }).chain_err(|| { + format!("failed to open: {}", path.display()) + })?; + match state { + State::Exclusive => { + acquire(config, msg, &path, + &|| f.try_lock_exclusive(), + &|| f.lock_exclusive())?; + } + State::Shared => { + acquire(config, msg, &path, + &|| f.try_lock_shared(), + &|| f.lock_shared())?; + } + State::Unlocked => {} + + } + Ok(FileLock { f: Some(f), path: path, state: state }) + } +} + +/// Acquires a lock on a file in a "nice" manner. +/// +/// Almost all long-running blocking actions in Cargo have a status message +/// associated with them as we're not sure how long they'll take. Whenever a +/// conflicted file lock happens, this is the case (we're not sure when the lock +/// will be released). +/// +/// This function will acquire the lock on a `path`, printing out a nice message +/// to the console if we have to wait for it. It will first attempt to use `try` +/// to acquire a lock on the crate, and in the case of contention it will emit a +/// status message based on `msg` to `config`'s shell, and then use `block` to +/// block waiting to acquire a lock. +/// +/// Returns an error if the lock could not be acquired or if any error other +/// than a contention error happens. +fn acquire(config: &Config, + msg: &str, + path: &Path, + try: &Fn() -> io::Result<()>, + block: &Fn() -> io::Result<()>) -> CargoResult<()> { + + // File locking on Unix is currently implemented via `flock`, which is known + // to be broken on NFS. We could in theory just ignore errors that happen on + // NFS, but apparently the failure mode [1] for `flock` on NFS is **blocking + // forever**, even if the nonblocking flag is passed! + // + // As a result, we just skip all file locks entirely on NFS mounts. That + // should avoid calling any `flock` functions at all, and it wouldn't work + // there anyway. + // + // [1]: https://github.com/rust-lang/cargo/issues/2615 + if is_on_nfs_mount(path) { + return Ok(()) + } + + match try() { + Ok(()) => return Ok(()), + + // In addition to ignoring NFS which is commonly not working we also + // just ignore locking on filesystems that look like they don't + // implement file locking. We detect that here via the return value of + // locking (e.g. inspecting errno). 
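+        // `ENOTSUP` is what filesystems without any locking support
+        // return, and `ENOSYS` is returned by kernels that lack the
+        // syscall entirely; both are treated as a successful lock.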
+ #[cfg(unix)] + Err(ref e) if e.raw_os_error() == Some(libc::ENOTSUP) => return Ok(()), + + #[cfg(target_os = "linux")] + Err(ref e) if e.raw_os_error() == Some(libc::ENOSYS) => return Ok(()), + + Err(e) => { + if e.raw_os_error() != lock_contended_error().raw_os_error() { + return Err(e).chain_err(|| { + format!("failed to lock file: {}", path.display()) + }) + } + } + } + let msg = format!("waiting for file lock on {}", msg); + config.shell().status_with_color("Blocking", &msg, Cyan)?; + + return block().chain_err(|| { + format!("failed to lock file: {}", path.display()) + }); + + #[cfg(all(target_os = "linux", not(target_env = "musl")))] + fn is_on_nfs_mount(path: &Path) -> bool { + use std::ffi::CString; + use std::mem; + use std::os::unix::prelude::*; + + let path = match CString::new(path.as_os_str().as_bytes()) { + Ok(path) => path, + Err(_) => return false, + }; + + unsafe { + let mut buf: libc::statfs = mem::zeroed(); + let r = libc::statfs(path.as_ptr(), &mut buf); + + r == 0 && buf.f_type as u32 == libc::NFS_SUPER_MAGIC as u32 + } + } + + #[cfg(any(not(target_os = "linux"), target_env = "musl"))] + fn is_on_nfs_mount(_path: &Path) -> bool { + false + } +} + +fn create_dir_all(path: &Path) -> io::Result<()> { + match create_dir(path) { + Ok(()) => Ok(()), + Err(e) => { + if e.kind() == io::ErrorKind::NotFound { + if let Some(p) = path.parent() { + return create_dir_all(p).and_then(|()| create_dir(path)) + } + } + Err(e) + } + } +} + +fn create_dir(path: &Path) -> io::Result<()> { + match fs::create_dir(path) { + Ok(()) => Ok(()), + Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok(()), + Err(e) => Err(e), + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/util/graph.rs b/collector/compile-benchmarks/cargo/src/cargo/util/graph.rs new file mode 100644 index 000000000..d97b9d44d --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/util/graph.rs @@ -0,0 +1,105 @@ +use std::fmt; +use std::hash::Hash; +use std::collections::hash_set::{HashSet, Iter}; +use std::collections::hash_map::{HashMap, Keys}; + +pub struct Graph { + nodes: HashMap> +} + +enum Mark { + InProgress, + Done +} + +pub type Nodes<'a, N> = Keys<'a, N, HashSet>; +pub type Edges<'a, N> = Iter<'a, N>; + +impl Graph { + pub fn new() -> Graph { + Graph { nodes: HashMap::new() } + } + + pub fn add(&mut self, node: N, children: &[N]) { + self.nodes.entry(node) + .or_insert_with(HashSet::new) + .extend(children.iter().cloned()); + } + + pub fn link(&mut self, node: N, child: N) { + self.nodes.entry(node).or_insert_with(HashSet::new).insert(child); + } + + pub fn get_nodes(&self) -> &HashMap> { + &self.nodes + } + + pub fn edges(&self, node: &N) -> Option> { + self.nodes.get(node).map(|set| set.iter()) + } + + pub fn sort(&self) -> Option> { + let mut ret = Vec::new(); + let mut marks = HashMap::new(); + + for node in self.nodes.keys() { + self.visit(node, &mut ret, &mut marks); + } + + Some(ret) + } + + fn visit(&self, node: &N, dst: &mut Vec, marks: &mut HashMap) { + if marks.contains_key(node) { + return; + } + + marks.insert(node.clone(), Mark::InProgress); + + for child in &self.nodes[node] { + self.visit(child, dst, marks); + } + + dst.push(node.clone()); + marks.insert(node.clone(), Mark::Done); + } + + pub fn iter(&self) -> Nodes { + self.nodes.keys() + } +} + +impl Default for Graph { + fn default() -> Graph { + Graph::new() + } +} + +impl fmt::Debug for Graph { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + writeln!(fmt, "Graph {{")?; + + for (n, e) in &self.nodes { 
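+            // One line per node, with each outgoing edge indented beneath
+            // its source node.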
+ writeln!(fmt, " - {}", n)?; + + for n in e.iter() { + writeln!(fmt, " - {}", n)?; + } + } + + write!(fmt, "}}")?; + + Ok(()) + } +} + +impl PartialEq for Graph { + fn eq(&self, other: &Graph) -> bool { self.nodes.eq(&other.nodes) } +} +impl Eq for Graph {} + +impl Clone for Graph { + fn clone(&self) -> Graph { + Graph { nodes: self.nodes.clone() } + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/util/hex.rs b/collector/compile-benchmarks/cargo/src/cargo/util/hex.rs new file mode 100644 index 000000000..71a4c112f --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/util/hex.rs @@ -0,0 +1,27 @@ +#![allow(deprecated)] + +use hex::ToHex; +use std::hash::{Hasher, Hash, SipHasher}; + +pub fn to_hex(num: u64) -> String { + [ + (num >> 0) as u8, + (num >> 8) as u8, + (num >> 16) as u8, + (num >> 24) as u8, + (num >> 32) as u8, + (num >> 40) as u8, + (num >> 48) as u8, + (num >> 56) as u8, + ].to_hex() +} + +pub fn hash_u64(hashable: &H) -> u64 { + let mut hasher = SipHasher::new_with_keys(0, 0); + hashable.hash(&mut hasher); + hasher.finish() +} + +pub fn short_hash(hashable: &H) -> String { + to_hex(hash_u64(hashable)) +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/util/important_paths.rs b/collector/compile-benchmarks/cargo/src/cargo/util/important_paths.rs new file mode 100644 index 000000000..069979ea9 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/util/important_paths.rs @@ -0,0 +1,65 @@ +use std::fs; +use std::path::{Path, PathBuf}; +use util::errors::CargoResult; +use util::paths; + +/// Iteratively search for `file` in `pwd` and its parents, returning +/// the path of the directory. +pub fn find_project(pwd: &Path, file: &str) -> CargoResult { + find_project_manifest(pwd, file).map(|mut p| { + // remove the file, leaving just the directory + p.pop(); + p + }) +} + +/// Iteratively search for `file` in `pwd` and its parents, returning +/// the path to the file. +pub fn find_project_manifest(pwd: &Path, file: &str) -> CargoResult { + let mut current = pwd; + + loop { + let manifest = current.join(file); + if fs::metadata(&manifest).is_ok() { + return Ok(manifest) + } + + match current.parent() { + Some(p) => current = p, + None => break, + } + } + + bail!("could not find `{}` in `{}` or any parent directory", + file, pwd.display()) +} + +/// Find the root Cargo.toml +pub fn find_root_manifest_for_wd(manifest_path: Option, cwd: &Path) + -> CargoResult { + match manifest_path { + Some(path) => { + let absolute_path = paths::normalize_path(&cwd.join(&path)); + if !absolute_path.ends_with("Cargo.toml") { + bail!("the manifest-path must be a path to a Cargo.toml file") + } + if !fs::metadata(&absolute_path).is_ok() { + bail!("manifest path `{}` does not exist", path) + } + Ok(absolute_path) + }, + None => find_project_manifest(cwd, "Cargo.toml"), + } +} + +/// Return the path to the `file` in `pwd`, if it exists. +pub fn find_project_manifest_exact(pwd: &Path, file: &str) -> CargoResult { + let manifest = pwd.join(file); + + if fs::metadata(&manifest).is_ok() { + Ok(manifest) + } else { + Err(format!("Could not find `{}` in `{}`", + file, pwd.display()).into()) + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/util/job.rs b/collector/compile-benchmarks/cargo/src/cargo/util/job.rs new file mode 100644 index 000000000..06f51356d --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/util/job.rs @@ -0,0 +1,260 @@ +//! Job management (mostly for windows) +//! +//! 
Most of the time when you're running cargo you expect Ctrl-C to actually +//! terminate the entire tree of processes in play, not just the one at the top +//! (cago). This currently works "by default" on Unix platforms because Ctrl-C +//! actually sends a signal to the *process group* rather than the parent +//! process, so everything will get torn down. On Windows, however, this does +//! not happen and Ctrl-C just kills cargo. +//! +//! To achieve the same semantics on Windows we use Job Objects to ensure that +//! all processes die at the same time. Job objects have a mode of operation +//! where when all handles to the object are closed it causes all child +//! processes associated with the object to be terminated immediately. +//! Conveniently whenever a process in the job object spawns a new process the +//! child will be associated with the job object as well. This means if we add +//! ourselves to the job object we create then everything will get torn down! + +pub use self::imp::Setup; + +pub fn setup() -> Option { + unsafe { imp::setup() } +} + +#[cfg(unix)] +mod imp { + use std::env; + use libc; + + pub type Setup = (); + + pub unsafe fn setup() -> Option<()> { + // There's a test case for the behavior of + // when-cargo-is-killed-subprocesses-are-also-killed, but that requires + // one cargo spawned to become its own session leader, so we do that + // here. + if env::var("__CARGO_TEST_SETSID_PLEASE_DONT_USE_ELSEWHERE").is_ok() { + libc::setsid(); + } + Some(()) + } +} + +#[cfg(windows)] +mod imp { + extern crate kernel32; + extern crate winapi; + extern crate psapi; + + use std::ffi::OsString; + use std::io; + use std::mem; + use std::os::windows::prelude::*; + + pub struct Setup { + job: Handle, + } + + pub struct Handle { + inner: winapi::HANDLE, + } + + fn last_err() -> io::Error { + io::Error::last_os_error() + } + + pub unsafe fn setup() -> Option { + // Creates a new job object for us to use and then adds ourselves to it. + // Note that all errors are basically ignored in this function, + // intentionally. Job objects are "relatively new" in Windows, + // particularly the ability to support nested job objects. Older + // Windows installs don't support this ability. We probably don't want + // to force Cargo to abort in this situation or force others to *not* + // use job objects, so we instead just ignore errors and assume that + // we're otherwise part of someone else's job object in this case. + + let job = kernel32::CreateJobObjectW(0 as *mut _, 0 as *const _); + if job.is_null() { + return None + } + let job = Handle { inner: job }; + + // Indicate that when all handles to the job object are gone that all + // process in the object should be killed. Note that this includes our + // entire process tree by default because we've added ourselves and and + // our children will reside in the job once we spawn a process. + let mut info: winapi::JOBOBJECT_EXTENDED_LIMIT_INFORMATION; + info = mem::zeroed(); + info.BasicLimitInformation.LimitFlags = + winapi::JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE; + let r = kernel32::SetInformationJobObject(job.inner, + winapi::JobObjectExtendedLimitInformation, + &mut info as *mut _ as winapi::LPVOID, + mem::size_of_val(&info) as winapi::DWORD); + if r == 0 { + return None + } + + // Assign our process to this job object, meaning that our children will + // now live or die based on our existence. 
+ let me = kernel32::GetCurrentProcess(); + let r = kernel32::AssignProcessToJobObject(job.inner, me); + if r == 0 { + return None + } + + Some(Setup { job: job }) + } + + impl Drop for Setup { + fn drop(&mut self) { + // This is a litte subtle. By default if we are terminated then all + // processes in our job object are terminated as well, but we + // intentionally want to whitelist some processes to outlive our job + // object (see below). + // + // To allow for this, we manually kill processes instead of letting + // the job object kill them for us. We do this in a loop to handle + // processes spawning other processes. + // + // Finally once this is all done we know that the only remaining + // ones are ourselves and the whitelisted processes. The destructor + // here then configures our job object to *not* kill everything on + // close, then closes the job object. + unsafe { + while self.kill_remaining() { + info!("killed some, going for more"); + } + + let mut info: winapi::JOBOBJECT_EXTENDED_LIMIT_INFORMATION; + info = mem::zeroed(); + let r = kernel32::SetInformationJobObject( + self.job.inner, + winapi::JobObjectExtendedLimitInformation, + &mut info as *mut _ as winapi::LPVOID, + mem::size_of_val(&info) as winapi::DWORD); + if r == 0 { + info!("failed to configure job object to defaults: {}", + last_err()); + } + } + } + } + + impl Setup { + unsafe fn kill_remaining(&mut self) -> bool { + #[repr(C)] + struct Jobs { + header: winapi::JOBOBJECT_BASIC_PROCESS_ID_LIST, + list: [winapi::ULONG_PTR; 1024], + } + + let mut jobs: Jobs = mem::zeroed(); + let r = kernel32::QueryInformationJobObject( + self.job.inner, + winapi::JobObjectBasicProcessIdList, + &mut jobs as *mut _ as winapi::LPVOID, + mem::size_of_val(&jobs) as winapi::DWORD, + 0 as *mut _); + if r == 0 { + info!("failed to query job object: {}", last_err()); + return false + } + + let mut killed = false; + let list = &jobs.list[..jobs.header.NumberOfProcessIdsInList as usize]; + assert!(list.len() > 0); + info!("found {} remaining processes", list.len() - 1); + + let list = list.iter().filter(|&&id| { + // let's not kill ourselves + id as winapi::DWORD != kernel32::GetCurrentProcessId() + }).filter_map(|&id| { + // Open the process with the necessary rights, and if this + // fails then we probably raced with the process exiting so we + // ignore the problem. + let flags = winapi::PROCESS_QUERY_INFORMATION | + winapi::PROCESS_TERMINATE | + winapi::SYNCHRONIZE; + let p = kernel32::OpenProcess(flags, + winapi::FALSE, + id as winapi::DWORD); + if p.is_null() { + None + } else { + Some(Handle { inner: p }) + } + }).filter(|p| { + // Test if this process was actually in the job object or not. + // If it's not then we likely raced with something else + // recycling this PID, so we just skip this step. + let mut res = 0; + let r = kernel32::IsProcessInJob(p.inner, self.job.inner, &mut res); + if r == 0 { + info!("failed to test is process in job: {}", last_err()); + return false + } + res == winapi::TRUE + }); + + + for p in list { + // Load the file which this process was spawned from. We then + // later use this for identification purposes. + let mut buf = [0; 1024]; + let r = psapi::GetProcessImageFileNameW(p.inner, + buf.as_mut_ptr(), + buf.len() as winapi::DWORD); + if r == 0 { + info!("failed to get image name: {}", last_err()); + continue + } + let s = OsString::from_wide(&buf[..r as usize]); + info!("found remaining: {:?}", s); + + // And here's where we find the whole purpose for this + // function! 
Currently, our only whitelisted process is + // `mspdbsrv.exe`, and more details about that can be found + // here: + // + // https://github.com/rust-lang/rust/issues/33145 + // + // The gist of it is that all builds on one machine use the + // same `mspdbsrv.exe` instance. If we were to kill this + // instance then we could erroneously cause other builds to + // fail. + if let Some(s) = s.to_str() { + if s.contains("mspdbsrv") { + info!("\toops, this is mspdbsrv"); + continue + } + } + + // Ok, this isn't mspdbsrv, let's kill the process. After we + // kill it we wait on it to ensure that the next time around in + // this function we're not going to see it again. + let r = kernel32::TerminateProcess(p.inner, 1); + if r == 0 { + info!("\tfailed to kill subprocess: {}", last_err()); + info!("\tassuming subprocess is dead..."); + } else { + info!("\tterminated subprocess"); + } + let r = kernel32::WaitForSingleObject(p.inner, winapi::INFINITE); + if r != 0 { + info!("failed to wait for process to die: {}", last_err()); + return false + } + killed = true; + } + + return killed + } + } + + impl Drop for Handle { + fn drop(&mut self) { + unsafe { kernel32::CloseHandle(self.inner); } + } + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/util/lazy_cell.rs b/collector/compile-benchmarks/cargo/src/cargo/util/lazy_cell.rs new file mode 100644 index 000000000..607f2ef98 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/util/lazy_cell.rs @@ -0,0 +1,73 @@ +//! A lazily fill Cell, but with frozen contents. +//! +//! With a `RefCell`, the inner contents cannot be borrowed for the lifetime of +//! the entire object, but only of the borrows returned. A `LazyCell` is a +//! variation on `RefCell` which allows borrows tied to the lifetime of the +//! outer object. +//! +//! The limitation of a `LazyCell` is that after initialized, it can never be +//! modified unless you've otherwise got a `&mut` reference + +use std::cell::UnsafeCell; + +#[derive(Debug)] +pub struct LazyCell { + inner: UnsafeCell>, +} + +impl LazyCell { + /// Creates a new empty lazy cell. + pub fn new() -> LazyCell { + LazyCell { inner: UnsafeCell::new(None) } + } + + /// Put a value into this cell. + /// + /// This function will fail if the cell has already been filled. + pub fn fill(&self, t: T) -> Result<(), T> { + unsafe { + let slot = self.inner.get(); + if (*slot).is_none() { + *slot = Some(t); + Ok(()) + } else { + Err(t) + } + } + } + + /// Borrows the contents of this lazy cell for the duration of the cell + /// itself. + /// + /// This function will return `Some` if the cell has been previously + /// initialized, and `None` if it has not yet been initialized. + pub fn borrow(&self) -> Option<&T> { + unsafe { + (*self.inner.get()).as_ref() + } + } + + /// Same as `borrow`, but the mutable version + pub fn borrow_mut(&mut self) -> Option<&mut T> { + unsafe { + (*self.inner.get()).as_mut() + } + } + + /// Consumes this `LazyCell`, returning the underlying value. + pub fn into_inner(self) -> Option { + unsafe { + self.inner.into_inner() + } + } + + /// Borrows the contents of this lazy cell, initializing it if necessary. 
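+    ///
+    /// A small sketch of the call pattern (hypothetical values):
+    ///
+    /// ```ignore
+    /// let cell = LazyCell::new();
+    /// // The closure runs only on the first call; later calls reuse the
+    /// // cached value.
+    /// assert_eq!(cell.get_or_try_init(|| Ok::<_, ()>(1)), Ok(&1));
+    /// assert_eq!(cell.get_or_try_init(|| Ok(2)), Ok(&1));
+    /// ```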
+ pub fn get_or_try_init(&self, init: F) -> Result<&T, Error> + where F: FnOnce() -> Result + { + if self.borrow().is_none() && self.fill(init()?).is_err() { + unreachable!(); + } + Ok(self.borrow().unwrap()) + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/util/lev_distance.rs b/collector/compile-benchmarks/cargo/src/cargo/util/lev_distance.rs new file mode 100644 index 000000000..d55a3443a --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/util/lev_distance.rs @@ -0,0 +1,53 @@ +use std::cmp; + +pub fn lev_distance(me: &str, t: &str) -> usize { + if me.is_empty() { return t.chars().count(); } + if t.is_empty() { return me.chars().count(); } + + let mut dcol = (0..t.len() + 1).collect::>(); + let mut t_last = 0; + + for (i, sc) in me.chars().enumerate() { + + let mut current = i; + dcol[0] = current + 1; + + for (j, tc) in t.chars().enumerate() { + + let next = dcol[j + 1]; + + if sc == tc { + dcol[j + 1] = current; + } else { + dcol[j + 1] = cmp::min(current, next); + dcol[j + 1] = cmp::min(dcol[j + 1], dcol[j]) + 1; + } + + current = next; + t_last = j; + } + } + + dcol[t_last + 1] +} + +#[test] +fn test_lev_distance() { + use std::char::{ from_u32, MAX }; + // Test bytelength agnosticity + for c in (0u32..MAX as u32) + .filter_map(|i| from_u32(i)) + .map(|i| i.to_string()) { + assert_eq!(lev_distance(&c, &c), 0); + } + + let a = "\nMäry häd ä little lämb\n\nLittle lämb\n"; + let b = "\nMary häd ä little lämb\n\nLittle lämb\n"; + let c = "Mary häd ä little lämb\n\nLittle lämb\n"; + assert_eq!(lev_distance(a, b), 1); + assert_eq!(lev_distance(b, a), 1); + assert_eq!(lev_distance(a, c), 2); + assert_eq!(lev_distance(c, a), 2); + assert_eq!(lev_distance(b, c), 1); + assert_eq!(lev_distance(c, b), 1); +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/util/machine_message.rs b/collector/compile-benchmarks/cargo/src/cargo/util/machine_message.rs new file mode 100644 index 000000000..ddfeed7de --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/util/machine_message.rs @@ -0,0 +1,58 @@ +use serde::ser; +use serde_json::{self, Value}; + +use core::{PackageId, Target, Profile}; + +pub trait Message: ser::Serialize { + fn reason(&self) -> &str; +} + +pub fn emit(t: &T) { + let mut json: Value = serde_json::to_value(t).unwrap(); + json["reason"] = json!(t.reason()); + println!("{}", json); +} + +#[derive(Serialize)] +pub struct FromCompiler<'a> { + pub package_id: &'a PackageId, + pub target: &'a Target, + pub message: serde_json::Value, +} + +impl<'a> Message for FromCompiler<'a> { + fn reason(&self) -> &str { + "compiler-message" + } +} + +#[derive(Serialize)] +pub struct Artifact<'a> { + pub package_id: &'a PackageId, + pub target: &'a Target, + pub profile: &'a Profile, + pub features: Vec, + pub filenames: Vec, + pub fresh: bool, +} + +impl<'a> Message for Artifact<'a> { + fn reason(&self) -> &str { + "compiler-artifact" + } +} + +#[derive(Serialize)] +pub struct BuildScript<'a> { + pub package_id: &'a PackageId, + pub linked_libs: &'a [String], + pub linked_paths: &'a [String], + pub cfgs: &'a [String], + pub env: &'a [(String, String)], +} + +impl<'a> Message for BuildScript<'a> { + fn reason(&self) -> &str { + "build-script-executed" + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/util/mod.rs b/collector/compile-benchmarks/cargo/src/cargo/util/mod.rs new file mode 100644 index 000000000..9c1c9c5e0 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/util/mod.rs @@ -0,0 +1,44 @@ +pub use self::cfg::{Cfg, 
CfgExpr}; +pub use self::config::{Config, ConfigValue, homedir}; +pub use self::dependency_queue::{DependencyQueue, Fresh, Dirty, Freshness}; +pub use self::errors::{CargoResult, CargoResultExt, CargoError, CargoErrorKind, Test, CliResult}; +pub use self::errors::{CliError, ProcessError, CargoTestError}; +pub use self::errors::{process_error, internal}; +pub use self::flock::{FileLock, Filesystem}; +pub use self::graph::Graph; +pub use self::hex::{to_hex, short_hash, hash_u64}; +pub use self::lazy_cell::LazyCell; +pub use self::lev_distance::{lev_distance}; +pub use self::paths::{join_paths, path2bytes, bytes2path, dylib_path}; +pub use self::paths::{normalize_path, dylib_path_envvar, without_prefix}; +pub use self::process_builder::{process, ProcessBuilder}; +pub use self::rustc::Rustc; +pub use self::sha256::Sha256; +pub use self::to_semver::ToSemver; +pub use self::to_url::ToUrl; +pub use self::vcs::{GitRepo, HgRepo, PijulRepo, FossilRepo}; +pub use self::read2::read2; + +pub mod config; +pub mod errors; +pub mod graph; +pub mod hex; +pub mod important_paths; +pub mod job; +pub mod lev_distance; +pub mod machine_message; +pub mod network; +pub mod paths; +pub mod process_builder; +pub mod profile; +pub mod to_semver; +pub mod to_url; +pub mod toml; +mod cfg; +mod dependency_queue; +mod rustc; +mod sha256; +mod vcs; +mod lazy_cell; +mod flock; +mod read2; diff --git a/collector/compile-benchmarks/cargo/src/cargo/util/network.rs b/collector/compile-benchmarks/cargo/src/cargo/util/network.rs new file mode 100644 index 000000000..4c7c4dcb5 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/util/network.rs @@ -0,0 +1,106 @@ +use std; +use std::error::Error; + +use error_chain::ChainedError; +use util::Config; +use util::errors::{CargoError, CargoErrorKind, CargoResult}; +use git2; + +fn maybe_spurious(err: &E) -> bool + where E: ChainedError + 'static { + //Error inspection in non-verbose mode requires inspecting the + //error kind to avoid printing Internal errors. The downcasting + //machinery requires &(Error + 'static), but the iterator (and + //underlying `cause`) return &Error. Because the borrows are + //constrained to this handling method, and because the original + //error object is constrained to be 'static, we're casting away + //the borrow's actual lifetime for purposes of downcasting and + //inspecting the error chain + unsafe fn extend_lifetime(r: &Error) -> &(Error + 'static) { + std::mem::transmute::<&Error, &Error>(r) + } + + for e in err.iter() { + let e = unsafe { extend_lifetime(e) }; + if let Some(cargo_err) = e.downcast_ref::() { + match cargo_err.kind() { + &CargoErrorKind::Git(ref git_err) => { + match git_err.class() { + git2::ErrorClass::Net | + git2::ErrorClass::Os => return true, + _ => () + } + } + &CargoErrorKind::Curl(ref curl_err) + if curl_err.is_couldnt_connect() || + curl_err.is_couldnt_resolve_proxy() || + curl_err.is_couldnt_resolve_host() || + curl_err.is_operation_timedout() || + curl_err.is_recv_error() => { + return true + } + &CargoErrorKind::HttpNot200(code, ref _url) if 500 <= code && code < 600 => { + return true + } + _ => () + } + } + } + false +} + +/// Wrapper method for network call retry logic. +/// +/// Retry counts provided by Config object `net.retry`. Config shell outputs +/// a warning on per retry. +/// +/// Closure must return a `CargoResult`. 
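+/// Only errors classified as possibly spurious (network- or OS-class git
+/// errors; connect, resolve, timeout, and recv curl errors; and HTTP 5xx
+/// responses) are retried; any other error is returned immediately.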
+/// +/// # Examples +/// +/// ```ignore +/// use util::network; +/// cargo_result = network.with_retry(&config, || something.download()); +/// ``` +pub fn with_retry(config: &Config, mut callback: F) -> CargoResult + where F: FnMut() -> CargoResult +{ + let mut remaining = config.net_retry()?; + loop { + match callback() { + Ok(ret) => return Ok(ret), + Err(ref e) if maybe_spurious(e) && remaining > 0 => { + let msg = format!("spurious network error ({} tries \ + remaining): {}", remaining, e); + config.shell().warn(msg)?; + remaining -= 1; + } + //todo impl from + Err(e) => return Err(e.into()), + } + } +} +#[test] +fn with_retry_repeats_the_call_then_works() { + //Error HTTP codes (5xx) are considered maybe_spurious and will prompt retry + let error1 = CargoErrorKind::HttpNot200(501, "Uri".to_string()).into(); + let error2 = CargoErrorKind::HttpNot200(502, "Uri".to_string()).into(); + let mut results: Vec> = vec![Ok(()), Err(error1), Err(error2)]; + let config = Config::default().unwrap(); + let result = with_retry(&config, || results.pop().unwrap()); + assert_eq!(result.unwrap(), ()) +} + +#[test] +fn with_retry_finds_nested_spurious_errors() { + //Error HTTP codes (5xx) are considered maybe_spurious and will prompt retry + //String error messages are not considered spurious + let error1 : CargoError = CargoErrorKind::HttpNot200(501, "Uri".to_string()).into(); + let error1 = CargoError::with_chain(error1, "A non-spurious wrapping err"); + let error2 = CargoError::from_kind(CargoErrorKind::HttpNot200(502, "Uri".to_string())); + let error2 = CargoError::with_chain(error2, "A second chained error"); + let mut results: Vec> = vec![Ok(()), Err(error1), Err(error2)]; + let config = Config::default().unwrap(); + let result = with_retry(&config, || results.pop().unwrap()); + assert_eq!(result.unwrap(), ()) +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/util/paths.rs b/collector/compile-benchmarks/cargo/src/cargo/util/paths.rs new file mode 100644 index 000000000..ea6a66958 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/util/paths.rs @@ -0,0 +1,183 @@ +use std::env; +use std::ffi::{OsStr, OsString}; +use std::fs::File; +use std::fs::OpenOptions; +use std::io::prelude::*; +use std::path::{Path, PathBuf, Component}; + +use util::{internal, CargoResult}; +use util::errors::CargoResultExt; + +pub fn join_paths>(paths: &[T], env: &str) -> CargoResult { + env::join_paths(paths.iter()).or_else(|e| { + let paths = paths.iter().map(Path::new).collect::>(); + Err(internal(format!("failed to join path array: {:?}", paths))).chain_err(|| { + format!("failed to join search paths together: {}\n\ + Does ${} have an unterminated quote character?", + e, env) + }) + }) +} + +pub fn dylib_path_envvar() -> &'static str { + if cfg!(windows) {"PATH"} + else if cfg!(target_os = "macos") {"DYLD_LIBRARY_PATH"} + else {"LD_LIBRARY_PATH"} +} + +pub fn dylib_path() -> Vec { + match env::var_os(dylib_path_envvar()) { + Some(var) => env::split_paths(&var).collect(), + None => Vec::new(), + } +} + +pub fn normalize_path(path: &Path) -> PathBuf { + let mut components = path.components().peekable(); + let mut ret = if let Some(c @ Component::Prefix(..)) = components.peek() + .cloned() { + components.next(); + PathBuf::from(c.as_os_str()) + } else { + PathBuf::new() + }; + + for component in components { + match component { + Component::Prefix(..) 
=> unreachable!(), + Component::RootDir => { ret.push(component.as_os_str()); } + Component::CurDir => {} + Component::ParentDir => { ret.pop(); } + Component::Normal(c) => { ret.push(c); } + } + } + ret +} + +pub fn without_prefix<'a>(long_path: &'a Path, prefix: &'a Path) -> Option<&'a Path> { + let mut a = long_path.components(); + let mut b = prefix.components(); + loop { + match b.next() { + Some(y) => match a.next() { + Some(x) if x == y => continue, + _ => return None, + }, + None => return Some(a.as_path()), + } + } +} + +pub fn read(path: &Path) -> CargoResult { + match String::from_utf8(read_bytes(path)?) { + Ok(s) => Ok(s), + Err(_) => bail!("path at `{}` was not valid utf-8", path.display()), + } +} + +pub fn read_bytes(path: &Path) -> CargoResult> { + (|| -> CargoResult<_> { + let mut ret = Vec::new(); + let mut f = File::open(path)?; + if let Ok(m) = f.metadata() { + ret.reserve(m.len() as usize + 1); + } + f.read_to_end(&mut ret)?; + Ok(ret) + })().chain_err(|| { + format!("failed to read `{}`", path.display()) + }) +} + +pub fn write(path: &Path, contents: &[u8]) -> CargoResult<()> { + (|| -> CargoResult<()> { + let mut f = File::create(path)?; + f.write_all(contents)?; + Ok(()) + })().chain_err(|| { + format!("failed to write `{}`", path.display()) + }) +} + +pub fn append(path: &Path, contents: &[u8]) -> CargoResult<()> { + (|| -> CargoResult<()> { + let mut f = OpenOptions::new() + .write(true) + .append(true) + .create(true) + .open(path)?; + + f.write_all(contents)?; + Ok(()) + })().chain_err(|| { + internal(format!("failed to write `{}`", path.display())) + }) +} + +#[cfg(unix)] +pub fn path2bytes(path: &Path) -> CargoResult<&[u8]> { + use std::os::unix::prelude::*; + Ok(path.as_os_str().as_bytes()) +} +#[cfg(windows)] +pub fn path2bytes(path: &Path) -> CargoResult<&[u8]> { + match path.as_os_str().to_str() { + Some(s) => Ok(s.as_bytes()), + None => Err(format!("invalid non-unicode path: {}", + path.display()).into()) + } +} + +#[cfg(unix)] +pub fn bytes2path(bytes: &[u8]) -> CargoResult { + use std::os::unix::prelude::*; + use std::ffi::OsStr; + Ok(PathBuf::from(OsStr::from_bytes(bytes))) +} +#[cfg(windows)] +pub fn bytes2path(bytes: &[u8]) -> CargoResult { + use std::str; + match str::from_utf8(bytes) { + Ok(s) => Ok(PathBuf::from(s)), + Err(..) => Err("invalid non-unicode path".into()), + } +} + +pub fn ancestors(path: &Path) -> PathAncestors { + PathAncestors::new(path) +} + +pub struct PathAncestors<'a> { + current: Option<&'a Path>, + stop_at: Option +} + +impl<'a> PathAncestors<'a> { + fn new(path: &Path) -> PathAncestors { + PathAncestors { + current: Some(path), + //HACK: avoid reading `~/.cargo/config` when testing Cargo itself. 
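`normalize_path` above resolves `.` and `..` segments purely lexically, without touching the filesystem, so it works on paths that may not exist yet; unlike `fs::canonicalize` it therefore does not resolve symlinks. A compact sketch of the same algorithm, with the Windows-prefix special case folded into the main loop:

```rust
use std::path::{Component, Path, PathBuf};

// Lexical normalization in the style of `normalize_path`: `.` is dropped,
// `..` pops the previous component, and no filesystem access happens.
// (The original peels off a Windows path prefix before the loop; here the
// prefix is simply pushed like any other component.)
fn normalize(path: &Path) -> PathBuf {
    let mut ret = PathBuf::new();
    for component in path.components() {
        match component {
            Component::Prefix(..) | Component::RootDir => ret.push(component.as_os_str()),
            Component::CurDir => {}
            Component::ParentDir => { ret.pop(); }
            Component::Normal(c) => ret.push(c),
        }
    }
    ret
}

fn main() {
    assert_eq!(normalize(Path::new("/a/b/../c/./d")), PathBuf::from("/a/c/d"));
    assert_eq!(normalize(Path::new("a/./b/..")), PathBuf::from("a"));
}
```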
+ stop_at: env::var("__CARGO_TEST_ROOT").ok().map(PathBuf::from), + } + } +} + +impl<'a> Iterator for PathAncestors<'a> { + type Item = &'a Path; + + fn next(&mut self) -> Option<&'a Path> { + if let Some(path) = self.current { + self.current = path.parent(); + + if let Some(ref stop_at) = self.stop_at { + if path == stop_at { + self.current = None; + } + } + + Some(path) + } else { + None + } + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/util/process_builder.rs b/collector/compile-benchmarks/cargo/src/cargo/util/process_builder.rs new file mode 100644 index 000000000..ab5de7f08 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/util/process_builder.rs @@ -0,0 +1,312 @@ +use std::collections::HashMap; +use std::env; +use std::ffi::{OsString, OsStr}; +use std::fmt; +use std::path::Path; +use std::process::{Command, Stdio, Output}; + +use jobserver::Client; +use shell_escape::escape; + +use util::{CargoResult, CargoResultExt, CargoError, process_error, read2}; +use util::errors::CargoErrorKind; + +/// A builder object for an external process, similar to `std::process::Command`. +#[derive(Clone, Debug)] +pub struct ProcessBuilder { + /// The program to execute. + program: OsString, + /// A list of arguments to pass to the program. + args: Vec, + /// Any environment variables that should be set for the program. + env: HashMap>, + /// Which directory to run the program from. + cwd: Option, + /// The `make` jobserver. See the [jobserver crate][jobserver_docs] for + /// more information. + /// + /// [jobserver_docs]: https://docs.rs/jobserver/0.1.6/jobserver/ + jobserver: Option, +} + +impl fmt::Display for ProcessBuilder { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "`{}", self.program.to_string_lossy())?; + + for arg in &self.args { + write!(f, " {}", escape(arg.to_string_lossy()))?; + } + + write!(f, "`") + } +} + +impl ProcessBuilder { + /// (chainable) Set the executable for the process. + pub fn program>(&mut self, program: T) -> &mut ProcessBuilder { + self.program = program.as_ref().to_os_string(); + self + } + + /// (chainable) Add an arg to the args list. + pub fn arg>(&mut self, arg: T) -> &mut ProcessBuilder { + self.args.push(arg.as_ref().to_os_string()); + self + } + + /// (chainable) Add many args to the args list. + pub fn args>(&mut self, arguments: &[T]) -> &mut ProcessBuilder { + self.args.extend(arguments.iter().map(|t| { + t.as_ref().to_os_string() + })); + self + } + + /// (chainable) Replace args with new args list + pub fn args_replace>(&mut self, arguments: &[T]) -> &mut ProcessBuilder { + self.args = arguments.iter().map(|t| { + t.as_ref().to_os_string() + }).collect(); + self + } + + /// (chainable) Set the current working directory of the process + pub fn cwd>(&mut self, path: T) -> &mut ProcessBuilder { + self.cwd = Some(path.as_ref().to_os_string()); + self + } + + /// (chainable) Set an environment variable for the process. + pub fn env>(&mut self, key: &str, + val: T) -> &mut ProcessBuilder { + self.env.insert(key.to_string(), Some(val.as_ref().to_os_string())); + self + } + + /// (chainable) Unset an environment variable for the process. + pub fn env_remove(&mut self, key: &str) -> &mut ProcessBuilder { + self.env.insert(key.to_string(), None); + self + } + + /// Get the executable name. 
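`PathAncestors` yields a path and each of its parents in turn, optionally cutting the walk short at a marker directory (the `__CARGO_TEST_ROOT` hack above). The same traversal written as a plain loop, using hypothetical paths:

```rust
use std::path::Path;

// Walk a path and its parents, stopping after `stop_at` if given: the
// iteration `PathAncestors` implements above, written out directly
// instead of as an `Iterator` impl.
fn walk_ancestors(start: &Path, stop_at: Option<&Path>) -> Vec<String> {
    let mut out = Vec::new();
    let mut current = Some(start);
    while let Some(path) = current {
        out.push(path.display().to_string());
        if stop_at == Some(path) {
            break;
        }
        current = path.parent();
    }
    out
}

fn main() {
    // Hypothetical project layout. Without a stop marker the walk reaches
    // the root, which is how Cargo finds every candidate config directory.
    assert_eq!(
        walk_ancestors(Path::new("/w/proj/src"), None),
        ["/w/proj/src", "/w/proj", "/w", "/"]
    );
    // With a stop marker (cf. __CARGO_TEST_ROOT) the walk is truncated.
    assert_eq!(
        walk_ancestors(Path::new("/w/proj/src"), Some(Path::new("/w"))),
        ["/w/proj/src", "/w/proj", "/w"]
    );
}
```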
+ pub fn get_program(&self) -> &OsString { + &self.program + } + + /// Get the program arguments + pub fn get_args(&self) -> &[OsString] { + &self.args + } + + /// Get the current working directory for the process + pub fn get_cwd(&self) -> Option<&Path> { + self.cwd.as_ref().map(Path::new) + } + + /// Get an environment variable as the process will see it (will inherit from environment + /// unless explicitally unset). + pub fn get_env(&self, var: &str) -> Option { + self.env.get(var).cloned().or_else(|| Some(env::var_os(var))) + .and_then(|s| s) + } + + /// Get all environment variables explicitally set or unset for the process (not inherited + /// vars). + pub fn get_envs(&self) -> &HashMap> { &self.env } + + /// Set the `make` jobserver. See the [jobserver crate][jobserver_docs] for + /// more information. + /// + /// [jobserver_docs]: https://docs.rs/jobserver/0.1.6/jobserver/ + pub fn inherit_jobserver(&mut self, jobserver: &Client) -> &mut Self { + self.jobserver = Some(jobserver.clone()); + self + } + + /// Run the process, waiting for completion, and mapping non-success exit codes to an error. + pub fn exec(&self) -> CargoResult<()> { + let mut command = self.build_command(); + let exit = command.status().chain_err(|| { + CargoErrorKind::ProcessErrorKind( + process_error(&format!("could not execute process `{}`", + self.debug_string()), None, None)) + })?; + + if exit.success() { + Ok(()) + } else { + Err(CargoErrorKind::ProcessErrorKind(process_error( + &format!("process didn't exit successfully: `{}`", self.debug_string()), + Some(&exit), None)).into()) + } + } + + /// On unix, executes the process using the unix syscall `execvp`, which will block this + /// process, and will only return if there is an error. On windows this is a synonym for + /// `exec`. + #[cfg(unix)] + pub fn exec_replace(&self) -> CargoResult<()> { + use std::os::unix::process::CommandExt; + + let mut command = self.build_command(); + let error = command.exec(); + Err(CargoError::with_chain(error, + CargoErrorKind::ProcessErrorKind(process_error( + &format!("could not execute process `{}`", self.debug_string()), None, None)))) + } + + /// On unix, executes the process using the unix syscall `execvp`, which will block this + /// process, and will only return if there is an error. On windows this is a synonym for + /// `exec`. + #[cfg(windows)] + pub fn exec_replace(&self) -> CargoResult<()> { + self.exec() + } + + /// Execute the process, returning the stdio output, or an error if non-zero exit status. + pub fn exec_with_output(&self) -> CargoResult { + let mut command = self.build_command(); + + let output = command.output().chain_err(|| { + CargoErrorKind::ProcessErrorKind( + process_error( + &format!("could not execute process `{}`", self.debug_string()), + None, None)) + })?; + + if output.status.success() { + Ok(output) + } else { + Err(CargoErrorKind::ProcessErrorKind(process_error( + &format!("process didn't exit successfully: `{}`", self.debug_string()), + Some(&output.status), Some(&output))).into()) + } + } + + /// Execute a command, passing each line of stdout and stderr to the supplied callbacks, which + /// can mutate the string data. + /// + /// If any invocations of these function return an error, it will be propagated. 
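`exec` above distinguishes two failure modes: the process could not be launched at all, and the process ran but exited unsuccessfully. A stripped-down sketch of that mapping using only `std::process`, with plain string errors in place of `CargoErrorKind::ProcessErrorKind`:

```rust
use std::process::Command;

// Map the two failure modes the way `exec` does: failure to launch at
// all (spawn error) vs. a non-success exit status.
fn exec(cmd: &mut Command) -> Result<(), String> {
    let status = cmd
        .status()
        .map_err(|e| format!("could not execute process `{:?}`: {}", cmd, e))?;
    if status.success() {
        Ok(())
    } else {
        Err(format!("process didn't exit successfully: `{:?}` ({})", cmd, status))
    }
}

fn main() {
    // Assumes a Unix-like environment with `true` and `false` on PATH.
    assert!(exec(&mut Command::new("true")).is_ok());
    assert!(exec(&mut Command::new("false")).is_err());
    // A binary name chosen to (hopefully) not exist triggers the spawn error.
    assert!(exec(&mut Command::new("no-such-binary-hopefully")).is_err());
}
```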
+ /// + /// Optionally, output can be passed to errors using `print_output` + pub fn exec_with_streaming(&self, + on_stdout_line: &mut FnMut(&str) -> CargoResult<()>, + on_stderr_line: &mut FnMut(&str) -> CargoResult<()>, + print_output: bool) + -> CargoResult { + let mut stdout = Vec::new(); + let mut stderr = Vec::new(); + + let mut cmd = self.build_command(); + cmd.stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .stdin(Stdio::null()); + + let mut callback_error = None; + let status = (|| { + let mut child = cmd.spawn()?; + let out = child.stdout.take().unwrap(); + let err = child.stderr.take().unwrap(); + read2(out, err, &mut |is_out, data, eof| { + let idx = if eof { + data.len() + } else { + match data.iter().rposition(|b| *b == b'\n') { + Some(i) => i + 1, + None => return, + } + }; + let data = data.drain(..idx); + let dst = if is_out {&mut stdout} else {&mut stderr}; + let start = dst.len(); + dst.extend(data); + for line in String::from_utf8_lossy(&dst[start..]).lines() { + if callback_error.is_some() { break } + let callback_result = if is_out { + on_stdout_line(line) + } else { + on_stderr_line(line) + }; + if let Err(e) = callback_result { + callback_error = Some(e); + } + } + })?; + child.wait() + })().chain_err(|| { + CargoErrorKind::ProcessErrorKind( + process_error(&format!("could not execute process `{}`", + self.debug_string()), + None, None)) + })?; + let output = Output { + stdout: stdout, + stderr: stderr, + status: status, + }; + + { + let to_print = if print_output { + Some(&output) + } else { + None + }; + if !output.status.success() { + return Err(CargoErrorKind::ProcessErrorKind(process_error( + &format!("process didn't exit successfully: `{}`", self.debug_string()), + Some(&output.status), to_print)).into()) + } else if let Some(e) = callback_error { + return Err(CargoError::with_chain(e, + CargoErrorKind::ProcessErrorKind(process_error( + &format!("failed to parse process output: `{}`", self.debug_string()), + Some(&output.status), to_print)))) + } + } + + Ok(output) + } + + /// Converts ProcessBuilder into a `std::process::Command`, and handles the jobserver if + /// present. + pub fn build_command(&self) -> Command { + let mut command = Command::new(&self.program); + if let Some(cwd) = self.get_cwd() { + command.current_dir(cwd); + } + for arg in &self.args { + command.arg(arg); + } + for (k, v) in &self.env { + match *v { + Some(ref v) => { command.env(k, v); } + None => { command.env_remove(k); } + } + } + if let Some(ref c) = self.jobserver { + c.configure(&mut command); + } + command + } + + /// Get the command line for the process as a string. + fn debug_string(&self) -> String { + let mut program = format!("{}", self.program.to_string_lossy()); + for arg in &self.args { + program.push(' '); + program.push_str(&format!("{}", arg.to_string_lossy())); + } + program + } +} + +/// A helper function to create a ProcessBuilder. 
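The callback wiring in `exec_with_streaming` above forwards only complete lines: it finds the last `b'\n'` with `rposition`, drains everything up to it, and leaves any trailing partial line buffered until more data (or EOF) arrives. Here is that buffering step in isolation:

```rust
// The chunk-to-lines buffering used by `exec_with_streaming`: only data
// up to the last newline is consumed; a trailing partial line stays in
// the buffer until more data (or EOF, when `eof` is true) arrives.
fn drain_complete_lines(buf: &mut Vec<u8>, eof: bool, mut on_line: impl FnMut(&str)) {
    let idx = if eof {
        buf.len()
    } else {
        match buf.iter().rposition(|b| *b == b'\n') {
            Some(i) => i + 1,
            None => return, // no complete line yet
        }
    };
    let chunk: Vec<u8> = buf.drain(..idx).collect();
    for line in String::from_utf8_lossy(&chunk).lines() {
        on_line(line);
    }
}

fn main() {
    let mut buf = b"first\nsecond\npart".to_vec();
    let mut seen = Vec::new();
    drain_complete_lines(&mut buf, false, |l| seen.push(l.to_string()));
    assert_eq!(seen, ["first", "second"]);
    assert_eq!(buf, b"part"); // partial line retained
    buf.extend_from_slice(b"ial\n");
    drain_complete_lines(&mut buf, false, |l| seen.push(l.to_string()));
    assert_eq!(seen, ["first", "second", "partial"]);
}
```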
+pub fn process>(cmd: T) -> ProcessBuilder { + ProcessBuilder { + program: cmd.as_ref().to_os_string(), + args: Vec::new(), + cwd: None, + env: HashMap::new(), + jobserver: None, + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/util/profile.rs b/collector/compile-benchmarks/cargo/src/cargo/util/profile.rs new file mode 100644 index 000000000..da90566f1 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/util/profile.rs @@ -0,0 +1,71 @@ +use std::env; +use std::fmt; +use std::mem; +use std::time; +use std::iter::repeat; +use std::cell::RefCell; + +thread_local!(static PROFILE_STACK: RefCell> = RefCell::new(Vec::new())); +thread_local!(static MESSAGES: RefCell> = RefCell::new(Vec::new())); + +type Message = (usize, u64, String); + +pub struct Profiler { + desc: String, +} + +fn enabled_level() -> Option { + env::var("CARGO_PROFILE").ok().and_then(|s| s.parse().ok()) +} + +pub fn start(desc: T) -> Profiler { + if enabled_level().is_none() { return Profiler { desc: String::new() } } + + PROFILE_STACK.with(|stack| stack.borrow_mut().push(time::Instant::now())); + + Profiler { + desc: desc.to_string(), + } +} + +impl Drop for Profiler { + fn drop(&mut self) { + let enabled = match enabled_level() { + Some(i) => i, + None => return, + }; + + let start = PROFILE_STACK.with(|stack| stack.borrow_mut().pop().unwrap()); + let duration = start.elapsed(); + let duration_ms = duration.as_secs() * 1000 + u64::from(duration.subsec_nanos() / 1_000_000); + + let stack_len = PROFILE_STACK.with(|stack| stack.borrow().len()); + if stack_len == 0 { + fn print(lvl: usize, msgs: &[Message], enabled: usize) { + if lvl > enabled { return } + let mut last = 0; + for (i, &(l, time, ref msg)) in msgs.iter().enumerate() { + if l != lvl { continue } + println!("{} {:6}ms - {}", + repeat(" ").take(lvl + 1).collect::(), + time, msg); + + print(lvl + 1, &msgs[last..i], enabled); + last = i; + } + + } + MESSAGES.with(|msgs_rc| { + let mut msgs = msgs_rc.borrow_mut(); + msgs.push((0, duration_ms, + mem::replace(&mut self.desc, String::new()))); + print(0, &msgs, enabled); + }); + } else { + MESSAGES.with(|msgs| { + let msg = mem::replace(&mut self.desc, String::new()); + msgs.borrow_mut().push((stack_len, duration_ms, msg)); + }); + } + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/util/read2.rs b/collector/compile-benchmarks/cargo/src/cargo/util/read2.rs new file mode 100644 index 000000000..b3aa7d8b2 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/util/read2.rs @@ -0,0 +1,177 @@ +pub use self::imp::read2; + +#[cfg(unix)] +mod imp { + use std::io::prelude::*; + use std::io; + use std::mem; + use std::os::unix::prelude::*; + use std::process::{ChildStdout, ChildStderr}; + use libc; + + pub fn read2(mut out_pipe: ChildStdout, + mut err_pipe: ChildStderr, + data: &mut FnMut(bool, &mut Vec, bool)) -> io::Result<()> { + unsafe { + libc::fcntl(out_pipe.as_raw_fd(), libc::F_SETFL, libc::O_NONBLOCK); + libc::fcntl(err_pipe.as_raw_fd(), libc::F_SETFL, libc::O_NONBLOCK); + } + + let mut out_done = false; + let mut err_done = false; + let mut out = Vec::new(); + let mut err = Vec::new(); + + let mut fds: [libc::pollfd; 2] = unsafe { mem::zeroed() }; + fds[0].fd = out_pipe.as_raw_fd(); + fds[0].events = libc::POLLIN; + fds[1].fd = err_pipe.as_raw_fd(); + fds[1].events = libc::POLLIN; + loop { + // wait for either pipe to become readable using `select` + let r = unsafe { libc::poll(fds.as_mut_ptr(), 2, -1) }; + if r == -1 { + let err = io::Error::last_os_error(); + if 
err.kind() == io::ErrorKind::Interrupted { + continue + } + return Err(err) + } + + // Read as much as we can from each pipe, ignoring EWOULDBLOCK or + // EAGAIN. If we hit EOF, then this will happen because the underlying + // reader will return Ok(0), in which case we'll see `Ok` ourselves. In + // this case we flip the other fd back into blocking mode and read + // whatever's leftover on that file descriptor. + let handle = |res: io::Result<_>| { + match res { + Ok(_) => Ok(true), + Err(e) => { + if e.kind() == io::ErrorKind::WouldBlock { + Ok(false) + } else { + Err(e) + } + } + } + }; + if !out_done && fds[0].revents != 0 && handle(out_pipe.read_to_end(&mut out))? { + out_done = true; + } + data(true, &mut out, out_done); + if !err_done && fds[1].revents != 0 && handle(err_pipe.read_to_end(&mut err))? { + err_done = true; + } + data(false, &mut err, err_done); + + if out_done && err_done { + return Ok(()) + } + } + } +} + +#[cfg(windows)] +mod imp { + extern crate miow; + extern crate winapi; + + use std::io; + use std::os::windows::prelude::*; + use std::process::{ChildStdout, ChildStderr}; + use std::slice; + + use self::miow::iocp::{CompletionPort, CompletionStatus}; + use self::miow::pipe::NamedPipe; + use self::miow::Overlapped; + use self::winapi::ERROR_BROKEN_PIPE; + + struct Pipe<'a> { + dst: &'a mut Vec, + overlapped: Overlapped, + pipe: NamedPipe, + done: bool, + } + + pub fn read2(out_pipe: ChildStdout, + err_pipe: ChildStderr, + data: &mut FnMut(bool, &mut Vec, bool)) -> io::Result<()> { + let mut out = Vec::new(); + let mut err = Vec::new(); + + let port = CompletionPort::new(1)?; + port.add_handle(0, &out_pipe)?; + port.add_handle(1, &err_pipe)?; + + unsafe { + let mut out_pipe = Pipe::new(out_pipe, &mut out); + let mut err_pipe = Pipe::new(err_pipe, &mut err); + + out_pipe.read()?; + err_pipe.read()?; + + let mut status = [CompletionStatus::zero(), CompletionStatus::zero()]; + + while !out_pipe.done || !err_pipe.done { + for status in port.get_many(&mut status, None)? 
{ + if status.token() == 0 { + out_pipe.complete(status); + data(true, out_pipe.dst, out_pipe.done); + out_pipe.read()?; + } else { + err_pipe.complete(status); + data(false, err_pipe.dst, err_pipe.done); + err_pipe.read()?; + } + } + } + + Ok(()) + } + } + + impl<'a> Pipe<'a> { + unsafe fn new(p: P, dst: &'a mut Vec) -> Pipe<'a> { + Pipe { + dst: dst, + pipe: NamedPipe::from_raw_handle(p.into_raw_handle()), + overlapped: Overlapped::zero(), + done: false, + } + } + + unsafe fn read(&mut self) -> io::Result<()> { + let dst = slice_to_end(self.dst); + match self.pipe.read_overlapped(dst, self.overlapped.raw()) { + Ok(_) => Ok(()), + Err(e) => { + if e.raw_os_error() == Some(ERROR_BROKEN_PIPE as i32) { + self.done = true; + Ok(()) + } else { + Err(e) + } + } + } + } + + unsafe fn complete(&mut self, status: &CompletionStatus) { + let prev = self.dst.len(); + self.dst.set_len(prev + status.bytes_transferred() as usize); + if status.bytes_transferred() == 0 { + self.done = true; + } + } + } + + unsafe fn slice_to_end(v: &mut Vec) -> &mut [u8] { + if v.capacity() == 0 { + v.reserve(16); + } + if v.capacity() == v.len() { + v.reserve(1); + } + slice::from_raw_parts_mut(v.as_mut_ptr().offset(v.len() as isize), + v.capacity() - v.len()) + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/util/rustc.rs b/collector/compile-benchmarks/cargo/src/cargo/util/rustc.rs new file mode 100644 index 000000000..a9b65c959 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/util/rustc.rs @@ -0,0 +1,62 @@ +use std::path::PathBuf; + +use util::{self, CargoResult, internal, ProcessBuilder}; + +/// Information on the `rustc` executable +#[derive(Debug)] +pub struct Rustc { + /// The location of the exe + pub path: PathBuf, + /// An optional program that will be passed the path of the rust exe as its first argument, and + /// rustc args following this. + pub wrapper: Option, + /// Verbose version information (the output of `rustc -vV`) + pub verbose_version: String, + /// The host triple (arch-platform-OS), this comes from verbose_version. + pub host: String, +} + +impl Rustc { + /// Run the compiler at `path` to learn various pieces of information about + /// it, with an optional wrapper. + /// + /// If successful this function returns a description of the compiler along + /// with a list of its capabilities. 
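`Rustc::new` (continued below) shells out to `rustc -vV` and scrapes the `host:` line for the target triple. The parsing step on its own, applied to an illustrative sample of `-vV` output:

```rust
// Extract the host triple from `rustc -vV` output the way `Rustc::new`
// does: find the line starting with "host: " and take the remainder.
fn host_triple(verbose_version: &str) -> Option<&str> {
    verbose_version
        .lines()
        .find(|l| l.starts_with("host: "))
        .map(|l| &l["host: ".len()..])
}

fn main() {
    // Illustrative sample; real output also carries commit hash and date.
    let sample = "rustc 1.20.0\n\
                  binary: rustc\n\
                  host: x86_64-unknown-linux-gnu\n\
                  release: 1.20.0\n";
    assert_eq!(host_triple(sample), Some("x86_64-unknown-linux-gnu"));
}
```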
+ pub fn new(path: PathBuf, wrapper: Option) -> CargoResult { + let mut cmd = util::process(&path); + cmd.arg("-vV"); + + let output = cmd.exec_with_output()?; + + let verbose_version = String::from_utf8(output.stdout).map_err(|_| { + internal("rustc -v didn't return utf8 output") + })?; + + let host = { + let triple = verbose_version.lines().find(|l| { + l.starts_with("host: ") + }).map(|l| &l[6..]).ok_or_else(|| internal("rustc -v didn't have a line for `host:`"))?; + triple.to_string() + }; + + Ok(Rustc { + path: path, + wrapper: wrapper, + verbose_version: verbose_version, + host: host, + }) + } + + /// Get a process builder set up to use the found rustc version, with a wrapper if Some + pub fn process(&self) -> ProcessBuilder { + if let Some(ref wrapper) = self.wrapper { + let mut cmd = util::process(wrapper); + { + cmd.arg(&self.path); + } + cmd + } else { + util::process(&self.path) + } + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/util/sha256.rs b/collector/compile-benchmarks/cargo/src/cargo/util/sha256.rs new file mode 100644 index 000000000..c805d7f1a --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/util/sha256.rs @@ -0,0 +1,23 @@ +extern crate crypto_hash; +use self::crypto_hash::{Hasher,Algorithm}; +use std::io::Write; + +pub struct Sha256(Hasher); + +impl Sha256 { + pub fn new() -> Sha256 { + let hasher = Hasher::new(Algorithm::SHA256); + Sha256(hasher) + } + + pub fn update(&mut self, bytes: &[u8]) { + let _ = self.0.write_all(bytes); + } + + pub fn finish(&mut self) -> [u8; 32] { + let mut ret = [0u8; 32]; + let data = self.0.finish(); + ret.copy_from_slice(&data[..]); + ret + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/util/to_semver.rs b/collector/compile-benchmarks/cargo/src/cargo/util/to_semver.rs new file mode 100644 index 000000000..ad6aff16e --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/util/to_semver.rs @@ -0,0 +1,30 @@ +use semver::Version; + +pub trait ToSemver { + fn to_semver(self) -> Result; +} + +impl ToSemver for Version { + fn to_semver(self) -> Result { Ok(self) } +} + +impl<'a> ToSemver for &'a str { + fn to_semver(self) -> Result { + match Version::parse(self) { + Ok(v) => Ok(v), + Err(..) 
=> Err(format!("cannot parse '{}' as a semver", self)), + } + } +} + +impl<'a> ToSemver for &'a String { + fn to_semver(self) -> Result { + (**self).to_semver() + } +} + +impl<'a> ToSemver for &'a Version { + fn to_semver(self) -> Result { + Ok(self.clone()) + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/util/to_url.rs b/collector/compile-benchmarks/cargo/src/cargo/util/to_url.rs new file mode 100644 index 000000000..f6a4d23a5 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/util/to_url.rs @@ -0,0 +1,27 @@ +use std::path::Path; + +use url::Url; + +use util::CargoResult; + +/// A type that can be converted to a Url +pub trait ToUrl { + /// Performs the conversion + fn to_url(self) -> CargoResult; +} + +impl<'a> ToUrl for &'a str { + fn to_url(self) -> CargoResult { + Url::parse(self).map_err(|s| { + format!("invalid url `{}`: {}", self, s).into() + }) + } +} + +impl<'a> ToUrl for &'a Path { + fn to_url(self) -> CargoResult { + Url::from_file_path(self).map_err(|()| { + format!("invalid path url `{}`", self.display()).into() + }) + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/util/toml/mod.rs b/collector/compile-benchmarks/cargo/src/cargo/util/toml/mod.rs new file mode 100644 index 000000000..32122444d --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/util/toml/mod.rs @@ -0,0 +1,1085 @@ +use std::collections::{HashMap, BTreeMap, HashSet, BTreeSet}; +use std::fmt; +use std::fs; +use std::path::{Path, PathBuf}; +use std::rc::Rc; +use std::str; + +use semver::{self, VersionReq}; +use serde::ser; +use serde::de::{self, Deserialize}; +use serde_ignored; +use toml; +use url::Url; + +use core::{SourceId, Profiles, PackageIdSpec, GitReference, WorkspaceConfig, WorkspaceRootConfig}; +use core::{Summary, Manifest, Target, Dependency, PackageId}; +use core::{EitherManifest, VirtualManifest, Features}; +use core::dependency::{Kind, Platform}; +use core::manifest::{LibKind, Profile, ManifestMetadata}; +use sources::CRATES_IO; +use util::paths; +use util::{self, ToUrl, Config}; +use util::errors::{CargoError, CargoResult, CargoResultExt}; + +mod targets; +use self::targets::targets; + +pub fn read_manifest(path: &Path, source_id: &SourceId, config: &Config) + -> CargoResult<(EitherManifest, Vec)> { + trace!("read_manifest; path={}; source-id={}", path.display(), source_id); + let contents = paths::read(path)?; + + do_read_manifest(&contents, path, source_id, config).chain_err(|| { + format!("failed to parse manifest at `{}`", path.display()) + }) +} + +fn do_read_manifest(contents: &str, + manifest_file: &Path, + source_id: &SourceId, + config: &Config) + -> CargoResult<(EitherManifest, Vec)> { + let package_root = manifest_file.parent().unwrap(); + + let toml = { + let pretty_filename = + util::without_prefix(manifest_file, config.cwd()).unwrap_or(manifest_file); + parse(contents, pretty_filename, config)? 
+ }; + + let mut unused = BTreeSet::new(); + let manifest: TomlManifest = serde_ignored::deserialize(toml, |path| { + let mut key = String::new(); + stringify(&mut key, &path); + unused.insert(key); + })?; + + let manifest = Rc::new(manifest); + return match TomlManifest::to_real_manifest(&manifest, + source_id, + package_root, + config) { + Ok((mut manifest, paths)) => { + for key in unused { + manifest.add_warning(format!("unused manifest key: {}", key)); + } + if !manifest.targets().iter().any(|t| !t.is_custom_build()) { + bail!("no targets specified in the manifest\n \ + either src/lib.rs, src/main.rs, a [lib] section, or \ + [[bin]] section must be present") + } + Ok((EitherManifest::Real(manifest), paths)) + } + Err(e) => { + match TomlManifest::to_virtual_manifest(&manifest, + source_id, + package_root, + config) { + Ok((m, paths)) => Ok((EitherManifest::Virtual(m), paths)), + Err(..) => Err(e), + } + } + }; + + fn stringify(dst: &mut String, path: &serde_ignored::Path) { + use serde_ignored::Path; + + match *path { + Path::Root => {} + Path::Seq { parent, index } => { + stringify(dst, parent); + if !dst.is_empty() { + dst.push('.'); + } + dst.push_str(&index.to_string()); + } + Path::Map { parent, ref key } => { + stringify(dst, parent); + if !dst.is_empty() { + dst.push('.'); + } + dst.push_str(key); + } + Path::Some { parent } | + Path::NewtypeVariant { parent } | + Path::NewtypeStruct { parent } => stringify(dst, parent), + } + } +} + +pub fn parse(toml: &str, + file: &Path, + config: &Config) -> CargoResult { + let first_error = match toml.parse() { + Ok(ret) => return Ok(ret), + Err(e) => e, + }; + + let mut second_parser = toml::de::Deserializer::new(toml); + second_parser.set_require_newline_after_table(false); + if let Ok(ret) = toml::Value::deserialize(&mut second_parser) { + let msg = format!("\ +TOML file found which contains invalid syntax and will soon not parse +at `{}`. + +The TOML spec requires newlines after table definitions (e.g. `[a] b = 1` is +invalid), but this file has a table header which does not have a newline after +it. 
A newline needs to be added and this warning will soon become a hard error +in the future.", file.display()); + config.shell().warn(&msg)?; + return Ok(ret) + } + + Err(first_error).chain_err(|| { + "could not parse input as TOML" + }) +} + +type TomlLibTarget = TomlTarget; +type TomlBinTarget = TomlTarget; +type TomlExampleTarget = TomlTarget; +type TomlTestTarget = TomlTarget; +type TomlBenchTarget = TomlTarget; + +#[derive(Debug, Serialize)] +#[serde(untagged)] +pub enum TomlDependency { + Simple(String), + Detailed(DetailedTomlDependency) +} + +impl<'de> de::Deserialize<'de> for TomlDependency { + fn deserialize(deserializer: D) -> Result + where D: de::Deserializer<'de> + { + struct TomlDependencyVisitor; + + impl<'de> de::Visitor<'de> for TomlDependencyVisitor { + type Value = TomlDependency; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a version string like \"0.9.8\" or a \ + detailed dependency like { version = \"0.9.8\" }") + } + + fn visit_str(self, s: &str) -> Result + where E: de::Error + { + Ok(TomlDependency::Simple(s.to_owned())) + } + + fn visit_map(self, map: V) -> Result + where V: de::MapAccess<'de> + { + let mvd = de::value::MapAccessDeserializer::new(map); + DetailedTomlDependency::deserialize(mvd).map(TomlDependency::Detailed) + } + } + + deserializer.deserialize_any(TomlDependencyVisitor) + } +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default)] +pub struct DetailedTomlDependency { + version: Option, + path: Option, + git: Option, + branch: Option, + tag: Option, + rev: Option, + features: Option>, + optional: Option, + #[serde(rename = "default-features")] + default_features: Option, + #[serde(rename = "default_features")] + default_features2: Option, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct TomlManifest { + package: Option>, + project: Option>, + profile: Option, + lib: Option, + bin: Option>, + example: Option>, + test: Option>, + bench: Option>, + dependencies: Option>, + #[serde(rename = "dev-dependencies")] + dev_dependencies: Option>, + #[serde(rename = "dev_dependencies")] + dev_dependencies2: Option>, + #[serde(rename = "build-dependencies")] + build_dependencies: Option>, + #[serde(rename = "build_dependencies")] + build_dependencies2: Option>, + features: Option>>, + target: Option>, + replace: Option>, + patch: Option>>, + workspace: Option, + badges: Option>>, + #[serde(rename = "cargo-features")] + cargo_features: Option>, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default)] +pub struct TomlProfiles { + test: Option, + doc: Option, + bench: Option, + dev: Option, + release: Option, +} + +#[derive(Clone, Debug)] +pub struct TomlOptLevel(String); + +impl<'de> de::Deserialize<'de> for TomlOptLevel { + fn deserialize(d: D) -> Result + where D: de::Deserializer<'de> + { + struct Visitor; + + impl<'de> de::Visitor<'de> for Visitor { + type Value = TomlOptLevel; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("an optimization level") + } + + fn visit_i64(self, value: i64) -> Result + where E: de::Error + { + Ok(TomlOptLevel(value.to_string())) + } + + fn visit_str(self, value: &str) -> Result + where E: de::Error + { + if value == "s" || value == "z" { + Ok(TomlOptLevel(value.to_string())) + } else { + Err(E::custom(format!("must be an integer, `z`, or `s`, \ + but found: {}", value))) + } + } + } + + d.deserialize_u32(Visitor) + } +} + +impl ser::Serialize for TomlOptLevel { + fn serialize(&self, serializer: S) -> Result + 
where S: ser::Serializer, + { + match self.0.parse::() { + Ok(n) => n.serialize(serializer), + Err(_) => self.0.serialize(serializer), + } + } +} + +#[derive(Clone, Debug, Serialize)] +#[serde(untagged)] +pub enum U32OrBool { + U32(u32), + Bool(bool), +} + +impl<'de> de::Deserialize<'de> for U32OrBool { + fn deserialize(deserializer: D) -> Result + where D: de::Deserializer<'de> + { + struct Visitor; + + impl<'de> de::Visitor<'de> for Visitor { + type Value = U32OrBool; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a boolean or an integer") + } + + fn visit_i64(self, u: i64) -> Result + where E: de::Error, + { + Ok(U32OrBool::U32(u as u32)) + } + + fn visit_u64(self, u: u64) -> Result + where E: de::Error, + { + Ok(U32OrBool::U32(u as u32)) + } + + fn visit_bool(self, b: bool) -> Result + where E: de::Error, + { + Ok(U32OrBool::Bool(b)) + } + } + + deserializer.deserialize_any(Visitor) + } +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default)] +pub struct TomlProfile { + #[serde(rename = "opt-level")] + opt_level: Option, + lto: Option, + #[serde(rename = "codegen-units")] + codegen_units: Option, + debug: Option, + #[serde(rename = "debug-assertions")] + debug_assertions: Option, + rpath: Option, + panic: Option, + #[serde(rename = "overflow-checks")] + overflow_checks: Option, +} + +#[derive(Clone, Debug, Serialize)] +#[serde(untagged)] +pub enum StringOrBool { + String(String), + Bool(bool), +} + +impl<'de> de::Deserialize<'de> for StringOrBool { + fn deserialize(deserializer: D) -> Result + where D: de::Deserializer<'de> + { + struct Visitor; + + impl<'de> de::Visitor<'de> for Visitor { + type Value = StringOrBool; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a boolean or a string") + } + + fn visit_str(self, s: &str) -> Result + where E: de::Error, + { + Ok(StringOrBool::String(s.to_string())) + } + + fn visit_bool(self, b: bool) -> Result + where E: de::Error, + { + Ok(StringOrBool::Bool(b)) + } + } + + deserializer.deserialize_any(Visitor) + } +} + +#[derive(Deserialize, Serialize, Clone, Debug)] +pub struct TomlProject { + name: String, + version: semver::Version, + authors: Option>, + build: Option, + links: Option, + exclude: Option>, + include: Option>, + publish: Option, + workspace: Option, + #[serde(rename = "im-a-teapot")] + im_a_teapot: Option, + + // package metadata + description: Option, + homepage: Option, + documentation: Option, + readme: Option, + keywords: Option>, + categories: Option>, + license: Option, + #[serde(rename = "license-file")] + license_file: Option, + repository: Option, + metadata: Option, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct TomlWorkspace { + members: Option>, + exclude: Option>, +} + +impl TomlProject { + pub fn to_package_id(&self, source_id: &SourceId) -> CargoResult { + PackageId::new(&self.name, self.version.clone(), source_id) + } +} + +struct Context<'a, 'b> { + pkgid: Option<&'a PackageId>, + deps: &'a mut Vec, + source_id: &'a SourceId, + nested_paths: &'a mut Vec, + config: &'b Config, + warnings: &'a mut Vec, + platform: Option, + root: &'a Path, +} + +impl TomlManifest { + pub fn prepare_for_publish(&self) -> TomlManifest { + let mut package = self.package.as_ref() + .or_else(|| self.project.as_ref()) + .unwrap() + .clone(); + package.workspace = None; + return TomlManifest { + package: Some(package), + project: None, + profile: self.profile.clone(), + lib: self.lib.clone(), + bin: self.bin.clone(), + 
example: self.example.clone(), + test: self.test.clone(), + bench: self.bench.clone(), + dependencies: map_deps(self.dependencies.as_ref()), + dev_dependencies: map_deps(self.dev_dependencies.as_ref() + .or_else(|| self.dev_dependencies2.as_ref())), + dev_dependencies2: None, + build_dependencies: map_deps(self.build_dependencies.as_ref() + .or_else(|| self.build_dependencies2.as_ref())), + build_dependencies2: None, + features: self.features.clone(), + target: self.target.as_ref().map(|target_map| { + target_map.iter().map(|(k, v)| { + (k.clone(), TomlPlatform { + dependencies: map_deps(v.dependencies.as_ref()), + dev_dependencies: map_deps(v.dev_dependencies.as_ref() + .or_else(|| v.dev_dependencies2.as_ref())), + dev_dependencies2: None, + build_dependencies: map_deps(v.build_dependencies.as_ref() + .or_else(|| v.build_dependencies2.as_ref())), + build_dependencies2: None, + }) + }).collect() + }), + replace: None, + patch: None, + workspace: None, + badges: self.badges.clone(), + cargo_features: self.cargo_features.clone(), + }; + + fn map_deps(deps: Option<&BTreeMap>) + -> Option> + { + let deps = match deps { + Some(deps) => deps, + None => return None + }; + Some(deps.iter().map(|(k, v)| (k.clone(), map_dependency(v))).collect()) + } + + fn map_dependency(dep: &TomlDependency) -> TomlDependency { + match *dep { + TomlDependency::Detailed(ref d) => { + let mut d = d.clone(); + d.path.take(); // path dependencies become crates.io deps + TomlDependency::Detailed(d) + } + TomlDependency::Simple(ref s) => { + TomlDependency::Detailed(DetailedTomlDependency { + version: Some(s.clone()), + ..Default::default() + }) + } + } + } + } + + fn to_real_manifest(me: &Rc, + source_id: &SourceId, + package_root: &Path, + config: &Config) + -> CargoResult<(Manifest, Vec)> { + let mut nested_paths = vec![]; + let mut warnings = vec![]; + let mut errors = vec![]; + + let project = me.project.as_ref().or_else(|| me.package.as_ref()); + let project = project.ok_or_else(|| { + CargoError::from("no `package` section found.") + })?; + + let package_name = project.name.trim(); + if package_name.is_empty() { + bail!("package name cannot be an empty string.") + } + + let pkgid = project.to_package_id(source_id)?; + + // If we have no lib at all, use the inferred lib if available + // If we have a lib with a path, we're done + // If we have a lib with no path, use the inferred lib or_else package name + let targets = targets(me, package_name, package_root, &project.build, + &mut warnings, &mut errors)?; + + if targets.is_empty() { + debug!("manifest has no build targets"); + } + + if let Err(e) = unique_build_targets(&targets, package_root) { + warnings.push(format!("file found to be present in multiple \ + build targets: {}", e)); + } + + let mut deps = Vec::new(); + let replace; + let patch; + + { + + let mut cx = Context { + pkgid: Some(&pkgid), + deps: &mut deps, + source_id: source_id, + nested_paths: &mut nested_paths, + config: config, + warnings: &mut warnings, + platform: None, + root: package_root, + }; + + fn process_dependencies( + cx: &mut Context, + new_deps: Option<&BTreeMap>, + kind: Option) + -> CargoResult<()> + { + let dependencies = match new_deps { + Some(dependencies) => dependencies, + None => return Ok(()) + }; + for (n, v) in dependencies.iter() { + let dep = v.to_dependency(n, cx, kind)?; + cx.deps.push(dep); + } + + Ok(()) + } + + // Collect the deps + process_dependencies(&mut cx, me.dependencies.as_ref(), + None)?; + let dev_deps = me.dev_dependencies.as_ref() + .or_else(|| 
me.dev_dependencies2.as_ref()); + process_dependencies(&mut cx, dev_deps, Some(Kind::Development))?; + let build_deps = me.build_dependencies.as_ref() + .or_else(|| me.build_dependencies2.as_ref()); + process_dependencies(&mut cx, build_deps, Some(Kind::Build))?; + + for (name, platform) in me.target.iter().flat_map(|t| t) { + cx.platform = Some(name.parse()?); + process_dependencies(&mut cx, platform.dependencies.as_ref(), + None)?; + let build_deps = platform.build_dependencies.as_ref() + .or_else(|| platform.build_dependencies2.as_ref()); + process_dependencies(&mut cx, build_deps, Some(Kind::Build))?; + let dev_deps = platform.dev_dependencies.as_ref() + .or_else(|| platform.dev_dependencies2.as_ref()); + process_dependencies(&mut cx, dev_deps, Some(Kind::Development))?; + } + + replace = me.replace(&mut cx)?; + patch = me.patch(&mut cx)?; + } + + { + let mut names_sources = BTreeMap::new(); + for dep in &deps { + let name = dep.name(); + let prev = names_sources.insert(name, dep.source_id()); + if prev.is_some() && prev != Some(dep.source_id()) { + bail!("Dependency '{}' has different source paths depending on the build \ + target. Each dependency must have a single canonical source path \ + irrespective of build target.", name); + } + } + } + + let exclude = project.exclude.clone().unwrap_or_default(); + let include = project.include.clone().unwrap_or_default(); + + let summary = Summary::new(pkgid, deps, me.features.clone() + .unwrap_or_else(BTreeMap::new))?; + let metadata = ManifestMetadata { + description: project.description.clone(), + homepage: project.homepage.clone(), + documentation: project.documentation.clone(), + readme: project.readme.clone(), + authors: project.authors.clone().unwrap_or_default(), + license: project.license.clone(), + license_file: project.license_file.clone(), + repository: project.repository.clone(), + keywords: project.keywords.clone().unwrap_or_default(), + categories: project.categories.clone().unwrap_or_default(), + badges: me.badges.clone().unwrap_or_default(), + }; + + let workspace_config = match (me.workspace.as_ref(), + project.workspace.as_ref()) { + (Some(config), None) => { + WorkspaceConfig::Root( + WorkspaceRootConfig::new(&package_root, &config.members, &config.exclude) + ) + } + (None, root) => { + WorkspaceConfig::Member { root: root.cloned() } + } + (Some(..), Some(..)) => { + bail!("cannot configure both `package.workspace` and \ + `[workspace]`, only one can be specified") + } + }; + let profiles = build_profiles(&me.profile); + let publish = project.publish.unwrap_or(true); + let empty = Vec::new(); + let cargo_features = me.cargo_features.as_ref().unwrap_or(&empty); + let features = Features::new(cargo_features, &mut warnings)?; + let mut manifest = Manifest::new(summary, + targets, + exclude, + include, + project.links.clone(), + metadata, + profiles, + publish, + replace, + patch, + workspace_config, + features, + project.im_a_teapot, + Rc::clone(me)); + if project.license_file.is_some() && project.license.is_some() { + manifest.add_warning("only one of `license` or \ + `license-file` is necessary".to_string()); + } + for warning in warnings { + manifest.add_warning(warning); + } + for error in errors { + manifest.add_critical_warning(error); + } + + manifest.feature_gate()?; + + Ok((manifest, nested_paths)) + } + + fn to_virtual_manifest(me: &Rc, + source_id: &SourceId, + root: &Path, + config: &Config) + -> CargoResult<(VirtualManifest, Vec)> { + if me.project.is_some() { + bail!("virtual manifests do not define [project]"); 
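`TomlDependency` accepts either a bare version string or a detailed table, which is why its `Deserialize` impl above dispatches on `visit_str` vs `visit_map`. The same shape can be modeled with serde's derived untagged enums; this sketch assumes `serde` (with derive) and `toml` as dependencies, uses a hypothetical subset of the detailed fields, and uses modern derive syntax where the vendored code hand-writes the equivalent visitor:

```rust
use serde::Deserialize;
use std::collections::BTreeMap;

// Two accepted shapes: `dep = "0.9.8"` or `dep = { version = "...", path = "..." }`.
// Serde tries each variant in order, exactly what the hand-written
// visitor above achieves with visit_str/visit_map.
#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum Dep {
    Simple(String),
    Detailed { version: Option<String>, path: Option<String> },
}

fn main() {
    let manifest: BTreeMap<String, Dep> = toml::from_str(
        r#"
        semver = "0.9.8"
        local = { path = "../local" }
        "#,
    )
    .unwrap();
    match &manifest["semver"] {
        Dep::Simple(v) => assert_eq!(v, "0.9.8"),
        _ => panic!("expected a bare version string"),
    }
}
```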
+ } + if me.package.is_some() { + bail!("virtual manifests do not define [package]"); + } + if me.lib.is_some() { + bail!("virtual manifests do not specify [lib]"); + } + if me.bin.is_some() { + bail!("virtual manifests do not specify [[bin]]"); + } + if me.example.is_some() { + bail!("virtual manifests do not specify [[example]]"); + } + if me.test.is_some() { + bail!("virtual manifests do not specify [[test]]"); + } + if me.bench.is_some() { + bail!("virtual manifests do not specify [[bench]]"); + } + + let mut nested_paths = Vec::new(); + let mut warnings = Vec::new(); + let mut deps = Vec::new(); + let (replace, patch) = { + let mut cx = Context { + pkgid: None, + deps: &mut deps, + source_id: source_id, + nested_paths: &mut nested_paths, + config: config, + warnings: &mut warnings, + platform: None, + root: root + }; + (me.replace(&mut cx)?, me.patch(&mut cx)?) + }; + let profiles = build_profiles(&me.profile); + let workspace_config = match me.workspace { + Some(ref config) => { + WorkspaceConfig::Root( + WorkspaceRootConfig::new(&root, &config.members, &config.exclude) + ) + } + None => { + bail!("virtual manifests must be configured with [workspace]"); + } + }; + Ok((VirtualManifest::new(replace, patch, workspace_config, profiles), nested_paths)) + } + + fn replace(&self, cx: &mut Context) + -> CargoResult> { + if self.patch.is_some() && self.replace.is_some() { + bail!("cannot specify both [replace] and [patch]"); + } + let mut replace = Vec::new(); + for (spec, replacement) in self.replace.iter().flat_map(|x| x) { + let mut spec = PackageIdSpec::parse(spec).chain_err(|| { + format!("replacements must specify a valid semver \ + version to replace, but `{}` does not", + spec) + })?; + if spec.url().is_none() { + spec.set_url(CRATES_IO.parse().unwrap()); + } + + let version_specified = match *replacement { + TomlDependency::Detailed(ref d) => d.version.is_some(), + TomlDependency::Simple(..) => true, + }; + if version_specified { + bail!("replacements cannot specify a version \ + requirement, but found one for `{}`", spec); + } + + let mut dep = replacement.to_dependency(spec.name(), cx, None)?; + { + let version = spec.version().ok_or_else(|| { + CargoError::from(format!("replacements must specify a version \ + to replace, but `{}` does not", + spec)) + })?; + dep.set_version_req(VersionReq::exact(version)); + } + replace.push((spec, dep)); + } + Ok(replace) + } + + fn patch(&self, cx: &mut Context) + -> CargoResult>> { + let mut patch = HashMap::new(); + for (url, deps) in self.patch.iter().flat_map(|x| x) { + let url = match &url[..] { + "crates-io" => CRATES_IO.parse().unwrap(), + _ => url.to_url()?, + }; + patch.insert(url, deps.iter().map(|(name, dep)| { + dep.to_dependency(name, cx, None) + }).collect::>>()?); + } + Ok(patch) + } + + fn maybe_custom_build(&self, + build: &Option, + package_root: &Path) + -> Option { + let build_rs = package_root.join("build.rs"); + match *build { + Some(StringOrBool::Bool(false)) => None, // explicitly no build script + Some(StringOrBool::Bool(true)) => Some(build_rs.into()), + Some(StringOrBool::String(ref s)) => Some(PathBuf::from(s)), + None => { + match fs::metadata(&build_rs) { + // If there is a build.rs file next to the Cargo.toml, assume it is + // a build script + Ok(ref e) if e.is_file() => Some(build_rs.into()), + Ok(_) | Err(_) => None, + } + } + } + } +} + +/// Will check a list of build targets, and make sure the target names are unique within a vector. +/// If not, the name of the offending build target is returned. 
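`unique_build_targets` (below) flags two build targets that compile the same source file by exploiting `HashSet::insert`, which returns `false` when the value is already present. The check in isolation, with a hypothetical package layout:

```rust
use std::collections::HashSet;
use std::path::{Path, PathBuf};

// Duplicate detection in the style of `unique_build_targets`: the first
// repeated path makes `insert` return false and is reported back.
fn first_duplicate(package_root: &Path, src_paths: &[&str]) -> Option<PathBuf> {
    let mut seen = HashSet::new();
    for p in src_paths {
        let full = package_root.join(p);
        if !seen.insert(full.clone()) {
            return Some(full);
        }
    }
    None
}

fn main() {
    // Hypothetical targets: two of them point at the same file.
    let dup = first_duplicate(
        Path::new("/proj"),
        &["src/main.rs", "src/bin/tool.rs", "src/main.rs"],
    );
    assert_eq!(dup, Some(PathBuf::from("/proj/src/main.rs")));
}
```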
+fn unique_build_targets(targets: &[Target], package_root: &Path) -> Result<(), String> { + let mut seen = HashSet::new(); + for v in targets.iter().map(|e| package_root.join(e.src_path())) { + if !seen.insert(v.clone()) { + return Err(v.display().to_string()); + } + } + Ok(()) +} + +impl TomlDependency { + fn to_dependency(&self, + name: &str, + cx: &mut Context, + kind: Option) + -> CargoResult { + let details = match *self { + TomlDependency::Simple(ref version) => DetailedTomlDependency { + version: Some(version.clone()), + .. Default::default() + }, + TomlDependency::Detailed(ref details) => details.clone(), + }; + + if details.version.is_none() && details.path.is_none() && + details.git.is_none() { + let msg = format!("dependency ({}) specified without \ + providing a local path, Git repository, or \ + version to use. This will be considered an \ + error in future versions", name); + cx.warnings.push(msg); + } + + if details.git.is_none() { + let git_only_keys = [ + (&details.branch, "branch"), + (&details.tag, "tag"), + (&details.rev, "rev") + ]; + + for &(key, key_name) in &git_only_keys { + if key.is_some() { + let msg = format!("key `{}` is ignored for dependency ({}). \ + This will be considered an error in future versions", + key_name, name); + cx.warnings.push(msg) + } + } + } + + let new_source_id = match (details.git.as_ref(), details.path.as_ref()) { + (Some(git), maybe_path) => { + if maybe_path.is_some() { + let msg = format!("dependency ({}) specification is ambiguous. \ + Only one of `git` or `path` is allowed. \ + This will be considered an error in future versions", name); + cx.warnings.push(msg) + } + + let n_details = [&details.branch, &details.tag, &details.rev] + .iter() + .filter(|d| d.is_some()) + .count(); + + if n_details > 1 { + let msg = format!("dependency ({}) specification is ambiguous. \ + Only one of `branch`, `tag` or `rev` is allowed. \ + This will be considered an error in future versions", name); + cx.warnings.push(msg) + } + + let reference = details.branch.clone().map(GitReference::Branch) + .or_else(|| details.tag.clone().map(GitReference::Tag)) + .or_else(|| details.rev.clone().map(GitReference::Rev)) + .unwrap_or_else(|| GitReference::Branch("master".to_string())); + let loc = git.to_url()?; + SourceId::for_git(&loc, reference)? + }, + (None, Some(path)) => { + cx.nested_paths.push(PathBuf::from(path)); + // If the source id for the package we're parsing is a path + // source, then we normalize the path here to get rid of + // components like `..`. + // + // The purpose of this is to get a canonical id for the package + // that we're depending on to ensure that builds of this package + // always end up hashing to the same value no matter where it's + // built from. + if cx.source_id.is_path() { + let path = cx.root.join(path); + let path = util::normalize_path(&path); + SourceId::for_path(&path)? + } else { + cx.source_id.clone() + } + }, + (None, None) => SourceId::crates_io(cx.config)?, + }; + + let version = details.version.as_ref().map(|v| &v[..]); + let mut dep = match cx.pkgid { + Some(id) => { + Dependency::parse(name, version, &new_source_id, + id, cx.config)? 
+ } + None => Dependency::parse_no_deprecated(name, version, &new_source_id)?, + }; + dep.set_features(details.features.unwrap_or_default()) + .set_default_features(details.default_features + .or(details.default_features2) + .unwrap_or(true)) + .set_optional(details.optional.unwrap_or(false)) + .set_platform(cx.platform.clone()); + if let Some(kind) = kind { + dep.set_kind(kind); + } + Ok(dep) + } +} + +#[derive(Default, Serialize, Deserialize, Debug, Clone)] +struct TomlTarget { + name: Option, + + // The intention was to only accept `crate-type` here but historical + // versions of Cargo also accepted `crate_type`, so look for both. + #[serde(rename = "crate-type")] + crate_type: Option>, + #[serde(rename = "crate_type")] + crate_type2: Option>, + + path: Option, + test: Option, + doctest: Option, + bench: Option, + doc: Option, + plugin: Option, + #[serde(rename = "proc-macro")] + proc_macro: Option, + #[serde(rename = "proc_macro")] + proc_macro2: Option, + harness: Option, + #[serde(rename = "required-features")] + required_features: Option>, +} + +#[derive(Clone)] +struct PathValue(PathBuf); + +impl<'de> de::Deserialize<'de> for PathValue { + fn deserialize(deserializer: D) -> Result + where D: de::Deserializer<'de> + { + Ok(PathValue(String::deserialize(deserializer)?.into())) + } +} + +impl ser::Serialize for PathValue { + fn serialize(&self, serializer: S) -> Result + where S: ser::Serializer, + { + self.0.serialize(serializer) + } +} + +/// Corresponds to a `target` entry, but `TomlTarget` is already used. +#[derive(Serialize, Deserialize, Debug)] +struct TomlPlatform { + dependencies: Option>, + #[serde(rename = "build-dependencies")] + build_dependencies: Option>, + #[serde(rename = "build_dependencies")] + build_dependencies2: Option>, + #[serde(rename = "dev-dependencies")] + dev_dependencies: Option>, + #[serde(rename = "dev_dependencies")] + dev_dependencies2: Option>, +} + +impl TomlTarget { + fn new() -> TomlTarget { + TomlTarget::default() + } + + fn name(&self) -> String { + match self.name { + Some(ref name) => name.clone(), + None => panic!("target name is required") + } + } + + fn proc_macro(&self) -> Option { + self.proc_macro.or(self.proc_macro2) + } + + fn crate_types(&self) -> Option<&Vec> { + self.crate_type.as_ref().or_else(|| self.crate_type2.as_ref()) + } +} + +impl fmt::Debug for PathValue { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +fn build_profiles(profiles: &Option) -> Profiles { + let profiles = profiles.as_ref(); + let mut profiles = Profiles { + release: merge(Profile::default_release(), + profiles.and_then(|p| p.release.as_ref())), + dev: merge(Profile::default_dev(), + profiles.and_then(|p| p.dev.as_ref())), + test: merge(Profile::default_test(), + profiles.and_then(|p| p.test.as_ref())), + test_deps: merge(Profile::default_dev(), + profiles.and_then(|p| p.dev.as_ref())), + bench: merge(Profile::default_bench(), + profiles.and_then(|p| p.bench.as_ref())), + bench_deps: merge(Profile::default_release(), + profiles.and_then(|p| p.release.as_ref())), + doc: merge(Profile::default_doc(), + profiles.and_then(|p| p.doc.as_ref())), + custom_build: Profile::default_custom_build(), + check: merge(Profile::default_check(), + profiles.and_then(|p| p.dev.as_ref())), + doctest: Profile::default_doctest(), + }; + // The test/bench targets cannot have panic=abort because they'll all get + // compiled with --test which requires the unwind runtime currently + profiles.test.panic = None; + profiles.bench.panic = None; + 
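In `to_dependency` above, a git dependency's reference is chosen with a fixed precedence: `branch` wins, then `tag`, then `rev`, falling back to the `master` branch when none is given (a warning is pushed when more than one is set). The precedence rule on its own:

```rust
// The branch/tag/rev precedence from `to_dependency`, with the
// `GitReference` enum reduced to what this sketch needs.
#[derive(Debug, PartialEq)]
enum GitReference {
    Branch(String),
    Tag(String),
    Rev(String),
}

fn git_reference(branch: Option<String>, tag: Option<String>, rev: Option<String>) -> GitReference {
    branch.map(GitReference::Branch)
        .or_else(|| tag.map(GitReference::Tag))
        .or_else(|| rev.map(GitReference::Rev))
        // The default of this era; newer Cargo resolves the remote HEAD.
        .unwrap_or_else(|| GitReference::Branch("master".to_string()))
}

fn main() {
    assert_eq!(
        git_reference(None, Some("v1.0".into()), None),
        GitReference::Tag("v1.0".to_string())
    );
    assert_eq!(
        git_reference(None, None, None),
        GitReference::Branch("master".to_string())
    );
}
```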
profiles.test_deps.panic = None; + profiles.bench_deps.panic = None; + return profiles; + + fn merge(profile: Profile, toml: Option<&TomlProfile>) -> Profile { + let &TomlProfile { + ref opt_level, lto, codegen_units, ref debug, debug_assertions, rpath, + ref panic, ref overflow_checks, + } = match toml { + Some(toml) => toml, + None => return profile, + }; + let debug = match *debug { + Some(U32OrBool::U32(debug)) => Some(Some(debug)), + Some(U32OrBool::Bool(true)) => Some(Some(2)), + Some(U32OrBool::Bool(false)) => Some(None), + None => None, + }; + Profile { + opt_level: opt_level.clone().unwrap_or(TomlOptLevel(profile.opt_level)).0, + lto: lto.unwrap_or(profile.lto), + codegen_units: codegen_units, + rustc_args: None, + rustdoc_args: None, + debuginfo: debug.unwrap_or(profile.debuginfo), + debug_assertions: debug_assertions.unwrap_or(profile.debug_assertions), + overflow_checks: overflow_checks.unwrap_or(profile.overflow_checks), + rpath: rpath.unwrap_or(profile.rpath), + test: profile.test, + doc: profile.doc, + run_custom_build: profile.run_custom_build, + check: profile.check, + panic: panic.clone().or(profile.panic), + } + } +} diff --git a/collector/compile-benchmarks/cargo/src/cargo/util/toml/targets.rs b/collector/compile-benchmarks/cargo/src/cargo/util/toml/targets.rs new file mode 100644 index 000000000..65393527a --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/cargo/util/toml/targets.rs @@ -0,0 +1,493 @@ +//! This module implements Cargo conventions for directory layout: +//! +//! * `src/lib.rs` is a library +//! * `src/main.rs` is a binary +//! * `src/bin/*.rs` are binaries +//! * `examples/*.rs` are examples +//! * `tests/*.rs` are integration tests +//! * `benches/*.rs` are benchmarks +//! +//! It is a bit tricky because we need match explicit information from `Cargo.toml` +//! with implicit info in directory layout. + +use std::path::{Path, PathBuf}; +use std::fs::{self, DirEntry}; +use std::collections::HashSet; + +use core::Target; +use ops::is_bad_artifact_name; +use util::errors::CargoResult; +use super::{TomlTarget, LibKind, PathValue, TomlManifest, StringOrBool, + TomlLibTarget, TomlBinTarget, TomlBenchTarget, TomlExampleTarget, TomlTestTarget}; + + +pub fn targets(manifest: &TomlManifest, + package_name: &str, + package_root: &Path, + custom_build: &Option, + warnings: &mut Vec, + errors: &mut Vec) + -> CargoResult> { + let mut targets = Vec::new(); + + let has_lib; + + if let Some(target) = clean_lib(manifest.lib.as_ref(), package_root, package_name, warnings)? { + targets.push(target); + has_lib = true; + } else { + has_lib = false; + } + + targets.extend( + clean_bins(manifest.bin.as_ref(), package_root, package_name, warnings, has_lib)? + ); + + targets.extend( + clean_examples(manifest.example.as_ref(), package_root, errors)? + ); + + targets.extend( + clean_tests(manifest.test.as_ref(), package_root, errors)? + ); + + targets.extend( + clean_benches(manifest.bench.as_ref(), package_root, warnings, errors)? 
diff --git a/collector/compile-benchmarks/cargo/src/cargo/util/toml/targets.rs b/collector/compile-benchmarks/cargo/src/cargo/util/toml/targets.rs
new file mode 100644
index 000000000..65393527a
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/util/toml/targets.rs
@@ -0,0 +1,493 @@
+//! This module implements Cargo conventions for directory layout:
+//!
+//!  * `src/lib.rs` is a library
+//!  * `src/main.rs` is a binary
+//!  * `src/bin/*.rs` are binaries
+//!  * `examples/*.rs` are examples
+//!  * `tests/*.rs` are integration tests
+//!  * `benches/*.rs` are benchmarks
+//!
+//! It is a bit tricky because we need to match explicit information from
+//! `Cargo.toml` with implicit info in directory layout.
+
+use std::path::{Path, PathBuf};
+use std::fs::{self, DirEntry};
+use std::collections::HashSet;
+
+use core::Target;
+use ops::is_bad_artifact_name;
+use util::errors::CargoResult;
+use super::{TomlTarget, LibKind, PathValue, TomlManifest, StringOrBool,
+            TomlLibTarget, TomlBinTarget, TomlBenchTarget, TomlExampleTarget, TomlTestTarget};
+
+pub fn targets(manifest: &TomlManifest,
+               package_name: &str,
+               package_root: &Path,
+               custom_build: &Option<PathBuf>,
+               warnings: &mut Vec<String>,
+               errors: &mut Vec<String>)
+               -> CargoResult<Vec<Target>> {
+    let mut targets = Vec::new();
+
+    let has_lib;
+
+    if let Some(target) = clean_lib(manifest.lib.as_ref(), package_root, package_name, warnings)? {
+        targets.push(target);
+        has_lib = true;
+    } else {
+        has_lib = false;
+    }
+
+    targets.extend(
+        clean_bins(manifest.bin.as_ref(), package_root, package_name, warnings, has_lib)?
+    );
+
+    targets.extend(
+        clean_examples(manifest.example.as_ref(), package_root, errors)?
+    );
+
+    targets.extend(
+        clean_tests(manifest.test.as_ref(), package_root, errors)?
+    );
+
+    targets.extend(
+        clean_benches(manifest.bench.as_ref(), package_root, warnings, errors)?
+    );
+
+    // processing the custom build script
+    if let Some(custom_build) = manifest.maybe_custom_build(custom_build, package_root) {
+        let name = format!("build-script-{}",
+                           custom_build.file_stem().and_then(|s| s.to_str()).unwrap_or(""));
+        targets.push(Target::custom_build_target(&name, package_root.join(custom_build)));
+    }
+
+    Ok(targets)
+}
+
+fn clean_lib(toml_lib: Option<&TomlLibTarget>,
+             package_root: &Path,
+             package_name: &str,
+             warnings: &mut Vec<String>) -> CargoResult<Option<Target>> {
+    let inferred = inferred_lib(package_root);
+    let lib = match toml_lib {
+        Some(lib) => {
+            if let Some(ref name) = lib.name {
+                // XXX: other code paths dodge this validation
+                if name.contains('-') {
+                    bail!("library target names cannot contain hyphens: {}", name)
+                }
+            }
+            Some(TomlTarget {
+                name: lib.name.clone().or_else(|| Some(package_name.to_owned())),
+                ..lib.clone()
+            })
+        }
+        None => inferred.as_ref().map(|lib| {
+            TomlTarget {
+                name: Some(package_name.to_string()),
+                path: Some(PathValue(lib.clone())),
+                ..TomlTarget::new()
+            }
+        })
+    };
+
+    let lib = match lib {
+        Some(ref lib) => lib,
+        None => return Ok(None)
+    };
+
+    validate_has_name(lib, "library", "lib")?;
+
+    let path = match (lib.path.as_ref(), inferred) {
+        (Some(path), _) => package_root.join(&path.0),
+        (None, Some(path)) => path,
+        (None, None) => {
+            let legacy_path = package_root.join("src").join(format!("{}.rs", lib.name()));
+            if legacy_path.exists() {
+                warnings.push(format!(
+                    "path `{}` was erroneously implicitly accepted for library `{}`,\n\
+                     please rename the file to `src/lib.rs` or set lib.path in Cargo.toml",
+                    legacy_path.display(), lib.name()
+                ));
+                legacy_path
+            } else {
+                bail!("can't find library `{}`, \
+                       rename file to `src/lib.rs` or specify lib.path", lib.name())
+            }
+        }
+    };
+
+    // Per the Macros 1.1 RFC:
+    //
+    // > Initially if a crate is compiled with the proc-macro crate type
+    // > (and possibly others) it will forbid exporting any items in the
+    // > crate other than those functions tagged #[proc_macro_derive] and
+    // > those functions must also be placed at the crate root.
+    //
+    // A plugin requires exporting plugin_registrar so a crate cannot be
+    // both at once.
+    let crate_types = match (lib.crate_types(), lib.plugin, lib.proc_macro()) {
+        (_, Some(true), Some(true)) => bail!("lib.plugin and lib.proc-macro cannot both be true"),
+        (Some(kinds), _, _) => kinds.iter().map(|s| LibKind::from_str(s)).collect(),
+        (None, Some(true), _) => vec![LibKind::Dylib],
+        (None, _, Some(true)) => vec![LibKind::ProcMacro],
+        (None, _, _) => vec![LibKind::Lib],
+    };
+
+    let mut target = Target::lib_target(&lib.name(), crate_types, path);
+    configure(lib, &mut target);
+    Ok(Some(target))
+}
+
+fn clean_bins(toml_bins: Option<&Vec<TomlBinTarget>>,
+              package_root: &Path,
+              package_name: &str,
+              warnings: &mut Vec<String>,
+              has_lib: bool) -> CargoResult<Vec<Target>> {
+    let inferred = inferred_bins(package_root, package_name);
+    let bins = match toml_bins {
+        Some(bins) => bins.clone(),
+        None => inferred.iter().map(|&(ref name, ref path)| {
+            TomlTarget {
+                name: Some(name.clone()),
+                path: Some(PathValue(path.clone())),
+                ..TomlTarget::new()
+            }
+        }).collect()
+    };
+
+    for bin in &bins {
+        validate_has_name(bin, "binary", "bin")?;
+
+        let name = bin.name();
+        if is_bad_artifact_name(&name) {
+            bail!("the binary target name `{}` is forbidden", name)
+        }
+    }
+
+    validate_unique_names(&bins, "binary")?;
+
+    let mut result = Vec::new();
+    for bin in &bins {
+        let path = target_path(bin, &inferred, "bin", package_root, &mut |_| {
+            if let Some(legacy_path) = legacy_bin_path(package_root, &bin.name(), has_lib) {
+                warnings.push(format!(
+                    "path `{}` was erroneously implicitly accepted for binary `{}`,\n\
+                     please set bin.path in Cargo.toml",
+                    legacy_path.display(), bin.name()
+                ));
+                Some(legacy_path)
+            } else {
+                None
+            }
+        });
+        let path = match path {
+            Ok(path) => path,
+            Err(e) => bail!("{}", e),
+        };
+
+        let mut target = Target::bin_target(&bin.name(), path,
+                                            bin.required_features.clone());
+        configure(bin, &mut target);
+        result.push(target);
+    }
+    return Ok(result);
+
+    fn legacy_bin_path(package_root: &Path, name: &str, has_lib: bool) -> Option<PathBuf> {
+        if !has_lib {
+            let path = package_root.join("src").join(format!("{}.rs", name));
+            if path.exists() {
+                return Some(path);
+            }
+        }
+        let path = package_root.join("src").join("main.rs");
+        if path.exists() {
+            return Some(path);
+        }
+
+        let path = package_root.join("src").join("bin").join("main.rs");
+        if path.exists() {
+            return Some(path);
+        }
+        None
+    }
+}
+
+fn clean_examples(toml_examples: Option<&Vec<TomlExampleTarget>>,
+                  package_root: &Path,
+                  errors: &mut Vec<String>)
+                  -> CargoResult<Vec<Target>> {
+
+    let inferred = infer_from_directory(&package_root.join("examples"));
+
+    let targets = clean_targets("example", "example",
+                                toml_examples, &inferred,
+                                package_root, errors)?;
+
+    let mut result = Vec::new();
+    for (path, toml) in targets {
+        let crate_types = match toml.crate_types() {
+            Some(kinds) => kinds.iter().map(|s| LibKind::from_str(s)).collect(),
+            None => Vec::new()
+        };
+
+        let mut target = Target::example_target(&toml.name(), crate_types, path,
+                                                toml.required_features.clone());
+        configure(&toml, &mut target);
+        result.push(target);
+    }
+
+    Ok(result)
+}
+
+fn clean_tests(toml_tests: Option<&Vec<TomlTestTarget>>,
+               package_root: &Path,
+               errors: &mut Vec<String>) -> CargoResult<Vec<Target>> {
+
+    let inferred = infer_from_directory(&package_root.join("tests"));
+
+    let targets = clean_targets("test", "test",
+                                toml_tests, &inferred,
+                                package_root, errors)?;
+
+    let mut result = Vec::new();
+    for (path, toml) in targets {
+        let mut target = Target::test_target(&toml.name(), path,
+                                             toml.required_features.clone());
+        configure(&toml, &mut target);
+        result.push(target);
+    }
+    Ok(result)
+}
+fn clean_benches(toml_benches: Option<&Vec<TomlBenchTarget>>,
+                 package_root: &Path,
+                 warnings: &mut Vec<String>,
+                 errors: &mut Vec<String>) -> CargoResult<Vec<Target>> {
+    let mut legacy_bench_path = |bench: &TomlTarget| {
+        let legacy_path = package_root.join("src").join("bench.rs");
+        if !(bench.name() == "bench" && legacy_path.exists()) {
+            return None;
+        }
+        warnings.push(format!(
+            "path `{}` was erroneously implicitly accepted for benchmark `{}`,\n\
+             please set bench.path in Cargo.toml",
+            legacy_path.display(), bench.name()
+        ));
+        Some(legacy_path)
+    };
+
+    let inferred = infer_from_directory(&package_root.join("benches"));
+
+    let targets = clean_targets_with_legacy_path("benchmark", "bench",
+                                                 toml_benches, &inferred,
+                                                 package_root,
+                                                 errors,
+                                                 &mut legacy_bench_path)?;
+
+    let mut result = Vec::new();
+    for (path, toml) in targets {
+        let mut target = Target::bench_target(&toml.name(), path,
+                                              toml.required_features.clone());
+        configure(&toml, &mut target);
+        result.push(target);
+    }
+
+    Ok(result)
+}
+
+fn clean_targets(target_kind_human: &str, target_kind: &str,
+                 toml_targets: Option<&Vec<TomlTarget>>,
+                 inferred: &[(String, PathBuf)],
+                 package_root: &Path,
+                 errors: &mut Vec<String>)
+                 -> CargoResult<Vec<(PathBuf, TomlTarget)>> {
+    clean_targets_with_legacy_path(target_kind_human, target_kind,
+                                   toml_targets,
+                                   inferred,
+                                   package_root,
+                                   errors,
+                                   &mut |_| None)
+}
+
+fn clean_targets_with_legacy_path(target_kind_human: &str, target_kind: &str,
+                                  toml_targets: Option<&Vec<TomlTarget>>,
+                                  inferred: &[(String, PathBuf)],
+                                  package_root: &Path,
+                                  errors: &mut Vec<String>,
+                                  legacy_path: &mut FnMut(&TomlTarget) -> Option<PathBuf>)
+                                  -> CargoResult<Vec<(PathBuf, TomlTarget)>> {
+    let toml_targets = match toml_targets {
+        Some(targets) => targets.clone(),
+        None => inferred.iter().map(|&(ref name, ref path)| {
+            TomlTarget {
+                name: Some(name.clone()),
+                path: Some(PathValue(path.clone())),
+                ..TomlTarget::new()
+            }
+        }).collect()
+    };
+
+    for target in &toml_targets {
+        validate_has_name(target, target_kind_human, target_kind)?;
+    }
+
+    validate_unique_names(&toml_targets, target_kind)?;
+    let mut result = Vec::new();
+    for target in toml_targets {
+        let path = target_path(&target, inferred, target_kind, package_root, legacy_path);
+        let path = match path {
+            Ok(path) => path,
+            Err(e) => {
+                errors.push(e);
+                continue
+            },
+        };
+        result.push((path, target));
+    }
+    Ok(result)
+}
+
+fn inferred_lib(package_root: &Path) -> Option<PathBuf> {
+    let lib = package_root.join("src").join("lib.rs");
+    if fs::metadata(&lib).is_ok() {
+        Some(lib)
+    } else {
+        None
+    }
+}
+
+fn inferred_bins(package_root: &Path, package_name: &str) -> Vec<(String, PathBuf)> {
+    let main = package_root.join("src").join("main.rs");
+    let mut result = Vec::new();
+    if main.exists() {
+        result.push((package_name.to_string(), main));
+    }
+    result.extend(infer_from_directory(&package_root.join("src").join("bin")));
+
+    result
+}
+
+fn infer_from_directory(directory: &Path) -> Vec<(String, PathBuf)> {
+    let entries = match fs::read_dir(directory) {
+        Err(_) => return Vec::new(),
+        Ok(dir) => dir
+    };
+
+    entries
+        .filter_map(|e| e.ok())
+        .filter(is_not_dotfile)
+        .filter_map(|d| infer_any(&d))
+        .collect()
+}
+
+fn infer_any(entry: &DirEntry) -> Option<(String, PathBuf)> {
+    if entry.path().extension().and_then(|p| p.to_str()) == Some("rs") {
+        infer_file(entry)
+    } else if entry.file_type().map(|t| t.is_dir()).ok() == Some(true) {
+        infer_subdirectory(entry)
+    } else {
+        None
+    }
+}
+
+fn infer_file(entry: &DirEntry) -> Option<(String, PathBuf)> {
+    let path = entry.path();
+    path
+        .file_stem()
+        .and_then(|p| p.to_str())
+        .map(|p| (p.to_owned(), path.clone()))
+}
+
+fn infer_subdirectory(entry: &DirEntry) -> Option<(String, PathBuf)> {
+    let path = entry.path();
+    let main = path.join("main.rs");
+    let name = path.file_name().and_then(|n| n.to_str());
+    match (name, main.exists()) {
+        (Some(name), true) => Some((name.to_owned(), main)),
+        _ => None
+    }
+}
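The `infer_*` helpers above encode the `src/bin` naming conventions. Here is a hedged, self-contained sketch of the same rule with assumed names (`infer_target` is not a function in the patch): a `*.rs` file yields a target named after its file stem, and a subdirectory qualifies only when it contains a `main.rs`.

```rust
use std::path::{Path, PathBuf};

// Illustrative only: returns the inferred (name, path) pair for one
// candidate path, or None if the path follows neither convention.
fn infer_target(path: &Path) -> Option<(String, PathBuf)> {
    if path.extension().and_then(|e| e.to_str()) == Some("rs") {
        // `src/bin/tool.rs` becomes a target named `tool`
        let name = path.file_stem()?.to_str()?.to_owned();
        Some((name, path.to_path_buf()))
    } else {
        // `src/bin/tool/main.rs` becomes a target named `tool`
        let main = path.join("main.rs");
        let name = path.file_name()?.to_str()?.to_owned();
        if main.exists() { Some((name, main)) } else { None }
    }
}

fn main() {
    let t = infer_target(Path::new("src/bin/tool.rs"));
    assert_eq!(t.map(|(n, _)| n), Some("tool".to_string()));
}
```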
+fn is_not_dotfile(entry: &DirEntry) -> bool {
+    entry.file_name().to_str().map(|s| s.starts_with('.')) == Some(false)
+}
+
+fn validate_has_name(target: &TomlTarget,
+                     target_kind_human: &str,
+                     target_kind: &str) -> CargoResult<()> {
+    match target.name {
+        Some(ref name) => if name.trim().is_empty() {
+            bail!("{} target names cannot be empty", target_kind_human)
+        },
+        None => bail!("{} target {}.name is required", target_kind_human, target_kind)
+    }
+
+    Ok(())
+}
+
+/// Checks a list of TOML targets and makes sure their names are unique within
+/// the given vector.
+fn validate_unique_names(targets: &[TomlTarget], target_kind: &str) -> CargoResult<()> {
+    let mut seen = HashSet::new();
+    for name in targets.iter().map(|e| e.name()) {
+        if !seen.insert(name.clone()) {
+            bail!("found duplicate {target_kind} name {name}, \
+                   but all {target_kind} targets must have a unique name",
+                  target_kind = target_kind, name = name);
+        }
+    }
+    Ok(())
+}
+
+fn configure(toml: &TomlTarget, target: &mut Target) {
+    let t2 = target.clone();
+    target.set_tested(toml.test.unwrap_or_else(|| t2.tested()))
+          .set_doc(toml.doc.unwrap_or_else(|| t2.documented()))
+          .set_doctest(toml.doctest.unwrap_or_else(|| t2.doctested()))
+          .set_benched(toml.bench.unwrap_or_else(|| t2.benched()))
+          .set_harness(toml.harness.unwrap_or_else(|| t2.harness()))
+          .set_for_host(match (toml.plugin, toml.proc_macro()) {
+              (None, None) => t2.for_host(),
+              (Some(true), _) | (_, Some(true)) => true,
+              (Some(false), _) | (_, Some(false)) => false,
+          });
+}
+
+fn target_path(target: &TomlTarget,
+               inferred: &[(String, PathBuf)],
+               target_kind: &str,
+               package_root: &Path,
+               legacy_path: &mut FnMut(&TomlTarget) -> Option<PathBuf>)
+               -> Result<PathBuf, String> {
+    if let Some(ref path) = target.path {
+        // Should we verify that this path exists here?
+        return Ok(package_root.join(&path.0));
+    }
+    let name = target.name();
+
+    let mut matching = inferred.iter()
+        .filter(|&&(ref n, _)| n == &name)
+        .map(|&(_, ref p)| p.clone());
+
+    let first = matching.next();
+    let second = matching.next();
+    match (first, second) {
+        (Some(path), None) => Ok(path),
+        (None, None) | (Some(_), Some(_)) => {
+            if let Some(path) = legacy_path(target) {
+                return Ok(path);
+            }
+            Err(format!("can't find `{name}` {target_kind}, specify {target_kind}.path",
+                        name = name, target_kind = target_kind))
+        }
+        (None, Some(_)) => unreachable!()
+    }
+}
diff --git a/collector/compile-benchmarks/cargo/src/cargo/util/vcs.rs b/collector/compile-benchmarks/cargo/src/cargo/util/vcs.rs
new file mode 100644
index 000000000..1d3188e26
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/cargo/util/vcs.rs
@@ -0,0 +1,66 @@
+use std::path::Path;
+use std::fs::create_dir;
+
+use git2;
+
+use util::{CargoResult, process};
+
+pub struct HgRepo;
+pub struct GitRepo;
+pub struct PijulRepo;
+pub struct FossilRepo;
+
+impl GitRepo {
+    pub fn init(path: &Path, _: &Path) -> CargoResult<GitRepo> {
+        git2::Repository::init(path)?;
+        Ok(GitRepo)
+    }
+    pub fn discover(path: &Path, _: &Path) -> Result<git2::Repository, git2::Error> {
+        git2::Repository::discover(path)
+    }
+}
+
+impl HgRepo {
+    pub fn init(path: &Path, cwd: &Path) -> CargoResult<HgRepo> {
+        process("hg").cwd(cwd).arg("init").arg(path).exec()?;
+        Ok(HgRepo)
+    }
+    pub fn discover(path: &Path, cwd: &Path) -> CargoResult<HgRepo> {
+        process("hg").cwd(cwd).arg("root").cwd(path).exec_with_output()?;
+        Ok(HgRepo)
+    }
+}
+
+impl PijulRepo {
+    pub fn init(path: &Path, cwd: &Path) -> CargoResult<PijulRepo> {
+        process("pijul").cwd(cwd).arg("init").arg(path).exec()?;
+        Ok(PijulRepo)
+    }
+}
+
+impl FossilRepo {
+    pub fn init(path: &Path, cwd: &Path) -> CargoResult<FossilRepo> {
+        // fossil doesn't create the directory so we'll do that first
+        create_dir(path)?;
+
+        // set up the paths we'll use
+        let db_fname = ".fossil";
+        let mut db_path = path.to_owned();
+        db_path.push(db_fname);
+
+        // then create the fossil DB in that location
+        process("fossil").cwd(cwd).arg("init").arg(&db_path).exec()?;
+
+        // open it in that new directory
+        process("fossil").cwd(&path).arg("open").arg(db_fname).exec()?;
+
+        // set `target` as ignoreable and cleanable (run the commands, not
+        // just build them)
+        process("fossil").cwd(cwd).arg("settings")
+                         .arg("ignore-glob")
+                         .arg("target").exec()?;
+        process("fossil").cwd(cwd).arg("settings")
+                         .arg("clean-glob")
+                         .arg("target").exec()?;
+        Ok(FossilRepo)
+    }
+}
diff --git a/collector/compile-benchmarks/cargo/src/ci/dox.sh b/collector/compile-benchmarks/cargo/src/ci/dox.sh
new file mode 100644
index 000000000..7dffde265
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/ci/dox.sh
@@ -0,0 +1,33 @@
+set -ex
+
+DOCS="index faq config guide manifest build-script pkgid-spec crates-io \
+  environment-variables specifying-dependencies source-replacement \
+  external-tools"
+ASSETS="CNAME images/noise.png images/forkme.png images/Cargo-Logo-Small.png \
+  stylesheets/all.css stylesheets/normalize.css javascripts/prism.js \
+  javascripts/all.js stylesheets/prism.css images/circle-with-i.png \
+  images/search.png images/org-level-acl.png images/auth-level-acl.png \
+  favicon.ico policies.html"
+
+for asset in $ASSETS; do
+  mkdir -p `dirname target/doc/$asset`
+  cp src/doc/$asset target/doc/$asset
+done
+
+for doc in $DOCS; do
+  rustdoc \
+    --markdown-no-toc \
+    --markdown-css stylesheets/normalize.css \
+    --markdown-css stylesheets/all.css \
+    --markdown-css stylesheets/prism.css \
+    --html-in-header src/doc/html-headers.html \
+    --html-before-content src/doc/header.html \
+    --html-after-content src/doc/footer.html \
+    -o target/doc \
+    src/doc/$doc.md
+done
+
+# Temporary preview for mdBook docs
+cd src/doc/book
+$HOME/.cargo/bin/mdbook build --no-create --dest-dir ../../../target/doc/book
+cd ../../../
diff --git a/collector/compile-benchmarks/cargo/src/crates-io/Cargo.toml b/collector/compile-benchmarks/cargo/src/crates-io/Cargo.toml
new file mode 100644
index 000000000..25a07376d
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/crates-io/Cargo.toml
@@ -0,0 +1,21 @@
+[package]
+name = "crates-io"
+version = "0.12.0"
+authors = ["Alex Crichton <alex@alexcrichton.com>"]
+license = "MIT/Apache-2.0"
+repository = "https://github.com/rust-lang/cargo"
+description = """
+Helpers for interacting with crates.io
+"""
+
+[lib]
+name = "crates_io"
+path = "lib.rs"
+
+[dependencies]
+curl = "0.4"
+error-chain = "0.11.0-rc.2"
+serde = "1.0"
+serde_derive = "1.0"
+serde_json = "1.0"
+url = "1.0"
diff --git a/collector/compile-benchmarks/cargo/src/crates-io/lib.rs b/collector/compile-benchmarks/cargo/src/crates-io/lib.rs
new file mode 100644
index 000000000..910d51a37
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/crates-io/lib.rs
@@ -0,0 +1,343 @@
+#![allow(unknown_lints)]
+
+extern crate curl;
+extern crate url;
+#[macro_use]
+extern crate error_chain;
+extern crate serde_json;
+#[macro_use]
+extern crate serde_derive;
+
+use std::collections::BTreeMap;
+use std::fs::File;
+use std::io::prelude::*;
+use std::io::{self, Cursor};
+
+use curl::easy::{Easy, List};
+
+use url::percent_encoding::{percent_encode, QUERY_ENCODE_SET};
+
+error_chain! {
+    foreign_links {
+        Curl(curl::Error);
+        Io(io::Error);
+        Json(serde_json::Error);
+    }
+
+    errors {
+        NotOkResponse(code: u32, headers: Vec<String>, body: Vec<u8>){
+            description("failed to get a 200 OK response")
+            display("failed to get a 200 OK response, got {}
+headers:
+    {}
+body:
+{}", code, headers.join("\n    "), String::from_utf8_lossy(body))
+        }
+        NonUtf8Body {
+            description("response body was not utf-8")
+            display("response body was not utf-8")
+        }
+        Api(errs: Vec<String>) {
+            display("api errors: {}", errs.join(", "))
+        }
+        Unauthorized {
+            display("unauthorized API access")
+        }
+        TokenMissing {
+            display("no upload token found, please run `cargo login`")
+        }
+        NotFound {
+            display("cannot find crate")
+        }
+    }
+}
+
+pub struct Registry {
+    host: String,
+    token: Option<String>,
+    handle: Easy,
+}
+
+#[derive(PartialEq, Clone, Copy)]
+pub enum Auth {
+    Authorized,
+    Unauthorized,
+}
+
+#[derive(Deserialize)]
+pub struct Crate {
+    pub name: String,
+    pub description: Option<String>,
+    pub max_version: String,
+}
+
+#[derive(Serialize)]
+pub struct NewCrate {
+    pub name: String,
+    pub vers: String,
+    pub deps: Vec<NewCrateDependency>,
+    pub features: BTreeMap<String, Vec<String>>,
+    pub authors: Vec<String>,
+    pub description: Option<String>,
+    pub documentation: Option<String>,
+    pub homepage: Option<String>,
+    pub readme: Option<String>,
+    pub keywords: Vec<String>,
+    pub categories: Vec<String>,
+    pub license: Option<String>,
+    pub license_file: Option<String>,
+    pub repository: Option<String>,
+    pub badges: BTreeMap<String, BTreeMap<String, String>>,
+}
+
+#[derive(Serialize)]
+pub struct NewCrateDependency {
+    pub optional: bool,
+    pub default_features: bool,
+    pub name: String,
+    pub features: Vec<String>,
+    pub version_req: String,
+    pub target: Option<String>,
+    pub kind: String,
+}
+
+#[derive(Deserialize)]
+pub struct User {
+    pub id: u32,
+    pub login: String,
+    pub avatar: Option<String>,
+    pub email: Option<String>,
+    pub name: Option<String>,
+}
+
+pub struct Warnings {
+    pub invalid_categories: Vec<String>,
+    pub invalid_badges: Vec<String>,
+}
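For readers unfamiliar with `error_chain!`: the block above generates an `Error` type, a matchable `ErrorKind`, and a `Result<T>` alias. The following is a hedged, self-contained sketch of typical caller-side usage with a trimmed-down kind list and made-up messages (it is not the patch's API, just an illustration of the pattern):

```rust
#[macro_use]
extern crate error_chain;

// A reduced error_chain! block, mimicking the shape of the one above.
error_chain! {
    errors {
        Unauthorized { display("unauthorized API access") }
        TokenMissing { display("no upload token found, please run `cargo login`") }
    }
}

// Callers can match on the generated ErrorKind to react to specific failures.
fn describe(err: &Error) -> &'static str {
    match *err.kind() {
        ErrorKind::Unauthorized => "the registry rejected the token",
        ErrorKind::TokenMissing => "no token; run `cargo login` first",
        _ => "some other error",
    }
}

fn main() {
    let err: Error = ErrorKind::TokenMissing.into();
    println!("{}: {}", err, describe(&err));
}
```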
+#[derive(Deserialize)] struct R { ok: bool }
+#[derive(Deserialize)] struct OwnerResponse { ok: bool, msg: String }
+#[derive(Deserialize)] struct ApiErrorList { errors: Vec<ApiError> }
+#[derive(Deserialize)] struct ApiError { detail: String }
+#[derive(Serialize)] struct OwnersReq<'a> { users: &'a [&'a str] }
+#[derive(Deserialize)] struct Users { users: Vec<User> }
+#[derive(Deserialize)] struct TotalCrates { total: u32 }
+#[derive(Deserialize)] struct Crates { crates: Vec<Crate>, meta: TotalCrates }
+
+impl Registry {
+    pub fn new(host: String, token: Option<String>) -> Registry {
+        Registry::new_handle(host, token, Easy::new())
+    }
+
+    pub fn new_handle(host: String,
+                      token: Option<String>,
+                      handle: Easy) -> Registry {
+        Registry {
+            host: host,
+            token: token,
+            handle: handle,
+        }
+    }
+
+    pub fn add_owners(&mut self, krate: &str, owners: &[&str]) -> Result<String> {
+        let body = serde_json::to_string(&OwnersReq { users: owners })?;
+        let body = self.put(format!("/crates/{}/owners", krate),
+                            body.as_bytes())?;
+        assert!(serde_json::from_str::<R>(&body)?.ok);
+        Ok(serde_json::from_str::<OwnerResponse>(&body)?.msg)
+    }
+
+    pub fn remove_owners(&mut self, krate: &str, owners: &[&str]) -> Result<()> {
+        let body = serde_json::to_string(&OwnersReq { users: owners })?;
+        let body = self.delete(format!("/crates/{}/owners", krate),
+                               Some(body.as_bytes()))?;
+        assert!(serde_json::from_str::<R>(&body)?.ok);
+        Ok(())
+    }
+
+    pub fn list_owners(&mut self, krate: &str) -> Result<Vec<User>> {
+        let body = self.get(format!("/crates/{}/owners", krate))?;
+        Ok(serde_json::from_str::<Users>(&body)?.users)
+    }
+
+    pub fn publish(&mut self, krate: &NewCrate, tarball: &File)
+                   -> Result<Warnings> {
+        let json = serde_json::to_string(krate)?;
+        // Prepare the body. The format of the upload request is:
+        //
+        //      <le u32 of json>
+        //      <json request> (metadata for the package)
+        //      <le u32 of tarball>
+        //      <source tarball>
+        let stat = tarball.metadata()?;
+        let header = {
+            let mut w = Vec::new();
+            w.extend([
+                (json.len() >>  0) as u8,
+                (json.len() >>  8) as u8,
+                (json.len() >> 16) as u8,
+                (json.len() >> 24) as u8,
+            ].iter().map(|x| *x));
+            w.extend(json.as_bytes().iter().map(|x| *x));
+            w.extend([
+                (stat.len() >>  0) as u8,
+                (stat.len() >>  8) as u8,
+                (stat.len() >> 16) as u8,
+                (stat.len() >> 24) as u8,
+            ].iter().map(|x| *x));
+            w
+        };
+        let size = stat.len() as usize + header.len();
+        let mut body = Cursor::new(header).chain(tarball);
+
+        let url = format!("{}/api/v1/crates/new", self.host);
+
+        let token = match self.token.as_ref() {
+            Some(s) => s,
+            None => return Err(Error::from_kind(ErrorKind::TokenMissing)),
+        };
+        self.handle.put(true)?;
+        self.handle.url(&url)?;
+        self.handle.in_filesize(size as u64)?;
+        let mut headers = List::new();
+        headers.append("Accept: application/json")?;
+        headers.append(&format!("Authorization: {}", token))?;
+        self.handle.http_headers(headers)?;
+
+        let body = handle(&mut self.handle, &mut |buf| body.read(buf).unwrap_or(0))?;
+
+        let response = if body.len() > 0 {
+            body.parse::<serde_json::Value>()?
+        } else {
+            "{}".parse()?
+        };
+
+        let invalid_categories: Vec<String> = response
+            .get("warnings")
+            .and_then(|j| j.get("invalid_categories"))
+            .and_then(|j| j.as_array())
+            .map(|x| x.iter().flat_map(|j| j.as_str()).map(Into::into).collect())
+            .unwrap_or_else(Vec::new);
+
+        let invalid_badges: Vec<String> = response
+            .get("warnings")
+            .and_then(|j| j.get("invalid_badges"))
+            .and_then(|j| j.as_array())
+            .map(|x| x.iter().flat_map(|j| j.as_str()).map(Into::into).collect())
+            .unwrap_or_else(Vec::new);
+
+        Ok(Warnings {
+            invalid_categories: invalid_categories,
+            invalid_badges: invalid_badges,
+        })
+    }
+
+    pub fn search(&mut self, query: &str, limit: u8) -> Result<(Vec<Crate>, u32)> {
+        let formatted_query = percent_encode(query.as_bytes(), QUERY_ENCODE_SET);
+        let body = self.req(
+            format!("/crates?q={}&per_page={}", formatted_query, limit),
+            None, Auth::Unauthorized
+        )?;
+
+        let crates = serde_json::from_str::<Crates>(&body)?;
+        Ok((crates.crates, crates.meta.total))
+    }
+
+    pub fn yank(&mut self, krate: &str, version: &str) -> Result<()> {
+        let body = self.delete(format!("/crates/{}/{}/yank", krate, version),
+                               None)?;
+        assert!(serde_json::from_str::<R>(&body)?.ok);
+        Ok(())
+    }
+
+    pub fn unyank(&mut self, krate: &str, version: &str) -> Result<()> {
+        let body = self.put(format!("/crates/{}/{}/unyank", krate, version),
+                            &[])?;
+        assert!(serde_json::from_str::<R>(&body)?.ok);
+        Ok(())
+    }
+
+    fn put(&mut self, path: String, b: &[u8]) -> Result<String> {
+        self.handle.put(true)?;
+        self.req(path, Some(b), Auth::Authorized)
+    }
+
+    fn get(&mut self, path: String) -> Result<String> {
+        self.handle.get(true)?;
+        self.req(path, None, Auth::Authorized)
+    }
+
+    fn delete(&mut self, path: String, b: Option<&[u8]>) -> Result<String> {
+        self.handle.custom_request("DELETE")?;
+        self.req(path, b, Auth::Authorized)
+    }
+
+    fn req(&mut self,
+           path: String,
+           body: Option<&[u8]>,
+           authorized: Auth) -> Result<String> {
+        self.handle.url(&format!("{}/api/v1{}", self.host, path))?;
+        let mut headers = List::new();
+        headers.append("Accept: application/json")?;
+        headers.append("Content-Type: application/json")?;
+
+        if authorized == Auth::Authorized {
+            let token = match self.token.as_ref() {
+                Some(s) => s,
+                None => return Err(Error::from_kind(ErrorKind::TokenMissing)),
+            };
+            headers.append(&format!("Authorization: {}", token))?;
+        }
+        self.handle.http_headers(headers)?;
+        match body {
+            Some(mut body) => {
+                self.handle.upload(true)?;
+                self.handle.in_filesize(body.len() as u64)?;
+                handle(&mut self.handle, &mut |buf| body.read(buf).unwrap_or(0))
+            }
+            None => handle(&mut self.handle, &mut |_| 0),
+        }
+    }
+}
+
+fn handle(handle: &mut Easy,
+          read: &mut FnMut(&mut [u8]) -> usize) -> Result<String> {
+    let mut headers = Vec::new();
+    let mut body = Vec::new();
+    {
+        let mut handle = handle.transfer();
+        handle.read_function(|buf| Ok(read(buf)))?;
+        handle.write_function(|data| {
+            body.extend_from_slice(data);
+            Ok(data.len())
+        })?;
+        handle.header_function(|data| {
+            headers.push(String::from_utf8_lossy(data).into_owned());
+            true
+        })?;
+        handle.perform()?;
+    }
+
+    match handle.response_code()? {
+        0 => {} // file upload url sometimes
+        200 => {}
+        403 => return Err(Error::from_kind(ErrorKind::Unauthorized)),
+        404 => return Err(Error::from_kind(ErrorKind::NotFound)),
+        code => return Err(Error::from_kind(ErrorKind::NotOkResponse(code, headers, body))),
+    }
+
+    let body = match String::from_utf8(body) {
+        Ok(body) => body,
+        Err(..) => return Err(Error::from_kind(ErrorKind::NonUtf8Body)),
+    };
+    match serde_json::from_str::<ApiErrorList>(&body) {
+        Ok(errors) => {
+            return Err(Error::from_kind(ErrorKind::Api(errors
+                .errors
+                .into_iter()
+                .map(|s| s.detail)
+                .collect())))
+        }
+        Err(..) => {}
+    }
+    Ok(body)
+}
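To make the length-prefixed framing used by `publish` above concrete, here is a hedged, self-contained sketch (the helper name and inputs are assumptions for illustration, not part of the patch): a little-endian `u32` length before the JSON metadata and another before the raw tarball bytes.

```rust
// Illustrative only: mirrors the
// <le u32 of json><json><le u32 of tarball><tarball>
// layout documented in `publish`.
fn upload_body(json: &str, tarball: &[u8]) -> Vec<u8> {
    let mut body = Vec::new();
    body.extend_from_slice(&(json.len() as u32).to_le_bytes());
    body.extend_from_slice(json.as_bytes());
    body.extend_from_slice(&(tarball.len() as u32).to_le_bytes());
    body.extend_from_slice(tarball);
    body
}

fn main() {
    let body = upload_body("{}", b"fake tarball bytes");
    // 4-byte length + 2 bytes of JSON + 4-byte length + 18-byte payload
    assert_eq!(body.len(), 4 + 2 + 4 + 18);
}
```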
diff --git a/collector/compile-benchmarks/cargo/src/doc/CNAME b/collector/compile-benchmarks/cargo/src/doc/CNAME
new file mode 100644
index 000000000..b68cc5511
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/doc/CNAME
@@ -0,0 +1 @@
+doc.crates.io
diff --git a/collector/compile-benchmarks/cargo/src/doc/MIGRATION_MAP b/collector/compile-benchmarks/cargo/src/doc/MIGRATION_MAP
new file mode 100644
index 000000000..433a7851f
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/doc/MIGRATION_MAP
@@ -0,0 +1,12 @@
+index.md book/src/index.md book/src/getting-started/index.md book/src/getting-started/*.md
+guide.md book/src/guide/index.md book/src/guide/*.md
+build-script.md book/src/reference/build-scripts.md
+config.md book/src/reference/config.md
+crates-io.md book/src/reference/publishing.md
+environment-variables.md book/src/reference/environment-variables.md
+external-tools.md book/src/reference/external-tools.md
+manifest.md book/src/reference/manifest.md
+pkgid-spec.md book/src/reference/pkgid-spec.md
+source-replacement.md book/src/reference/source-replacement.md
+specifying-dependencies.md book/src/reference/specifying-dependencies.md
+faq.md book/src/faq.md
diff --git a/collector/compile-benchmarks/cargo/src/doc/README.md b/collector/compile-benchmarks/cargo/src/doc/README.md
new file mode 100644
index 000000000..e68b5ca60
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/doc/README.md
@@ -0,0 +1,6 @@
+# Cargo Documentation
+
+NOTE: Cargo documentation is under migration to mdBook-based structure. All the
+`*.md` files here shall be kept in sync with the `*.md` files under `book/src/`.
+See the `MIGRATION_MAP` file here
+for details.
diff --git a/collector/compile-benchmarks/cargo/src/doc/book/.gitignore b/collector/compile-benchmarks/cargo/src/doc/book/.gitignore
new file mode 100644
index 000000000..5a0bf0317
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/doc/book/.gitignore
@@ -0,0 +1 @@
+/book
diff --git a/collector/compile-benchmarks/cargo/src/doc/book/README.md b/collector/compile-benchmarks/cargo/src/doc/book/README.md
new file mode 100644
index 000000000..b24da689e
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/doc/book/README.md
@@ -0,0 +1,47 @@
+# The Cargo Book
+
+
+### Requirements
+
+Building the book requires [mdBook]. To get it:
+
+[mdBook]: https://github.com/azerupi/mdBook
+
+```shell
+$ cargo install mdbook
+```
+
+### Building
+
+To build the book:
+
+```shell
+$ mdbook build
+```
+
+The output will be in the `book` subdirectory. To check it out, open it in
+your web browser.
+
+_Firefox:_
+```shell
+$ firefox book/index.html                       # Linux
+$ open -a "Firefox" book/index.html             # OS X
+$ Start-Process "firefox.exe" .\book\index.html # Windows (PowerShell)
+$ start firefox.exe .\book\index.html           # Windows (Cmd)
+```
+
+_Chrome:_
+```shell
+$ google-chrome book/index.html                 # Linux
+$ open -a "Google Chrome" book/index.html       # OS X
+$ Start-Process "chrome.exe" .\book\index.html  # Windows (PowerShell)
+$ start chrome.exe .\book\index.html            # Windows (Cmd)
+```
+
+
+## Contributing
+
+Given that the book is still in a draft state, we'd love your help!
Please feel free to open +issues about anything, and send in PRs for things you'd like to fix or change. If your change is +large, please open an issue first, so we can make sure that it's something we'd accept before you +go through the work of getting a PR together. diff --git a/collector/compile-benchmarks/cargo/src/doc/book/book.toml b/collector/compile-benchmarks/cargo/src/doc/book/book.toml new file mode 100644 index 000000000..1b84b2978 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/doc/book/book.toml @@ -0,0 +1,2 @@ +title = "The Cargo Manual" +author = "Alex Crichton, Steve Klabnik and Carol Nichols, with Contributions from the Rust Community" diff --git a/collector/compile-benchmarks/cargo/src/doc/book/src/SUMMARY.md b/collector/compile-benchmarks/cargo/src/doc/book/src/SUMMARY.md new file mode 100644 index 000000000..5f46bfade --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/doc/book/src/SUMMARY.md @@ -0,0 +1,30 @@ +# Summary + +[Introduction](index.md) + +* [Getting Started](getting-started/index.md) + * [Installation](getting-started/installation.md) + * [First Steps with Cargo](getting-started/first-steps.md) + +* [Cargo Guide](guide/index.md) + * [Why Cargo Exists](guide/why-cargo-exists.md) + * [Creating a New Project](guide/creating-a-new-project.md) + * [Working on an Existing Project](guide/working-on-an-existing-project.md) + * [Dependencies](guide/dependencies.md) + * [Project Layout](guide/project-layout.md) + * [Cargo.toml vs Cargo.lock](guide/cargo-toml-vs-cargo-lock.md) + * [Tests](guide/tests.md) + * [Continuous Integration](guide/continuous-integration.md) + +* [Cargo Reference](reference/index.md) + * [Specifying Dependencies](reference/specifying-dependencies.md) + * [The Manifest Format](reference/manifest.md) + * [Configuration](reference/config.md) + * [Environment Variables](reference/environment-variables.md) + * [Build Scripts](reference/build-scripts.md) + * [Publishing on crates.io](reference/publishing.md) + * [Package ID Specifications](reference/pkgid-spec.md) + * [Source Replacement](reference/source-replacement.md) + * [External Tools](reference/external-tools.md) + +* [FAQ](faq.md) diff --git a/collector/compile-benchmarks/cargo/src/doc/book/src/faq.md b/collector/compile-benchmarks/cargo/src/doc/book/src/faq.md new file mode 100644 index 000000000..7f13573fd --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/doc/book/src/faq.md @@ -0,0 +1,193 @@ +## Frequently Asked Questions + +### Is the plan to use GitHub as a package repository? + +No. The plan for Cargo is to use [crates.io], like npm or Rubygems do with +npmjs.org and rubygems.org. + +We plan to support git repositories as a source of packages forever, +because they can be used for early development and temporary patches, +even when people use the registry as the primary source of packages. + +### Why build crates.io rather than use GitHub as a registry? + +We think that it’s very important to support multiple ways to download +packages, including downloading from GitHub and copying packages into +your project itself. + +That said, we think that [crates.io] offers a number of important benefits, and +will likely become the primary way that people download packages in Cargo. + +For precedent, both Node.js’s [npm][1] and Ruby’s [bundler][2] support both a +central registry model as well as a Git-based model, and most packages +are downloaded through the registry in those ecosystems, with an +important minority of packages making use of git-based packages. 
+
+[1]: https://www.npmjs.org
+[2]: https://bundler.io
+
+Some of the advantages that make a central registry popular in other
+languages include:
+
+* **Discoverability**. A central registry provides an easy place to look
+  for existing packages. Combined with tagging, this also makes it
+  possible for a registry to provide ecosystem-wide information, such as a
+  list of the most popular or most-depended-on packages.
+* **Speed**. A central registry makes it possible to easily fetch just
+  the metadata for packages quickly and efficiently, and then to
+  efficiently download just the published package, and not other bloat
+  that happens to exist in the repository. This adds up to a significant
+  improvement in the speed of dependency resolution and fetching. As
+  dependency graphs scale up, downloading all of the git repositories bogs
+  down fast. Also remember that not everybody has a high-speed,
+  low-latency Internet connection.
+
+### Will Cargo work with C code (or other languages)?
+
+Yes!
+
+Cargo handles compiling Rust code, but we know that many Rust projects
+link against C code. We also know that there are decades of tooling
+built up around compiling languages other than Rust.
+
+Our solution: Cargo allows a package to [specify a script](reference/build-scripts.html)
+(written in Rust) to run before invoking `rustc`. Rust is leveraged to
+implement platform-specific configuration and refactor out common build
+functionality among packages.
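As a concrete illustration of such a build script, here is a minimal hedged sketch of a `build.rs` (the file names and the `cc`/`ar` invocations are assumptions for a Unix-like toolchain, closely following the style of Cargo's build-script documentation rather than any specific project): it compiles a C file, archives it as a static library, and prints the `cargo:` directives that tell Cargo how to link the result.

```rust
// build.rs -- illustrative sketch only; assumes a Unix-like toolchain and a
// `src/hello.c` in the package.
use std::env;
use std::process::Command;

fn main() {
    let out_dir = env::var("OUT_DIR").unwrap();

    // Compile the C file into an object, then archive it as a static library.
    Command::new("cc")
        .args(&["src/hello.c", "-c", "-o"])
        .arg(&format!("{}/hello.o", out_dir))
        .status().unwrap();
    Command::new("ar")
        .args(&["crus", "libhello.a", "hello.o"])
        .current_dir(&out_dir)
        .status().unwrap();

    // Tell Cargo how to link the result into the crate being built.
    println!("cargo:rustc-link-search=native={}", out_dir);
    println!("cargo:rustc-link-lib=static=hello");
}
```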
+
+### Can Cargo be used inside of `make` (or `ninja`, or ...)?
+
+Indeed. While we intend Cargo to be useful as a standalone way to
+compile Rust projects at the top-level, we know that some people will
+want to invoke Cargo from other build tools.
+
+We have designed Cargo to work well in those contexts, paying attention
+to things like error codes and machine-readable output modes. We still
+have some work to do on those fronts, but using Cargo in the context of
+conventional scripts is something we designed for from the beginning and
+will continue to prioritize.
+
+### Does Cargo handle multi-platform projects or cross-compilation?
+
+Rust itself provides facilities for configuring sections of code based
+on the platform. Cargo also supports [platform-specific
+dependencies][target-deps], and we plan to support more per-platform
+configuration in `Cargo.toml` in the future.
+
+[target-deps]: reference/manifest.html#the-dependencies-section
+
+In the longer-term, we’re looking at ways to conveniently cross-compile
+projects using Cargo.
+
+### Does Cargo support environments, like `production` or `test`?
+
+We support environments through the use of [profiles][profile] to support:
+
+[profile]: reference/manifest.html#the-profile-sections
+
+* environment-specific flags (like `-g --opt-level=0` for development
+  and `--opt-level=3` for production).
+* environment-specific dependencies (like `hamcrest` for test assertions).
+* environment-specific `#[cfg]`
+* a `cargo test` command
+
+### Does Cargo work on Windows?
+
+Yes!
+
+All commits to Cargo are required to pass the local test suite on Windows.
+If, however, you find a Windows issue, we consider it a bug, so [please file an
+issue][3].
+
+[3]: https://github.com/rust-lang/cargo/issues
+
+### Why do binaries have `Cargo.lock` in version control, but not libraries?
+
+The purpose of a `Cargo.lock` is to describe the state of the world at the time
+of a successful build. It is then used to provide deterministic builds across
+whatever machine is building the project by ensuring that the exact same
+dependencies are being compiled.
+
+This property is most desirable for applications and projects which are at the
+very end of the dependency chain (binaries). As a result, it is recommended that
+all binaries check in their `Cargo.lock`.
+
+For libraries the situation is somewhat different. A library is not only used by
+the library developers, but also any downstream consumers of the library. Users
+dependent on the library will not inspect the library’s `Cargo.lock` (even if it
+exists). This is precisely because a library should **not** be deterministically
+recompiled for all users of the library.
+
+If a library ends up being used transitively by several dependencies, it’s
+likely that just a single copy of the library is desired (based on semver
+compatibility). If all libraries were to check in their `Cargo.lock`, then
+multiple copies of the library would be used, and perhaps even a version
+conflict would arise.
+
+In other words, libraries specify semver requirements for their dependencies but
+cannot see the full picture. Only end products like binaries have a full
+picture to decide what versions of dependencies should be used.
+
+### Can libraries use `*` as a version for their dependencies?
+
+**As of January 22nd, 2016, [crates.io] rejects all packages (not just libraries)
+with wildcard dependency constraints.**
+
+While libraries _can_, strictly speaking, they should not. A version requirement
+of `*` says “This will work with every version ever,” which is never going
+to be true. Libraries should always specify the range that they do work with,
+even if it’s something as general as “every 1.x.y version.”
+
+### Why `Cargo.toml`?
+
+As one of the most frequent interactions with Cargo, the question of why the
+configuration file is named `Cargo.toml` arises from time to time. The leading
+capital-`C` was chosen to ensure that the manifest was grouped with other
+similar configuration files in directory listings. Sorting files often puts
+capital letters before lowercase letters, ensuring files like `Makefile` and
+`Cargo.toml` are placed together. The trailing `.toml` was chosen to emphasize
+the fact that the file is in the [TOML configuration
+format](https://github.com/toml-lang/toml).
+
+Cargo does not allow other names such as `cargo.toml` or `Cargofile`, so that a
+Cargo repository is always easy to identify. An option of
+many possible names has historically led to confusion where one case was handled
+but others were accidentally forgotten.
+
+[crates.io]: https://crates.io/
+
+### How can Cargo work offline?
+
+Cargo is often used in situations with limited or no network access such as
+airplanes, CI environments, or large production deployments. Users
+are often surprised when Cargo attempts to fetch resources from the network, and
+hence the request for Cargo to work offline comes up frequently.
+
+Cargo, at its heart, will not attempt to access the network unless told to do
+so. That is, if no crates come from crates.io, a git repository, or some other
+network location, Cargo will never attempt to make a network connection. As a
+result, if Cargo attempts to touch the network, then it's because it needs to
+fetch a required resource.
+
+Cargo is also quite aggressive about caching information to minimize the amount
+of network activity.
+It will guarantee, for example, that if `cargo build` (or
+an equivalent) is run to completion then the next `cargo build` is guaranteed to
+not touch the network so long as `Cargo.toml` has not been modified in the
+meantime. This avoidance of the network boils down to a `Cargo.lock` existing
+and a populated cache of the crates reflected in the lock file. If either of
+these components is missing, then it is required for the build to succeed and
+must be fetched remotely.
+
+As of Rust 1.11.0 Cargo understands a new flag, `--frozen`, which is an
+assertion that it shouldn't touch the network. When passed, Cargo will
+immediately return an error if it would otherwise attempt a network request.
+The error should include contextual information about why the network request is
+being made in the first place, to help with debugging. Note that this flag *does
+not change the behavior of Cargo*; it simply asserts that Cargo shouldn't touch
+the network as a previous command has been run to ensure that network activity
+shouldn't be necessary.
+
+For more information about vendoring, see documentation on [source
+replacement][replace].
+
+[replace]: reference/source-replacement.html
diff --git a/collector/compile-benchmarks/cargo/src/doc/book/src/getting-started/first-steps.md b/collector/compile-benchmarks/cargo/src/doc/book/src/getting-started/first-steps.md
new file mode 100644
index 000000000..190f69f55
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/doc/book/src/getting-started/first-steps.md
@@ -0,0 +1,70 @@
+## First Steps with Cargo
+
+To start a new project with Cargo, use `cargo new`:
+
+```shell
+$ cargo new hello_world --bin
+```
+
+We’re passing `--bin` because we’re making a binary program: if we
+were making a library, we’d leave it off.
+
+Let’s check out what Cargo has generated for us:
+
+```shell
+$ cd hello_world
+$ tree .
+.
+├── Cargo.toml
+└── src
+    └── main.rs
+
+1 directory, 2 files
+```
+
+This is all we need to get started. First, let’s check out `Cargo.toml`:
+
+```toml
+[package]
+name = "hello_world"
+version = "0.1.0"
+authors = ["Your Name <you@example.com>"]
+```
+
+This is called a **manifest**, and it contains all of the metadata that Cargo
+needs to compile your project.
+
+Here’s what’s in `src/main.rs`:
+
+```rust
+fn main() {
+    println!("Hello, world!");
+}
+```
+
+Cargo generated a “hello world” for us. Let’s compile it:
+
+```shell
+$ cargo build
+   Compiling hello_world v0.1.0 (file:///path/to/project/hello_world)
+```
+
+And then run it:
+
+```shell
+$ ./target/debug/hello_world
+Hello, world!
+```
+
+We can also use `cargo run` to compile and then run it, all in one step:
+
+```shell
+$ cargo run
+     Fresh hello_world v0.1.0 (file:///path/to/project/hello_world)
+   Running `target/hello_world`
+Hello, world!
+```
+
+### Going further
+
+For more details on using Cargo, check out the [Cargo Guide](guide/index.html).
diff --git a/collector/compile-benchmarks/cargo/src/doc/book/src/getting-started/index.md b/collector/compile-benchmarks/cargo/src/doc/book/src/getting-started/index.md
new file mode 100644
index 000000000..22a7315cf
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/doc/book/src/getting-started/index.md
@@ -0,0 +1,6 @@
+## Getting Started
+
+To get started with Cargo, install Cargo (and Rust) and set up your first crate.
+ +* [Installation](getting-started/installation.html) +* [First steps with Cargo](getting-started/first-steps.html) diff --git a/collector/compile-benchmarks/cargo/src/doc/book/src/getting-started/installation.md b/collector/compile-benchmarks/cargo/src/doc/book/src/getting-started/installation.md new file mode 100644 index 000000000..8428a9063 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/doc/book/src/getting-started/installation.md @@ -0,0 +1,38 @@ +## Installation + +### Install Stable Rust and Cargo + +The easiest way to get Cargo is to get the current stable release of [Rust] by +using the `rustup` script: + +```shell +$ curl -sSf https://static.rust-lang.org/rustup.sh | sh +``` + +After this, you can use the `rustup` command to also install `beta` or `nightly` +channels for Rust and Cargo. + +### Install Nightly Cargo + +To install just Cargo, the current recommended installation method is through +the official nightly builds. Note that Cargo will also require that [Rust] is +already installed on the system. + +| Platform | 64-bit | 32-bit | +|------------------|-------------------|-------------------| +| Linux binaries | [tar.gz][linux64] | [tar.gz][linux32] | +| MacOS binaries | [tar.gz][mac64] | [tar.gz][mac32] | +| Windows binaries | [tar.gz][win64] | [tar.gz][win32] | + +### Build and Install Cargo from Source + +Alternatively, you can [build Cargo from source][compiling-from-source]. + +[rust]: https://www.rust-lang.org/ +[linux64]: https://static.rust-lang.org/cargo-dist/cargo-nightly-x86_64-unknown-linux-gnu.tar.gz +[linux32]: https://static.rust-lang.org/cargo-dist/cargo-nightly-i686-unknown-linux-gnu.tar.gz +[mac64]: https://static.rust-lang.org/cargo-dist/cargo-nightly-x86_64-apple-darwin.tar.gz +[mac32]: https://static.rust-lang.org/cargo-dist/cargo-nightly-i686-apple-darwin.tar.gz +[win64]: https://static.rust-lang.org/cargo-dist/cargo-nightly-x86_64-pc-windows-gnu.tar.gz +[win32]: https://static.rust-lang.org/cargo-dist/cargo-nightly-i686-pc-windows-gnu.tar.gz +[compiling-from-source]: https://github.com/rust-lang/cargo#compiling-from-source diff --git a/collector/compile-benchmarks/cargo/src/doc/book/src/guide/cargo-toml-vs-cargo-lock.md b/collector/compile-benchmarks/cargo/src/doc/book/src/guide/cargo-toml-vs-cargo-lock.md new file mode 100644 index 000000000..574a6677f --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/doc/book/src/guide/cargo-toml-vs-cargo-lock.md @@ -0,0 +1,103 @@ +## Cargo.toml vs Cargo.lock + +`Cargo.toml` and `Cargo.lock` serve two different purposes. Before we talk +about them, here’s a summary: + +* `Cargo.toml` is about describing your dependencies in a broad sense, and is + written by you. +* `Cargo.lock` contains exact information about your dependencies. It is + maintained by Cargo and should not be manually edited. + +If you’re building a library that other projects will depend on, put +`Cargo.lock` in your `.gitignore`. If you’re building an executable like a +command-line tool or an application, check `Cargo.lock` into `git`. If you're +curious about why that is, see ["Why do binaries have `Cargo.lock` in version +control, but not libraries?" in the +FAQ](faq.html#why-do-binaries-have-cargolock-in-version-control-but-not-libraries). + +Let’s dig in a little bit more. + +`Cargo.toml` is a **manifest** file in which we can specify a bunch of +different metadata about our project. 
For example, we can say that we depend
+on another project:
+
+```toml
+[package]
+name = "hello_world"
+version = "0.1.0"
+authors = ["Your Name <you@example.com>"]
+
+[dependencies]
+rand = { git = "https://github.com/rust-lang-nursery/rand.git" }
+```
+
+This project has a single dependency, on the `rand` library. We’ve stated in
+this case that we’re relying on a particular Git repository that lives on
+GitHub. Since we haven’t specified any other information, Cargo assumes that
+we intend to use the latest commit on the `master` branch to build our project.
+
+Sound good? Well, there’s one problem: If you build this project today, and
+then you send a copy to me, and I build this project tomorrow, something bad
+could happen. There could be more commits to `rand` in the meantime, and my
+build would include new commits while yours would not. Therefore, we would
+get different builds. This would be bad because we want reproducible builds.
+
+We could fix this problem by putting a `rev` line in our `Cargo.toml`:
+
+```toml
+[dependencies]
+rand = { git = "https://github.com/rust-lang-nursery/rand.git", rev = "9f35b8e" }
+```
+
+Now our builds will be the same. But there’s a big drawback: now we have to
+manually think about SHA-1s every time we want to update our library. This is
+both tedious and error-prone.
+
+Enter the `Cargo.lock`. Because of its existence, we don’t need to manually
+keep track of the exact revisions: Cargo will do it for us. When we have a
+manifest like this:
+
+```toml
+[package]
+name = "hello_world"
+version = "0.1.0"
+authors = ["Your Name <you@example.com>"]
+
+[dependencies]
+rand = { git = "https://github.com/rust-lang-nursery/rand.git" }
+```
+
+Cargo will take the latest commit and write that information out into our
+`Cargo.lock` when we build for the first time. That file will look like this:
+
+```toml
+[[package]]
+name = "hello_world"
+version = "0.1.0"
+dependencies = [
+ "rand 0.1.0 (git+https://github.com/rust-lang-nursery/rand.git#9f35b8e439eeedd60b9414c58f389bdc6a3284f9)",
+]
+
+[[package]]
+name = "rand"
+version = "0.1.0"
+source = "git+https://github.com/rust-lang-nursery/rand.git#9f35b8e439eeedd60b9414c58f389bdc6a3284f9"
+```
+
+You can see that there’s a lot more information here, including the exact
+revision we used to build. Now when you give your project to someone else,
+they’ll use the exact same SHA, even though we didn’t specify it in our
+`Cargo.toml`.
+
+When we’re ready to opt in to a new version of the library, Cargo can
+re-calculate the dependencies and update things for us:
+
+```shell
+$ cargo update           # updates all dependencies
+$ cargo update -p rand   # updates just “rand”
+```
+
+This will write out a new `Cargo.lock` with the new version information. Note
+that the argument to `cargo update` is actually a
+[Package ID Specification](reference/pkgid-spec.html) and `rand` is just a short
+specification.
diff --git a/collector/compile-benchmarks/cargo/src/doc/book/src/guide/continuous-integration.md b/collector/compile-benchmarks/cargo/src/doc/book/src/guide/continuous-integration.md
new file mode 100644
index 000000000..6e5efe72c
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/doc/book/src/guide/continuous-integration.md
@@ -0,0 +1,21 @@
+## Continuous Integration
+
+### Travis CI
+
+To test your project on Travis CI, here is a sample `.travis.yml` file:
+
+```yaml
+language: rust
+rust:
+  - stable
+  - beta
+  - nightly
+matrix:
+  allow_failures:
+    - rust: nightly
+```
+
+This will test all three release channels, but any breakage in nightly
+will not fail your overall build. Please see the [Travis CI Rust
+documentation](https://docs.travis-ci.com/user/languages/rust/) for more
+information.
diff --git a/collector/compile-benchmarks/cargo/src/doc/book/src/guide/creating-a-new-project.md b/collector/compile-benchmarks/cargo/src/doc/book/src/guide/creating-a-new-project.md
new file mode 100644
index 000000000..3f0c90e3c
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/doc/book/src/guide/creating-a-new-project.md
@@ -0,0 +1,89 @@
+## Creating a New Project
+
+To start a new project with Cargo, use `cargo new`:
+
+```shell
+$ cargo new hello_world --bin
+```
+
+We’re passing `--bin` because we’re making a binary program: if we
+were making a library, we’d leave it off. This also initializes a new `git`
+repository by default. If you don't want it to do that, pass `--vcs none`.
+
+Let’s check out what Cargo has generated for us:
+
+```shell
+$ cd hello_world
+$ tree .
+.
+├── Cargo.toml
+└── src
+    └── main.rs
+
+1 directory, 2 files
+```
+
+If we had just used `cargo new hello_world` without the `--bin` flag, then
+we would have a `lib.rs` instead of a `main.rs`. For now, however, this is all
+we need to get started. First, let’s check out `Cargo.toml`:
+
+```toml
+[package]
+name = "hello_world"
+version = "0.1.0"
+authors = ["Your Name <you@example.com>"]
+```
+
+This is called a **manifest**, and it contains all of the metadata that Cargo
+needs to compile your project.
+
+Here’s what’s in `src/main.rs`:
+
+```rust
+fn main() {
+    println!("Hello, world!");
+}
+```
+
+Cargo generated a “hello world” for us. Let’s compile it:
+
+```shell
+$ cargo build
+   Compiling hello_world v0.1.0 (file:///path/to/project/hello_world)
+```
+
+And then run it:
+
+```shell
+$ ./target/debug/hello_world
+Hello, world!
+```
+
+We can also use `cargo run` to compile and then run it, all in one step (You
+won't see the `Compiling` line if you have not made any changes since you last
+compiled):
+
+```shell
+$ cargo run
+   Compiling hello_world v0.1.0 (file:///path/to/project/hello_world)
+     Running `target/debug/hello_world`
+Hello, world!
+```
+
+You’ll now notice a new file, `Cargo.lock`. It contains information about our
+dependencies. Since we don’t have any yet, it’s not very interesting.
+
+Once you’re ready for release, you can use `cargo build --release` to compile
+your files with optimizations turned on:
+
+```shell
+$ cargo build --release
+   Compiling hello_world v0.1.0 (file:///path/to/project/hello_world)
+```
+
+`cargo build --release` puts the resulting binary in `target/release` instead of
+`target/debug`.
+
+Compiling in debug mode is the default for development; compilation time is
+shorter since the compiler doesn't do optimizations, but the code will run
+slower. Release mode takes longer to compile, but the code will run faster.
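A hedged, minimal way to observe the difference between the two profiles from inside a program: `debug_assertions` is enabled by default for `cargo build` and disabled for `cargo build --release`, so the following prints a different line depending on how it was compiled.

```rust
fn main() {
    // Enabled for the default (dev) profile, disabled with --release.
    if cfg!(debug_assertions) {
        println!("compiled in debug mode");
    } else {
        println!("compiled in release mode");
    }
}
```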
diff --git a/collector/compile-benchmarks/cargo/src/doc/book/src/guide/dependencies.md b/collector/compile-benchmarks/cargo/src/doc/book/src/guide/dependencies.md
new file mode 100644
index 000000000..e199487f2
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/doc/book/src/guide/dependencies.md
@@ -0,0 +1,90 @@
+## Dependencies
+
+[crates.io] is the Rust community's central package registry that serves as a
+location to discover and download packages. `cargo` is configured to use it by
+default to find requested packages.
+
+To depend on a library hosted on [crates.io], add it to your `Cargo.toml`.
+
+[crates.io]: https://crates.io/
+
+### Adding a dependency
+
+If your `Cargo.toml` doesn't already have a `[dependencies]` section, add that,
+then list the crate name and version that you would like to use. This example
+adds a dependency of the `time` crate:
+
+```toml
+[dependencies]
+time = "0.1.12"
+```
+
+The version string is a [semver] version requirement. The [specifying
+dependencies](reference/specifying-dependencies.html) docs have more information about
+the options you have here.
+
+[semver]: https://github.com/steveklabnik/semver#requirements
+
+If we also wanted to add a dependency on the `regex` crate, we would not need
+to add `[dependencies]` for each crate listed. Here's what your whole
+`Cargo.toml` file would look like with dependencies on the `time` and `regex`
+crates:
+
+```toml
+[package]
+name = "hello_world"
+version = "0.1.0"
+authors = ["Your Name <you@example.com>"]
+
+[dependencies]
+time = "0.1.12"
+regex = "0.1.41"
+```
+
+Re-run `cargo build`, and Cargo will fetch the new dependencies and all of
+their dependencies, compile them all, and update the `Cargo.lock`:

+```shell
+$ cargo build
+      Updating registry `https://github.com/rust-lang/crates.io-index`
+   Downloading memchr v0.1.5
+   Downloading libc v0.1.10
+   Downloading regex-syntax v0.2.1
+   Downloading memchr v0.1.5
+   Downloading aho-corasick v0.3.0
+   Downloading regex v0.1.41
+     Compiling memchr v0.1.5
+     Compiling libc v0.1.10
+     Compiling regex-syntax v0.2.1
+     Compiling memchr v0.1.5
+     Compiling aho-corasick v0.3.0
+     Compiling regex v0.1.41
+     Compiling hello_world v0.1.0 (file:///path/to/project/hello_world)
+```
+
+Our `Cargo.lock` contains the exact information about which revision of all of
+these dependencies we used.
+
+Now, if `regex` gets updated, we will still build with the same revision until
+we choose to `cargo update`.
+
+You can now use the `regex` library using `extern crate` in `main.rs`.
+
+```rust
+extern crate regex;
+
+use regex::Regex;
+
+fn main() {
+    let re = Regex::new(r"^\d{4}-\d{2}-\d{2}$").unwrap();
+    println!("Did our date match? {}", re.is_match("2014-01-01"));
+}
+```
+
+Running it will show:
+
+```shell
+$ cargo run
+     Running `target/hello_world`
+Did our date match? true
+```
diff --git a/collector/compile-benchmarks/cargo/src/doc/book/src/guide/index.md b/collector/compile-benchmarks/cargo/src/doc/book/src/guide/index.md
new file mode 100644
index 000000000..d8bfda17c
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/doc/book/src/guide/index.md
@@ -0,0 +1,13 @@
+## Cargo Guide
+
+This guide will give you all that you need to know about how to use Cargo to
+develop Rust projects.
+ +* [Why Cargo Exists](guide/why-cargo-exists.html) +* [Creating a New Project](guide/creating-a-new-project.html) +* [Working on an Existing Cargo Project](guide/working-on-an-existing-project.html) +* [Dependencies](guide/dependencies.html) +* [Project Layout](guide/project-layout.html) +* [Cargo.toml vs Cargo.lock](guide/cargo-toml-vs-cargo-lock.html) +* [Tests](guide/tests.html) +* [Continuous Integration](guide/continuous-integration.html) diff --git a/collector/compile-benchmarks/cargo/src/doc/book/src/guide/project-layout.md b/collector/compile-benchmarks/cargo/src/doc/book/src/guide/project-layout.md new file mode 100644 index 000000000..f9eb7d331 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/doc/book/src/guide/project-layout.md @@ -0,0 +1,35 @@ +## Project Layout + +Cargo uses conventions for file placement to make it easy to dive into a new +Cargo project: + +```shell +. +├── Cargo.lock +├── Cargo.toml +├── benches +│   └── large-input.rs +├── examples +│   └── simple.rs +├── src +│   ├── bin +│   │   └── another_executable.rs +│   ├── lib.rs +│   └── main.rs +└── tests + └── some-integration-tests.rs +``` + +* `Cargo.toml` and `Cargo.lock` are stored in the root of your project (*package + root*). +* Source code goes in the `src` directory. +* The default library file is `src/lib.rs`. +* The default executable file is `src/main.rs`. +* Other executables can be placed in `src/bin/*.rs`. +* Integration tests go in the `tests` directory (unit tests go in each file + they're testing). +* Examples go in the `examples` directory. +* Benchmarks go in the `benches` directory. + +These are explained in more detail in the [manifest +description](reference/manifest.html#the-project-layout). diff --git a/collector/compile-benchmarks/cargo/src/doc/book/src/guide/tests.md b/collector/compile-benchmarks/cargo/src/doc/book/src/guide/tests.md new file mode 100644 index 000000000..743a83f85 --- /dev/null +++ b/collector/compile-benchmarks/cargo/src/doc/book/src/guide/tests.md @@ -0,0 +1,39 @@ +## Tests + +Cargo can run your tests with the `cargo test` command. Cargo looks for tests +to run in two places: in each of your `src` files and any tests in `tests/`. +Tests in your `src` files should be unit tests, and tests in `tests/` should be +integration-style tests. As such, you’ll need to import your crates into +the files in `tests`. + +Here's an example of running `cargo test` in our project, which currently has +no tests: + +```shell +$ cargo test + Compiling rand v0.1.0 (https://github.com/rust-lang-nursery/rand.git#9f35b8e) + Compiling hello_world v0.1.0 (file:///path/to/project/hello_world) + Running target/test/hello_world-9c2b65bbb79eabce + +running 0 tests + +test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out +``` + +If our project had tests, we would see more output with the correct number of +tests. + +You can also run a specific test by passing a filter: + +```shell +$ cargo test foo +``` + +This will run any test with `foo` in its name. + +`cargo test` runs additional checks as well. For example, it will compile any +examples you’ve included and will also test the examples in your +documentation. Please see the [testing guide][testing] in the Rust +documentation for more details. 
diff --git a/collector/compile-benchmarks/cargo/src/doc/book/src/guide/why-cargo-exists.md b/collector/compile-benchmarks/cargo/src/doc/book/src/guide/why-cargo-exists.md
new file mode 100644
index 000000000..9c5d0d2dd
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/doc/book/src/guide/why-cargo-exists.md
@@ -0,0 +1,12 @@
+## Why Cargo Exists
+
+Cargo is a tool that allows Rust projects to declare their various
+dependencies and ensure that you’ll always get a repeatable build.
+
+To accomplish this goal, Cargo does four things:
+
+* Introduces two metadata files with various bits of project information.
+* Fetches and builds your project’s dependencies.
+* Invokes `rustc` or another build tool with the correct parameters to build
+  your project.
+* Introduces conventions to make working with Rust projects easier.
diff --git a/collector/compile-benchmarks/cargo/src/doc/book/src/guide/working-on-an-existing-project.md b/collector/compile-benchmarks/cargo/src/doc/book/src/guide/working-on-an-existing-project.md
new file mode 100644
index 000000000..97c032005
--- /dev/null
+++ b/collector/compile-benchmarks/cargo/src/doc/book/src/guide/working-on-an-existing-project.md
@@ -0,0 +1,22 @@
+## Working on an Existing Cargo Project
+
+If you download an existing project that uses Cargo, it’s really easy
+to get going.
+
+First, get the project from somewhere. In this example, we’ll use `rand`
+cloned from its repository on GitHub:
+
+```shell
+$ git clone https://github.com/rust-lang-nursery/rand.git
+$ cd rand
+```
+
+To build, use `cargo build`:
+
+```shell
+$ cargo build
+   Compiling rand v0.1.0 (file:///path/to/project/rand)
+```
+
+This will fetch all of the dependencies and then build them, along with the
+project.
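+
+From here, the other commands in this guide work on the same checkout; for
+example, if the project has a test suite, running it follows the same
+pattern:
+
+```shell
+$ cargo test
+```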
diff --git a/collector/compile-benchmarks/cargo/src/doc/book/src/images/Cargo-Logo-Small.png b/collector/compile-benchmarks/cargo/src/doc/book/src/images/Cargo-Logo-Small.png
new file mode 100644
index 0000000000000000000000000000000000000000..e3a99208c287bc3fba67de085255df1b4ad22c0a
GIT binary patch
literal 58168
[base85-encoded binary payload for Cargo-Logo-Small.png omitted]