diff --git a/crates/resolver-tests/src/lib.rs b/crates/resolver-tests/src/lib.rs index dce94689eb3..36c4abf8ade 100644 --- a/crates/resolver-tests/src/lib.rs +++ b/crates/resolver-tests/src/lib.rs @@ -1,978 +1,978 @@ -use std::cell::RefCell; -use std::cmp::PartialEq; -use std::cmp::{max, min}; -use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; -use std::fmt; -use std::fmt::Write; -use std::rc::Rc; -use std::time::Instant; - -use cargo::core::dependency::Kind; -use cargo::core::resolver::{self, ResolveOpts}; -use cargo::core::source::{GitReference, SourceId}; -use cargo::core::Resolve; -use cargo::core::{Dependency, PackageId, Registry, Summary}; -use cargo::util::{CargoResult, Config, Graph, IntoUrl}; - -use proptest::collection::{btree_map, vec}; -use proptest::prelude::*; -use proptest::sample::Index; -use proptest::string::string_regex; -use varisat::{self, ExtendFormula}; - -pub fn resolve(deps: Vec, registry: &[Summary]) -> CargoResult> { - resolve_with_config(deps, registry, None) -} - -pub fn resolve_and_validated( - deps: Vec, - registry: &[Summary], - sat_resolve: Option, -) -> CargoResult> { - let resolve = resolve_with_config_raw(deps.clone(), registry, None); - - match resolve { - Err(e) => { - let sat_resolve = sat_resolve.unwrap_or_else(|| SatResolve::new(registry)); - if sat_resolve.sat_resolve(&deps) { - panic!( - "the resolve err but the sat_resolve thinks this will work:\n{}", - sat_resolve.use_packages().unwrap() - ); - } - Err(e) - } - Ok(resolve) => { - let mut stack = vec![pkg_id("root")]; - let mut used = HashSet::new(); - let mut links = HashSet::new(); - while let Some(p) = stack.pop() { - assert!(resolve.contains(&p)); - if used.insert(p) { - // in the tests all `links` crates end in `-sys` - if p.name().ends_with("-sys") { - assert!(links.insert(p.name())); - } - stack.extend(resolve.deps(p).map(|(dp, deps)| { - for d in deps { - assert!(d.matches_id(dp)); - } - dp - })); - } - } - let out = resolve.sort(); - assert_eq!(out.len(), used.len()); - - let mut pub_deps: HashMap> = HashMap::new(); - for &p in out.iter() { - // make the list of `p` public dependencies - let mut self_pub_dep = HashSet::new(); - self_pub_dep.insert(p); - for (dp, deps) in resolve.deps(p) { - if deps.iter().any(|d| d.is_public()) { - self_pub_dep.extend(pub_deps[&dp].iter().cloned()) - } - } - pub_deps.insert(p, self_pub_dep); - - // check if `p` has a public dependencies conflicts - let seen_dep: BTreeSet<_> = resolve - .deps(p) - .flat_map(|(dp, _)| pub_deps[&dp].iter().cloned()) - .collect(); - let seen_dep: Vec<_> = seen_dep.iter().collect(); - for a in seen_dep.windows(2) { - if a[0].name() == a[1].name() { - panic!( - "the package {:?} can publicly see {:?} and {:?}", - p, a[0], a[1] - ) - } - } - } - let sat_resolve = sat_resolve.unwrap_or_else(|| SatResolve::new(registry)); - if !sat_resolve.sat_is_valid_solution(&out) { - panic!( - "the sat_resolve err but the resolve thinks this will work:\n{:?}", - resolve - ); - } - Ok(out) - } - } -} - -pub fn resolve_with_config( - deps: Vec, - registry: &[Summary], - config: Option<&Config>, -) -> CargoResult> { - let resolve = resolve_with_config_raw(deps, registry, config)?; - Ok(resolve.sort()) -} - -pub fn resolve_with_config_raw( - deps: Vec, - registry: &[Summary], - config: Option<&Config>, -) -> CargoResult { - struct MyRegistry<'a> { - list: &'a [Summary], - used: HashSet, - }; - impl<'a> Registry for MyRegistry<'a> { - fn query( - &mut self, - dep: &Dependency, - f: &mut dyn FnMut(Summary), - fuzzy: bool, - ) -> 
CargoResult<()> { - for summary in self.list.iter() { - if fuzzy || dep.matches(summary) { - self.used.insert(summary.package_id()); - f(summary.clone()); - } - } - Ok(()) - } - - fn describe_source(&self, _src: SourceId) -> String { - String::new() - } - - fn is_replaced(&self, _src: SourceId) -> bool { - false - } - } - impl<'a> Drop for MyRegistry<'a> { - fn drop(&mut self) { - if std::thread::panicking() && self.list.len() != self.used.len() { - // we found a case that causes a panic and did not use all of the input. - // lets print the part of the input that was used for minimization. - println!( - "{:?}", - PrettyPrintRegistry( - self.list - .iter() - .filter(|s| { self.used.contains(&s.package_id()) }) - .cloned() - .collect() - ) - ); - } - } - } - let mut registry = MyRegistry { - list: registry, - used: HashSet::new(), - }; - let summary = Summary::new( - pkg_id("root"), - deps, - &BTreeMap::>::new(), - None::, - false, - ) - .unwrap(); - let opts = ResolveOpts::everything(); - let start = Instant::now(); - let resolve = resolver::resolve( - &[(summary, opts)], - &[], - &mut registry, - &HashSet::new(), - config, - true, - ); - - // The largest test in our suite takes less then 30 sec. - // So lets fail the test if we have ben running for two long. - assert!(start.elapsed().as_secs() < 60); - resolve -} - -const fn num_bits() -> usize { - std::mem::size_of::() * 8 -} - -fn log_bits(x: usize) -> usize { - if x == 0 { - return 0; - } - assert!(x > 0); - (num_bits::() as u32 - x.leading_zeros()) as usize -} - -fn sat_at_most_one(solver: &mut impl varisat::ExtendFormula, vars: &[varisat::Var]) { - if vars.len() <= 1 { - return; - } else if vars.len() == 2 { - solver.add_clause(&[vars[0].negative(), vars[1].negative()]); - return; - } else if vars.len() == 3 { - solver.add_clause(&[vars[0].negative(), vars[1].negative()]); - solver.add_clause(&[vars[0].negative(), vars[2].negative()]); - solver.add_clause(&[vars[1].negative(), vars[2].negative()]); - return; - } - // use the "Binary Encoding" from - // https://www.it.uu.se/research/group/astra/ModRef10/papers/Alan%20M.%20Frisch%20and%20Paul%20A.%20Giannoros.%20SAT%20Encodings%20of%20the%20At-Most-k%20Constraint%20-%20ModRef%202010.pdf - let bits: Vec = solver.new_var_iter(log_bits(vars.len())).collect(); - for (i, p) in vars.iter().enumerate() { - for b in 0..bits.len() { - solver.add_clause(&[p.negative(), bits[b].lit(((1 << b) & i) > 0)]); - } - } -} - -fn sat_at_most_one_by_key( - cnf: &mut impl varisat::ExtendFormula, - data: impl Iterator, -) -> HashMap> { - // no two packages with the same links set - let mut by_keys: HashMap> = HashMap::new(); - for (p, v) in data { - by_keys.entry(p).or_default().push(v) - } - for key in by_keys.values() { - sat_at_most_one(cnf, key); - } - by_keys -} - -/// Resolution can be reduced to the SAT problem. So this is an alternative implementation -/// of the resolver that uses a SAT library for the hard work. This is intended to be easy to read, -/// as compared to the real resolver. -/// -/// For the subset of functionality that are currently made by `registry_strategy` this will, -/// find a valid resolution if one exists. The big thing that the real resolver does, -/// that this one does not do is work with features and optional dependencies. -/// -/// The SAT library dose not optimize for the newer version, -/// so the selected packages may not match the real resolver. 
-#[derive(Clone)] -pub struct SatResolve(Rc>); -struct SatResolveInner { - solver: varisat::Solver<'static>, - var_for_is_packages_used: HashMap, - by_name: HashMap<&'static str, Vec>, -} - -impl SatResolve { - pub fn new(registry: &[Summary]) -> Self { - let mut cnf = varisat::CnfFormula::new(); - let var_for_is_packages_used: HashMap = registry - .iter() - .map(|s| (s.package_id(), cnf.new_var())) - .collect(); - - // no two packages with the same links set - sat_at_most_one_by_key( - &mut cnf, - registry - .iter() - .map(|s| (s.links(), var_for_is_packages_used[&s.package_id()])) - .filter(|(l, _)| l.is_some()), - ); - - // no two semver compatible versions of the same package - let by_activations_keys = sat_at_most_one_by_key( - &mut cnf, - var_for_is_packages_used - .iter() - .map(|(p, &v)| (p.as_activations_key(), v)), - ); - - let mut by_name: HashMap<&'static str, Vec> = HashMap::new(); - - for p in registry.iter() { - by_name - .entry(p.name().as_str()) - .or_default() - .push(p.package_id()) - } - - let empty_vec = vec![]; - - let mut graph: Graph = Graph::new(); - - let mut version_selected_for: HashMap< - PackageId, - HashMap>, - > = HashMap::new(); - // active packages need each of there `deps` to be satisfied - for p in registry.iter() { - graph.add(p.package_id()); - for dep in p.dependencies() { - // This can more easily be written as: - // !is_active(p) or one of the things that match dep is_active - // All the complexity, from here to the end, is to support public and private dependencies! - let mut by_key: HashMap<_, Vec> = HashMap::new(); - for &m in by_name - .get(dep.package_name().as_str()) - .unwrap_or(&empty_vec) - .iter() - .filter(|&p| dep.matches_id(*p)) - { - graph.link(p.package_id(), m); - by_key - .entry(m.as_activations_key()) - .or_default() - .push(var_for_is_packages_used[&m].positive()); - } - let keys: HashMap<_, _> = by_key.keys().map(|&k| (k, cnf.new_var())).collect(); - - // if `p` is active then we need to select one of the keys - let matches: Vec<_> = keys - .values() - .map(|v| v.positive()) - .chain(Some(var_for_is_packages_used[&p.package_id()].negative())) - .collect(); - cnf.add_clause(&matches); - - // if a key is active then we need to select one of the versions - for (key, vars) in by_key.iter() { - let mut matches = vars.clone(); - matches.push(keys[key].negative()); - cnf.add_clause(&matches); - } - - version_selected_for - .entry(p.package_id()) - .or_default() - .insert(dep.clone(), keys); - } - } - - let topological_order = graph.sort(); - - // we already ensure there is only one version for each `activations_key` so we can think of - // `publicly_exports` as being in terms of a set of `activations_key`s - let mut publicly_exports: HashMap<_, HashMap<_, varisat::Var>> = HashMap::new(); - - for &key in by_activations_keys.keys() { - // everything publicly depends on itself - let var = publicly_exports - .entry(key) - .or_default() - .entry(key) - .or_insert_with(|| cnf.new_var()); - cnf.add_clause(&[var.positive()]); - } - - // if a `dep` is public then `p` `publicly_exports` all the things that the selected version `publicly_exports` - for &p in topological_order.iter() { - if let Some(deps) = version_selected_for.get(&p) { - let mut p_exports = publicly_exports.remove(&p.as_activations_key()).unwrap(); - for (_, versions) in deps.iter().filter(|(d, _)| d.is_public()) { - for (ver, sel) in versions { - for (&export_pid, &export_var) in publicly_exports[ver].iter() { - let our_var = - p_exports.entry(export_pid).or_insert_with(|| 
cnf.new_var()); - cnf.add_clause(&[ - sel.negative(), - export_var.negative(), - our_var.positive(), - ]); - } - } - } - publicly_exports.insert(p.as_activations_key(), p_exports); - } - } - - // we already ensure there is only one version for each `activations_key` so we can think of - // `can_see` as being in terms of a set of `activations_key`s - // and if `p` `publicly_exports` `export` then it `can_see` `export` - let mut can_see: HashMap<_, HashMap<_, varisat::Var>> = HashMap::new(); - - // if `p` has a `dep` that selected `ver` then it `can_see` all the things that the selected version `publicly_exports` - for (&p, deps) in version_selected_for.iter() { - let p_can_see = can_see.entry(p).or_default(); - for (_, versions) in deps.iter() { - for (&ver, sel) in versions { - for (&export_pid, &export_var) in publicly_exports[&ver].iter() { - let our_var = p_can_see.entry(export_pid).or_insert_with(|| cnf.new_var()); - cnf.add_clause(&[ - sel.negative(), - export_var.negative(), - our_var.positive(), - ]); - } - } - } - } - - // a package `can_see` only one version by each name - for (_, see) in can_see.iter() { - sat_at_most_one_by_key(&mut cnf, see.iter().map(|((name, _, _), &v)| (name, v))); - } - let mut solver = varisat::Solver::new(); - solver.add_formula(&cnf); - - // We dont need to `solve` now. We know that "use nothing" will satisfy all the clauses so far. - // But things run faster if we let it spend some time figuring out how the constraints interact before we add assumptions. - solver - .solve() - .expect("docs say it can't error in default config"); - SatResolve(Rc::new(RefCell::new(SatResolveInner { - solver, - var_for_is_packages_used, - by_name, - }))) - } - pub fn sat_resolve(&self, deps: &[Dependency]) -> bool { - let mut s = self.0.borrow_mut(); - let mut assumption = vec![]; - let mut this_call = None; - - // the starting `deps` need to be satisfied - for dep in deps.iter() { - let empty_vec = vec![]; - let matches: Vec = s - .by_name - .get(dep.package_name().as_str()) - .unwrap_or(&empty_vec) - .iter() - .filter(|&p| dep.matches_id(*p)) - .map(|p| s.var_for_is_packages_used[p].positive()) - .collect(); - if matches.is_empty() { - return false; - } else if matches.len() == 1 { - assumption.extend_from_slice(&matches) - } else { - if this_call.is_none() { - let new_var = s.solver.new_var(); - this_call = Some(new_var); - assumption.push(new_var.positive()); - } - let mut matches = matches; - matches.push(this_call.unwrap().negative()); - s.solver.add_clause(&matches); - } - } - - s.solver.assume(&assumption); - - s.solver - .solve() - .expect("docs say it can't error in default config") - } - pub fn sat_is_valid_solution(&self, pids: &[PackageId]) -> bool { - let mut s = self.0.borrow_mut(); - for p in pids { - if p.name().as_str() != "root" && !s.var_for_is_packages_used.contains_key(p) { - return false; - } - } - let assumption: Vec<_> = s - .var_for_is_packages_used - .iter() - .map(|(p, v)| v.lit(pids.contains(p))) - .collect(); - - s.solver.assume(&assumption); - - s.solver - .solve() - .expect("docs say it can't error in default config") - } - fn use_packages(&self) -> Option { - self.0.borrow().solver.model().map(|lits| { - let lits: HashSet<_> = lits - .iter() - .filter(|l| l.is_positive()) - .map(|l| l.var()) - .collect(); - let mut out = String::new(); - out.push_str("used:\n"); - for (p, v) in self.0.borrow().var_for_is_packages_used.iter() { - if lits.contains(v) { - writeln!(&mut out, " {}", p).unwrap(); - } - } - out - }) - } -} - -pub trait ToDep { - 
fn to_dep(self) -> Dependency; -} - -impl ToDep for &'static str { - fn to_dep(self) -> Dependency { - Dependency::parse_no_deprecated(self, Some("1.0.0"), registry_loc()).unwrap() - } -} - -impl ToDep for Dependency { - fn to_dep(self) -> Dependency { - self - } -} - -pub trait ToPkgId { - fn to_pkgid(&self) -> PackageId; -} - -impl ToPkgId for PackageId { - fn to_pkgid(&self) -> PackageId { - *self - } -} - -impl<'a> ToPkgId for &'a str { - fn to_pkgid(&self) -> PackageId { - PackageId::new(*self, "1.0.0", registry_loc()).unwrap() - } -} - -impl, U: AsRef> ToPkgId for (T, U) { - fn to_pkgid(&self) -> PackageId { - let (name, vers) = self; - PackageId::new(name.as_ref(), vers.as_ref(), registry_loc()).unwrap() - } -} - -#[macro_export] -macro_rules! pkg { - ($pkgid:expr => [$($deps:expr),+ $(,)* ]) => ({ - let d: Vec = vec![$($deps.to_dep()),+]; - $crate::pkg_dep($pkgid, d) - }); - - ($pkgid:expr) => ({ - $crate::pkg($pkgid) - }) -} - -fn registry_loc() -> SourceId { - lazy_static::lazy_static! { - static ref EXAMPLE_DOT_COM: SourceId = - SourceId::for_registry(&"https://example.com".into_url().unwrap()).unwrap(); - } - *EXAMPLE_DOT_COM -} - -pub fn pkg(name: T) -> Summary { - pkg_dep(name, Vec::new()) -} - -pub fn pkg_dep(name: T, dep: Vec) -> Summary { - let pkgid = name.to_pkgid(); - let link = if pkgid.name().ends_with("-sys") { - Some(pkgid.name().as_str()) - } else { - None - }; - Summary::new( - name.to_pkgid(), - dep, - &BTreeMap::>::new(), - link, - false, - ) - .unwrap() -} - -pub fn pkg_id(name: &str) -> PackageId { - PackageId::new(name, "1.0.0", registry_loc()).unwrap() -} - -fn pkg_id_loc(name: &str, loc: &str) -> PackageId { - let remote = loc.into_url(); - let master = GitReference::Branch("master".to_string()); - let source_id = SourceId::for_git(&remote.unwrap(), master).unwrap(); - - PackageId::new(name, "1.0.0", source_id).unwrap() -} - -pub fn pkg_loc(name: &str, loc: &str) -> Summary { - let link = if name.ends_with("-sys") { - Some(name) - } else { - None - }; - Summary::new( - pkg_id_loc(name, loc), - Vec::new(), - &BTreeMap::>::new(), - link, - false, - ) - .unwrap() -} - -pub fn remove_dep(sum: &Summary, ind: usize) -> Summary { - let mut deps = sum.dependencies().to_vec(); - deps.remove(ind); - // note: more things will need to be copied over in the future, but it works for now. 
- Summary::new( - sum.package_id(), - deps, - &BTreeMap::>::new(), - sum.links().map(|a| a.as_str()), - sum.namespaced_features(), - ) - .unwrap() -} - -pub fn dep(name: &str) -> Dependency { - dep_req(name, "*") -} -pub fn dep_req(name: &str, req: &str) -> Dependency { - Dependency::parse_no_deprecated(name, Some(req), registry_loc()).unwrap() -} -pub fn dep_req_kind(name: &str, req: &str, kind: Kind, public: bool) -> Dependency { - let mut dep = dep_req(name, req); - dep.set_kind(kind); - dep.set_public(public); - dep -} - -pub fn dep_loc(name: &str, location: &str) -> Dependency { - let url = location.into_url().unwrap(); - let master = GitReference::Branch("master".to_string()); - let source_id = SourceId::for_git(&url, master).unwrap(); - Dependency::parse_no_deprecated(name, Some("1.0.0"), source_id).unwrap() -} -pub fn dep_kind(name: &str, kind: Kind) -> Dependency { - dep(name).set_kind(kind).clone() -} - -pub fn registry(pkgs: Vec) -> Vec { - pkgs -} - -pub fn names(names: &[P]) -> Vec { - names.iter().map(|name| name.to_pkgid()).collect() -} - -pub fn loc_names(names: &[(&'static str, &'static str)]) -> Vec { - names - .iter() - .map(|&(name, loc)| pkg_id_loc(name, loc)) - .collect() -} - -/// By default `Summary` and `Dependency` have a very verbose `Debug` representation. -/// This replaces with a representation that uses constructors from this file. -/// -/// If `registry_strategy` is improved to modify more fields -/// then this needs to update to display the corresponding constructor. -pub struct PrettyPrintRegistry(pub Vec); - -impl fmt::Debug for PrettyPrintRegistry { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "vec![")?; - for s in &self.0 { - if s.dependencies().is_empty() { - write!(f, "pkg!((\"{}\", \"{}\")),", s.name(), s.version())?; - } else { - write!(f, "pkg!((\"{}\", \"{}\") => [", s.name(), s.version())?; - for d in s.dependencies() { - if d.kind() == Kind::Normal - && &d.version_req().to_string() == "*" - && !d.is_public() - { - write!(f, "dep(\"{}\"),", d.name_in_toml())?; - } else if d.kind() == Kind::Normal && !d.is_public() { - write!( - f, - "dep_req(\"{}\", \"{}\"),", - d.name_in_toml(), - d.version_req() - )?; - } else { - write!( - f, - "dep_req_kind(\"{}\", \"{}\", {}, {}),", - d.name_in_toml(), - d.version_req(), - match d.kind() { - Kind::Development => "Kind::Development", - Kind::Build => "Kind::Build", - Kind::Normal => "Kind::Normal", - }, - d.is_public() - )?; - } - } - write!(f, "]),")?; - } - } - write!(f, "]") - } -} - -#[test] -fn meta_test_deep_pretty_print_registry() { - assert_eq!( - &format!( - "{:?}", - PrettyPrintRegistry(vec![ - pkg!(("foo", "1.0.1") => [dep_req("bar", "1")]), - pkg!(("foo", "1.0.0") => [dep_req("bar", "2")]), - pkg!(("foo", "2.0.0") => [dep_req("bar", "*")]), - pkg!(("bar", "1.0.0") => [dep_req("baz", "=1.0.2"), - dep_req("other", "1")]), - pkg!(("bar", "2.0.0") => [dep_req("baz", "=1.0.1")]), - pkg!(("baz", "1.0.2") => [dep_req("other", "2")]), - pkg!(("baz", "1.0.1")), - pkg!(("cat", "1.0.2") => [dep_req_kind("other", "2", Kind::Build, false)]), - pkg!(("cat", "1.0.3") => [dep_req_kind("other", "2", Kind::Development, false)]), - pkg!(("dep_req", "1.0.0")), - pkg!(("dep_req", "2.0.0")), - ]) - ), - "vec![pkg!((\"foo\", \"1.0.1\") => [dep_req(\"bar\", \"^1\"),]),\ - pkg!((\"foo\", \"1.0.0\") => [dep_req(\"bar\", \"^2\"),]),\ - pkg!((\"foo\", \"2.0.0\") => [dep(\"bar\"),]),\ - pkg!((\"bar\", \"1.0.0\") => [dep_req(\"baz\", \"= 1.0.2\"),dep_req(\"other\", \"^1\"),]),\ - pkg!((\"bar\", 
\"2.0.0\") => [dep_req(\"baz\", \"= 1.0.1\"),]),\ - pkg!((\"baz\", \"1.0.2\") => [dep_req(\"other\", \"^2\"),]),\ - pkg!((\"baz\", \"1.0.1\")),\ - pkg!((\"cat\", \"1.0.2\") => [dep_req_kind(\"other\", \"^2\", Kind::Build, false),]),\ - pkg!((\"cat\", \"1.0.3\") => [dep_req_kind(\"other\", \"^2\", Kind::Development, false),]),\ - pkg!((\"dep_req\", \"1.0.0\")),\ - pkg!((\"dep_req\", \"2.0.0\")),]" - ) -} - -/// This generates a random registry index. -/// Unlike vec((Name, Ver, vec((Name, VerRq), ..), ..) -/// This strategy has a high probability of having valid dependencies -pub fn registry_strategy( - max_crates: usize, - max_versions: usize, - shrinkage: usize, -) -> impl Strategy { - let name = string_regex("[A-Za-z][A-Za-z0-9_-]*(-sys)?").unwrap(); - - let raw_version = ..max_versions.pow(3); - let version_from_raw = move |r: usize| { - let major = ((r / max_versions) / max_versions) % max_versions; - let minor = (r / max_versions) % max_versions; - let patch = r % max_versions; - format!("{}.{}.{}", major, minor, patch) - }; - - // If this is false than the crate will depend on the nonexistent "bad" - // instead of the complex set we generated for it. - let allow_deps = prop::bool::weighted(0.99); - - let list_of_versions = - btree_map(raw_version, allow_deps, 1..=max_versions).prop_map(move |ver| { - ver.into_iter() - .map(|a| (version_from_raw(a.0), a.1)) - .collect::>() - }); - - let list_of_crates_with_versions = - btree_map(name, list_of_versions, 1..=max_crates).prop_map(|mut vers| { - // root is the name of the thing being compiled - // so it would be confusing to have it in the index - vers.remove("root"); - // bad is a name reserved for a dep that won't work - vers.remove("bad"); - vers - }); - - // each version of each crate can depend on each crate smaller then it. - // In theory shrinkage should be 2, but in practice we get better trees with a larger value. - let max_deps = max_versions * (max_crates * (max_crates - 1)) / shrinkage; - - let raw_version_range = (any::(), any::()); - let raw_dependency = ( - any::(), - any::(), - raw_version_range, - 0..=1, - Just(false), - // TODO: ^ this needs to be set back to `any::()` and work before public & private dependencies can stabilize - ); - - fn order_index(a: Index, b: Index, size: usize) -> (usize, usize) { - let (a, b) = (a.index(size), b.index(size)); - (min(a, b), max(a, b)) - } - - let list_of_raw_dependency = vec(raw_dependency, ..=max_deps); - - // By default a package depends only on other packages that have a smaller name, - // this helps make sure that all things in the resulting index are DAGs. - // If this is true then the DAG is maintained with grater instead. 
- let reverse_alphabetical = any::().no_shrink(); - - ( - list_of_crates_with_versions, - list_of_raw_dependency, - reverse_alphabetical, - ) - .prop_map( - |(crate_vers_by_name, raw_dependencies, reverse_alphabetical)| { - let list_of_pkgid: Vec<_> = crate_vers_by_name - .iter() - .flat_map(|(name, vers)| vers.iter().map(move |x| ((name.as_str(), &x.0), x.1))) - .collect(); - let len_all_pkgid = list_of_pkgid.len(); - let mut dependency_by_pkgid = vec![vec![]; len_all_pkgid]; - for (a, b, (c, d), k, p) in raw_dependencies { - let (a, b) = order_index(a, b, len_all_pkgid); - let (a, b) = if reverse_alphabetical { (b, a) } else { (a, b) }; - let ((dep_name, _), _) = list_of_pkgid[a]; - if (list_of_pkgid[b].0).0 == dep_name { - continue; - } - let s = &crate_vers_by_name[dep_name]; - let s_last_index = s.len() - 1; - let (c, d) = order_index(c, d, s.len()); - - dependency_by_pkgid[b].push(dep_req_kind( - dep_name, - &if c == 0 && d == s_last_index { - "*".to_string() - } else if c == 0 { - format!("<={}", s[d].0) - } else if d == s_last_index { - format!(">={}", s[c].0) - } else if c == d { - format!("={}", s[c].0) - } else { - format!(">={}, <={}", s[c].0, s[d].0) - }, - match k { - 0 => Kind::Normal, - 1 => Kind::Build, - // => Kind::Development, // Development has no impact so don't gen - _ => panic!("bad index for Kind"), - }, - p && k == 0, - )) - } - - let mut out: Vec = list_of_pkgid - .into_iter() - .zip(dependency_by_pkgid.into_iter()) - .map(|(((name, ver), allow_deps), deps)| { - pkg_dep( - (name, ver).to_pkgid(), - if !allow_deps { - vec![dep_req("bad", "*")] - } else { - let mut deps = deps; - deps.sort_by_key(|d| d.name_in_toml()); - deps.dedup_by_key(|d| d.name_in_toml()); - deps - }, - ) - }) - .collect(); - - if reverse_alphabetical { - // make sure the complicated cases are at the end - out.reverse(); - } - - PrettyPrintRegistry(out) - }, - ) -} - -/// This test is to test the generator to ensure -/// that it makes registries with large dependency trees -#[test] -fn meta_test_deep_trees_from_strategy() { - use proptest::strategy::ValueTree; - use proptest::test_runner::TestRunner; - - let mut dis = [0; 21]; - - let strategy = registry_strategy(50, 20, 60); - let mut test_runner = TestRunner::deterministic(); - for _ in 0..128 { - let PrettyPrintRegistry(input) = strategy - .new_tree(&mut TestRunner::new_with_rng( - Default::default(), - test_runner.new_rng(), - )) - .unwrap() - .current(); - let reg = registry(input.clone()); - for this in input.iter().rev().take(10) { - let res = resolve( - vec![dep_req(&this.name(), &format!("={}", this.version()))], - ®, - ); - dis[res - .as_ref() - .map(|x| min(x.len(), dis.len()) - 1) - .unwrap_or(0)] += 1; - if dis.iter().all(|&x| x > 0) { - return; - } - } - } - - panic!( - "In 1280 tries we did not see a wide enough distribution of dependency trees! 
dis: {:?}", - dis - ); -} - -/// This test is to test the generator to ensure -/// that it makes registries that include multiple versions of the same library -#[test] -fn meta_test_multiple_versions_strategy() { - use proptest::strategy::ValueTree; - use proptest::test_runner::TestRunner; - - let mut dis = [0; 10]; - - let strategy = registry_strategy(50, 20, 60); - let mut test_runner = TestRunner::deterministic(); - for _ in 0..128 { - let PrettyPrintRegistry(input) = strategy - .new_tree(&mut TestRunner::new_with_rng( - Default::default(), - test_runner.new_rng(), - )) - .unwrap() - .current(); - let reg = registry(input.clone()); - for this in input.iter().rev().take(10) { - let res = resolve( - vec![dep_req(&this.name(), &format!("={}", this.version()))], - ®, - ); - if let Ok(mut res) = res { - let res_len = res.len(); - res.sort_by_key(|s| s.name()); - res.dedup_by_key(|s| s.name()); - dis[min(res_len - res.len(), dis.len() - 1)] += 1; - } - if dis.iter().all(|&x| x > 0) { - return; - } - } - } - panic!( - "In 1280 tries we did not see a wide enough distribution of multiple versions of the same library! dis: {:?}", - dis - ); -} - -/// Assert `xs` contains `elems` -pub fn assert_contains(xs: &[A], elems: &[A]) { - for elem in elems { - assert!(xs.contains(elem)); - } -} - -pub fn assert_same(a: &[A], b: &[A]) { - assert_eq!(a.len(), b.len()); - assert_contains(b, a); -} +use std::cell::RefCell; +use std::cmp::PartialEq; +use std::cmp::{max, min}; +use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; +use std::fmt; +use std::fmt::Write; +use std::rc::Rc; +use std::time::Instant; + +use cargo::core::dependency::Kind; +use cargo::core::resolver::{self, ResolveOpts}; +use cargo::core::source::{GitReference, SourceId}; +use cargo::core::Resolve; +use cargo::core::{Dependency, PackageId, Registry, Summary}; +use cargo::util::{CargoResult, Config, Graph, IntoUrl, Platform}; + +use proptest::collection::{btree_map, vec}; +use proptest::prelude::*; +use proptest::sample::Index; +use proptest::string::string_regex; +use varisat::{self, ExtendFormula}; + +pub fn resolve(deps: Vec, registry: &[Summary]) -> CargoResult> { + resolve_with_config(deps, registry, None) +} + +pub fn resolve_and_validated( + deps: Vec, + registry: &[Summary], + sat_resolve: Option, +) -> CargoResult> { + let resolve = resolve_with_config_raw(deps.clone(), registry, None); + + match resolve { + Err(e) => { + let sat_resolve = sat_resolve.unwrap_or_else(|| SatResolve::new(registry)); + if sat_resolve.sat_resolve(&deps) { + panic!( + "the resolve err but the sat_resolve thinks this will work:\n{}", + sat_resolve.use_packages().unwrap() + ); + } + Err(e) + } + Ok(resolve) => { + let mut stack = vec![pkg_id("root")]; + let mut used = HashSet::new(); + let mut links = HashSet::new(); + while let Some(p) = stack.pop() { + assert!(resolve.contains(&p)); + if used.insert(p) { + // in the tests all `links` crates end in `-sys` + if p.name().ends_with("-sys") { + assert!(links.insert(p.name())); + } + stack.extend(resolve.deps(p).map(|(dp, deps)| { + for d in deps { + assert!(d.matches_id(dp)); + } + dp + })); + } + } + let out = resolve.sort(); + assert_eq!(out.len(), used.len()); + + let mut pub_deps: HashMap> = HashMap::new(); + for &p in out.iter() { + // make the list of `p` public dependencies + let mut self_pub_dep = HashSet::new(); + self_pub_dep.insert(p); + for (dp, deps) in resolve.deps(p) { + if deps.iter().any(|d| d.is_public()) { + self_pub_dep.extend(pub_deps[&dp].iter().cloned()) + } + } + 
pub_deps.insert(p, self_pub_dep); + + // check if `p` has a public dependencies conflicts + let seen_dep: BTreeSet<_> = resolve + .deps(p) + .flat_map(|(dp, _)| pub_deps[&dp].iter().cloned()) + .collect(); + let seen_dep: Vec<_> = seen_dep.iter().collect(); + for a in seen_dep.windows(2) { + if a[0].name() == a[1].name() { + panic!( + "the package {:?} can publicly see {:?} and {:?}", + p, a[0], a[1] + ) + } + } + } + let sat_resolve = sat_resolve.unwrap_or_else(|| SatResolve::new(registry)); + if !sat_resolve.sat_is_valid_solution(&out) { + panic!( + "the sat_resolve err but the resolve thinks this will work:\n{:?}", + resolve + ); + } + Ok(out) + } + } +} + +pub fn resolve_with_config( + deps: Vec, + registry: &[Summary], + config: Option<&Config>, +) -> CargoResult> { + let resolve = resolve_with_config_raw(deps, registry, config)?; + Ok(resolve.sort()) +} + +pub fn resolve_with_config_raw( + deps: Vec, + registry: &[Summary], + config: Option<&Config>, +) -> CargoResult { + struct MyRegistry<'a> { + list: &'a [Summary], + used: HashSet, + }; + impl<'a> Registry for MyRegistry<'a> { + fn query( + &mut self, + dep: &Dependency, + f: &mut dyn FnMut(Summary), + fuzzy: bool, + ) -> CargoResult<()> { + for summary in self.list.iter() { + if fuzzy || dep.matches(summary) { + self.used.insert(summary.package_id()); + f(summary.clone()); + } + } + Ok(()) + } + + fn describe_source(&self, _src: SourceId) -> String { + String::new() + } + + fn is_replaced(&self, _src: SourceId) -> bool { + false + } + } + impl<'a> Drop for MyRegistry<'a> { + fn drop(&mut self) { + if std::thread::panicking() && self.list.len() != self.used.len() { + // we found a case that causes a panic and did not use all of the input. + // lets print the part of the input that was used for minimization. + println!( + "{:?}", + PrettyPrintRegistry( + self.list + .iter() + .filter(|s| { self.used.contains(&s.package_id()) }) + .cloned() + .collect() + ) + ); + } + } + } + let mut registry = MyRegistry { + list: registry, + used: HashSet::new(), + }; + let summary = Summary::new( + pkg_id("root"), + deps, + &BTreeMap::, Vec)>::new(), + None::, + false, + ) + .unwrap(); + let opts = ResolveOpts::everything(); + let start = Instant::now(); + let resolve = resolver::resolve( + &[(summary, opts)], + &[], + &mut registry, + &HashSet::new(), + config, + true, + ); + + // The largest test in our suite takes less then 30 sec. + // So lets fail the test if we have ben running for two long. 
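For orientation, here is a minimal sketch of how the `resolve`/`resolve_and_validated` entry points above are typically driven from the resolver test suite (the package names are made up; this is an illustration, not part of the patch):

    // Build a tiny registry with the DSL defined later in this file, resolve a
    // single requirement against it, and check exactly which packages were chosen.
    let reg = registry(vec![
        pkg!(("foo", "1.0.0") => [dep_req("bar", "1")]),
        pkg!(("bar", "1.0.0")),
        pkg!(("bar", "2.0.0")),
    ]);
    let res = resolve(vec![dep_req("foo", "1")], &reg).unwrap();
    // The result always contains the synthetic "root" package plus the selection.
    assert_same(
        &res,
        &names(&[("root", "1.0.0"), ("foo", "1.0.0"), ("bar", "1.0.0")]),
    );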
+ assert!(start.elapsed().as_secs() < 60); + resolve +} + +const fn num_bits() -> usize { + std::mem::size_of::() * 8 +} + +fn log_bits(x: usize) -> usize { + if x == 0 { + return 0; + } + assert!(x > 0); + (num_bits::() as u32 - x.leading_zeros()) as usize +} + +fn sat_at_most_one(solver: &mut impl varisat::ExtendFormula, vars: &[varisat::Var]) { + if vars.len() <= 1 { + return; + } else if vars.len() == 2 { + solver.add_clause(&[vars[0].negative(), vars[1].negative()]); + return; + } else if vars.len() == 3 { + solver.add_clause(&[vars[0].negative(), vars[1].negative()]); + solver.add_clause(&[vars[0].negative(), vars[2].negative()]); + solver.add_clause(&[vars[1].negative(), vars[2].negative()]); + return; + } + // use the "Binary Encoding" from + // https://www.it.uu.se/research/group/astra/ModRef10/papers/Alan%20M.%20Frisch%20and%20Paul%20A.%20Giannoros.%20SAT%20Encodings%20of%20the%20At-Most-k%20Constraint%20-%20ModRef%202010.pdf + let bits: Vec = solver.new_var_iter(log_bits(vars.len())).collect(); + for (i, p) in vars.iter().enumerate() { + for b in 0..bits.len() { + solver.add_clause(&[p.negative(), bits[b].lit(((1 << b) & i) > 0)]); + } + } +} + +fn sat_at_most_one_by_key( + cnf: &mut impl varisat::ExtendFormula, + data: impl Iterator, +) -> HashMap> { + // no two packages with the same links set + let mut by_keys: HashMap> = HashMap::new(); + for (p, v) in data { + by_keys.entry(p).or_default().push(v) + } + for key in by_keys.values() { + sat_at_most_one(cnf, key); + } + by_keys +} + +/// Resolution can be reduced to the SAT problem. So this is an alternative implementation +/// of the resolver that uses a SAT library for the hard work. This is intended to be easy to read, +/// as compared to the real resolver. +/// +/// For the subset of functionality that are currently made by `registry_strategy` this will, +/// find a valid resolution if one exists. The big thing that the real resolver does, +/// that this one does not do is work with features and optional dependencies. +/// +/// The SAT library dose not optimize for the newer version, +/// so the selected packages may not match the real resolver. 
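The binary at-most-one encoding above is compact but not obvious at first read, so here is a small sketch of the behaviour it guarantees (an illustration using a hypothetical test helper, not part of the patch; it only uses varisat calls that already appear in this file):

    // `sat_at_most_one` ties each variable, when true, to the shared bit variables
    // that spell out its index; two true variables would need two different bit
    // patterns at once, which is unsatisfiable.
    fn at_most_one_demo() {
        let mut solver = varisat::Solver::new();
        let vars: Vec<varisat::Var> = solver.new_var_iter(4).collect();
        sat_at_most_one(&mut solver, &vars);
        // Any single variable may still be true...
        solver.assume(&[vars[2].positive()]);
        assert!(solver.solve().unwrap());
        // ...but assuming two of them at once is unsatisfiable.
        solver.assume(&[vars[1].positive(), vars[3].positive()]);
        assert!(!solver.solve().unwrap());
    }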
+#[derive(Clone)] +pub struct SatResolve(Rc>); +struct SatResolveInner { + solver: varisat::Solver<'static>, + var_for_is_packages_used: HashMap, + by_name: HashMap<&'static str, Vec>, +} + +impl SatResolve { + pub fn new(registry: &[Summary]) -> Self { + let mut cnf = varisat::CnfFormula::new(); + let var_for_is_packages_used: HashMap = registry + .iter() + .map(|s| (s.package_id(), cnf.new_var())) + .collect(); + + // no two packages with the same links set + sat_at_most_one_by_key( + &mut cnf, + registry + .iter() + .map(|s| (s.links(), var_for_is_packages_used[&s.package_id()])) + .filter(|(l, _)| l.is_some()), + ); + + // no two semver compatible versions of the same package + let by_activations_keys = sat_at_most_one_by_key( + &mut cnf, + var_for_is_packages_used + .iter() + .map(|(p, &v)| (p.as_activations_key(), v)), + ); + + let mut by_name: HashMap<&'static str, Vec> = HashMap::new(); + + for p in registry.iter() { + by_name + .entry(p.name().as_str()) + .or_default() + .push(p.package_id()) + } + + let empty_vec = vec![]; + + let mut graph: Graph = Graph::new(); + + let mut version_selected_for: HashMap< + PackageId, + HashMap>, + > = HashMap::new(); + // active packages need each of there `deps` to be satisfied + for p in registry.iter() { + graph.add(p.package_id()); + for dep in p.dependencies() { + // This can more easily be written as: + // !is_active(p) or one of the things that match dep is_active + // All the complexity, from here to the end, is to support public and private dependencies! + let mut by_key: HashMap<_, Vec> = HashMap::new(); + for &m in by_name + .get(dep.package_name().as_str()) + .unwrap_or(&empty_vec) + .iter() + .filter(|&p| dep.matches_id(*p)) + { + graph.link(p.package_id(), m); + by_key + .entry(m.as_activations_key()) + .or_default() + .push(var_for_is_packages_used[&m].positive()); + } + let keys: HashMap<_, _> = by_key.keys().map(|&k| (k, cnf.new_var())).collect(); + + // if `p` is active then we need to select one of the keys + let matches: Vec<_> = keys + .values() + .map(|v| v.positive()) + .chain(Some(var_for_is_packages_used[&p.package_id()].negative())) + .collect(); + cnf.add_clause(&matches); + + // if a key is active then we need to select one of the versions + for (key, vars) in by_key.iter() { + let mut matches = vars.clone(); + matches.push(keys[key].negative()); + cnf.add_clause(&matches); + } + + version_selected_for + .entry(p.package_id()) + .or_default() + .insert(dep.clone(), keys); + } + } + + let topological_order = graph.sort(); + + // we already ensure there is only one version for each `activations_key` so we can think of + // `publicly_exports` as being in terms of a set of `activations_key`s + let mut publicly_exports: HashMap<_, HashMap<_, varisat::Var>> = HashMap::new(); + + for &key in by_activations_keys.keys() { + // everything publicly depends on itself + let var = publicly_exports + .entry(key) + .or_default() + .entry(key) + .or_insert_with(|| cnf.new_var()); + cnf.add_clause(&[var.positive()]); + } + + // if a `dep` is public then `p` `publicly_exports` all the things that the selected version `publicly_exports` + for &p in topological_order.iter() { + if let Some(deps) = version_selected_for.get(&p) { + let mut p_exports = publicly_exports.remove(&p.as_activations_key()).unwrap(); + for (_, versions) in deps.iter().filter(|(d, _)| d.is_public()) { + for (ver, sel) in versions { + for (&export_pid, &export_var) in publicly_exports[ver].iter() { + let our_var = + p_exports.entry(export_pid).or_insert_with(|| 
cnf.new_var()); + cnf.add_clause(&[ + sel.negative(), + export_var.negative(), + our_var.positive(), + ]); + } + } + } + publicly_exports.insert(p.as_activations_key(), p_exports); + } + } + + // we already ensure there is only one version for each `activations_key` so we can think of + // `can_see` as being in terms of a set of `activations_key`s + // and if `p` `publicly_exports` `export` then it `can_see` `export` + let mut can_see: HashMap<_, HashMap<_, varisat::Var>> = HashMap::new(); + + // if `p` has a `dep` that selected `ver` then it `can_see` all the things that the selected version `publicly_exports` + for (&p, deps) in version_selected_for.iter() { + let p_can_see = can_see.entry(p).or_default(); + for (_, versions) in deps.iter() { + for (&ver, sel) in versions { + for (&export_pid, &export_var) in publicly_exports[&ver].iter() { + let our_var = p_can_see.entry(export_pid).or_insert_with(|| cnf.new_var()); + cnf.add_clause(&[ + sel.negative(), + export_var.negative(), + our_var.positive(), + ]); + } + } + } + } + + // a package `can_see` only one version by each name + for (_, see) in can_see.iter() { + sat_at_most_one_by_key(&mut cnf, see.iter().map(|((name, _, _), &v)| (name, v))); + } + let mut solver = varisat::Solver::new(); + solver.add_formula(&cnf); + + // We dont need to `solve` now. We know that "use nothing" will satisfy all the clauses so far. + // But things run faster if we let it spend some time figuring out how the constraints interact before we add assumptions. + solver + .solve() + .expect("docs say it can't error in default config"); + SatResolve(Rc::new(RefCell::new(SatResolveInner { + solver, + var_for_is_packages_used, + by_name, + }))) + } + pub fn sat_resolve(&self, deps: &[Dependency]) -> bool { + let mut s = self.0.borrow_mut(); + let mut assumption = vec![]; + let mut this_call = None; + + // the starting `deps` need to be satisfied + for dep in deps.iter() { + let empty_vec = vec![]; + let matches: Vec = s + .by_name + .get(dep.package_name().as_str()) + .unwrap_or(&empty_vec) + .iter() + .filter(|&p| dep.matches_id(*p)) + .map(|p| s.var_for_is_packages_used[p].positive()) + .collect(); + if matches.is_empty() { + return false; + } else if matches.len() == 1 { + assumption.extend_from_slice(&matches) + } else { + if this_call.is_none() { + let new_var = s.solver.new_var(); + this_call = Some(new_var); + assumption.push(new_var.positive()); + } + let mut matches = matches; + matches.push(this_call.unwrap().negative()); + s.solver.add_clause(&matches); + } + } + + s.solver.assume(&assumption); + + s.solver + .solve() + .expect("docs say it can't error in default config") + } + pub fn sat_is_valid_solution(&self, pids: &[PackageId]) -> bool { + let mut s = self.0.borrow_mut(); + for p in pids { + if p.name().as_str() != "root" && !s.var_for_is_packages_used.contains_key(p) { + return false; + } + } + let assumption: Vec<_> = s + .var_for_is_packages_used + .iter() + .map(|(p, v)| v.lit(pids.contains(p))) + .collect(); + + s.solver.assume(&assumption); + + s.solver + .solve() + .expect("docs say it can't error in default config") + } + fn use_packages(&self) -> Option { + self.0.borrow().solver.model().map(|lits| { + let lits: HashSet<_> = lits + .iter() + .filter(|l| l.is_positive()) + .map(|l| l.var()) + .collect(); + let mut out = String::new(); + out.push_str("used:\n"); + for (p, v) in self.0.borrow().var_for_is_packages_used.iter() { + if lits.contains(v) { + writeln!(&mut out, " {}", p).unwrap(); + } + } + out + }) + } +} + +pub trait ToDep { + 
fn to_dep(self) -> Dependency; +} + +impl ToDep for &'static str { + fn to_dep(self) -> Dependency { + Dependency::parse_no_deprecated(self, Some("1.0.0"), registry_loc()).unwrap() + } +} + +impl ToDep for Dependency { + fn to_dep(self) -> Dependency { + self + } +} + +pub trait ToPkgId { + fn to_pkgid(&self) -> PackageId; +} + +impl ToPkgId for PackageId { + fn to_pkgid(&self) -> PackageId { + *self + } +} + +impl<'a> ToPkgId for &'a str { + fn to_pkgid(&self) -> PackageId { + PackageId::new(*self, "1.0.0", registry_loc()).unwrap() + } +} + +impl, U: AsRef> ToPkgId for (T, U) { + fn to_pkgid(&self) -> PackageId { + let (name, vers) = self; + PackageId::new(name.as_ref(), vers.as_ref(), registry_loc()).unwrap() + } +} + +#[macro_export] +macro_rules! pkg { + ($pkgid:expr => [$($deps:expr),+ $(,)* ]) => ({ + let d: Vec = vec![$($deps.to_dep()),+]; + $crate::pkg_dep($pkgid, d) + }); + + ($pkgid:expr) => ({ + $crate::pkg($pkgid) + }) +} + +fn registry_loc() -> SourceId { + lazy_static::lazy_static! { + static ref EXAMPLE_DOT_COM: SourceId = + SourceId::for_registry(&"https://example.com".into_url().unwrap()).unwrap(); + } + *EXAMPLE_DOT_COM +} + +pub fn pkg(name: T) -> Summary { + pkg_dep(name, Vec::new()) +} + +pub fn pkg_dep(name: T, dep: Vec) -> Summary { + let pkgid = name.to_pkgid(); + let link = if pkgid.name().ends_with("-sys") { + Some(pkgid.name().as_str()) + } else { + None + }; + Summary::new( + name.to_pkgid(), + dep, + &BTreeMap::, Vec)>::new(), + link, + false, + ) + .unwrap() +} + +pub fn pkg_id(name: &str) -> PackageId { + PackageId::new(name, "1.0.0", registry_loc()).unwrap() +} + +fn pkg_id_loc(name: &str, loc: &str) -> PackageId { + let remote = loc.into_url(); + let master = GitReference::Branch("master".to_string()); + let source_id = SourceId::for_git(&remote.unwrap(), master).unwrap(); + + PackageId::new(name, "1.0.0", source_id).unwrap() +} + +pub fn pkg_loc(name: &str, loc: &str) -> Summary { + let link = if name.ends_with("-sys") { + Some(name) + } else { + None + }; + Summary::new( + pkg_id_loc(name, loc), + Vec::new(), + &BTreeMap::, Vec)>::new(), + link, + false, + ) + .unwrap() +} + +pub fn remove_dep(sum: &Summary, ind: usize) -> Summary { + let mut deps = sum.dependencies().to_vec(); + deps.remove(ind); + // note: more things will need to be copied over in the future, but it works for now. 
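One detail of `pkg_dep` above that the validation in `resolve_and_validated` leans on: any test package whose name ends in `-sys` is given a `links` key equal to its own name. A small sketch of that convention (illustrative only, with made-up package names):

    // A `-sys` package carries a links key equal to its name; a plain package does not.
    assert_eq!(
        pkg(("net-sys", "1.0.0")).links().map(|l| l.as_str()),
        Some("net-sys")
    );
    assert_eq!(pkg(("net", "1.0.0")).links(), None);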
+ Summary::new( + sum.package_id(), + deps, + &BTreeMap::, Vec)>::new(), + sum.links().map(|a| a.as_str()), + sum.namespaced_features(), + ) + .unwrap() +} + +pub fn dep(name: &str) -> Dependency { + dep_req(name, "*") +} +pub fn dep_req(name: &str, req: &str) -> Dependency { + Dependency::parse_no_deprecated(name, Some(req), registry_loc()).unwrap() +} +pub fn dep_req_kind(name: &str, req: &str, kind: Kind, public: bool) -> Dependency { + let mut dep = dep_req(name, req); + dep.set_kind(kind); + dep.set_public(public); + dep +} + +pub fn dep_loc(name: &str, location: &str) -> Dependency { + let url = location.into_url().unwrap(); + let master = GitReference::Branch("master".to_string()); + let source_id = SourceId::for_git(&url, master).unwrap(); + Dependency::parse_no_deprecated(name, Some("1.0.0"), source_id).unwrap() +} +pub fn dep_kind(name: &str, kind: Kind) -> Dependency { + dep(name).set_kind(kind).clone() +} + +pub fn registry(pkgs: Vec) -> Vec { + pkgs +} + +pub fn names(names: &[P]) -> Vec { + names.iter().map(|name| name.to_pkgid()).collect() +} + +pub fn loc_names(names: &[(&'static str, &'static str)]) -> Vec { + names + .iter() + .map(|&(name, loc)| pkg_id_loc(name, loc)) + .collect() +} + +/// By default `Summary` and `Dependency` have a very verbose `Debug` representation. +/// This replaces with a representation that uses constructors from this file. +/// +/// If `registry_strategy` is improved to modify more fields +/// then this needs to update to display the corresponding constructor. +pub struct PrettyPrintRegistry(pub Vec); + +impl fmt::Debug for PrettyPrintRegistry { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "vec![")?; + for s in &self.0 { + if s.dependencies().is_empty() { + write!(f, "pkg!((\"{}\", \"{}\")),", s.name(), s.version())?; + } else { + write!(f, "pkg!((\"{}\", \"{}\") => [", s.name(), s.version())?; + for d in s.dependencies() { + if d.kind() == Kind::Normal + && &d.version_req().to_string() == "*" + && !d.is_public() + { + write!(f, "dep(\"{}\"),", d.name_in_toml())?; + } else if d.kind() == Kind::Normal && !d.is_public() { + write!( + f, + "dep_req(\"{}\", \"{}\"),", + d.name_in_toml(), + d.version_req() + )?; + } else { + write!( + f, + "dep_req_kind(\"{}\", \"{}\", {}, {}),", + d.name_in_toml(), + d.version_req(), + match d.kind() { + Kind::Development => "Kind::Development", + Kind::Build => "Kind::Build", + Kind::Normal => "Kind::Normal", + }, + d.is_public() + )?; + } + } + write!(f, "]),")?; + } + } + write!(f, "]") + } +} + +#[test] +fn meta_test_deep_pretty_print_registry() { + assert_eq!( + &format!( + "{:?}", + PrettyPrintRegistry(vec![ + pkg!(("foo", "1.0.1") => [dep_req("bar", "1")]), + pkg!(("foo", "1.0.0") => [dep_req("bar", "2")]), + pkg!(("foo", "2.0.0") => [dep_req("bar", "*")]), + pkg!(("bar", "1.0.0") => [dep_req("baz", "=1.0.2"), + dep_req("other", "1")]), + pkg!(("bar", "2.0.0") => [dep_req("baz", "=1.0.1")]), + pkg!(("baz", "1.0.2") => [dep_req("other", "2")]), + pkg!(("baz", "1.0.1")), + pkg!(("cat", "1.0.2") => [dep_req_kind("other", "2", Kind::Build, false)]), + pkg!(("cat", "1.0.3") => [dep_req_kind("other", "2", Kind::Development, false)]), + pkg!(("dep_req", "1.0.0")), + pkg!(("dep_req", "2.0.0")), + ]) + ), + "vec![pkg!((\"foo\", \"1.0.1\") => [dep_req(\"bar\", \"^1\"),]),\ + pkg!((\"foo\", \"1.0.0\") => [dep_req(\"bar\", \"^2\"),]),\ + pkg!((\"foo\", \"2.0.0\") => [dep(\"bar\"),]),\ + pkg!((\"bar\", \"1.0.0\") => [dep_req(\"baz\", \"= 1.0.2\"),dep_req(\"other\", \"^1\"),]),\ + 
pkg!((\"bar\", \"2.0.0\") => [dep_req(\"baz\", \"= 1.0.1\"),]),\ + pkg!((\"baz\", \"1.0.2\") => [dep_req(\"other\", \"^2\"),]),\ + pkg!((\"baz\", \"1.0.1\")),\ + pkg!((\"cat\", \"1.0.2\") => [dep_req_kind(\"other\", \"^2\", Kind::Build, false),]),\ + pkg!((\"cat\", \"1.0.3\") => [dep_req_kind(\"other\", \"^2\", Kind::Development, false),]),\ + pkg!((\"dep_req\", \"1.0.0\")),\ + pkg!((\"dep_req\", \"2.0.0\")),]" + ) +} + +/// This generates a random registry index. +/// Unlike vec((Name, Ver, vec((Name, VerRq), ..), ..) +/// This strategy has a high probability of having valid dependencies +pub fn registry_strategy( + max_crates: usize, + max_versions: usize, + shrinkage: usize, +) -> impl Strategy { + let name = string_regex("[A-Za-z][A-Za-z0-9_-]*(-sys)?").unwrap(); + + let raw_version = ..max_versions.pow(3); + let version_from_raw = move |r: usize| { + let major = ((r / max_versions) / max_versions) % max_versions; + let minor = (r / max_versions) % max_versions; + let patch = r % max_versions; + format!("{}.{}.{}", major, minor, patch) + }; + + // If this is false than the crate will depend on the nonexistent "bad" + // instead of the complex set we generated for it. + let allow_deps = prop::bool::weighted(0.99); + + let list_of_versions = + btree_map(raw_version, allow_deps, 1..=max_versions).prop_map(move |ver| { + ver.into_iter() + .map(|a| (version_from_raw(a.0), a.1)) + .collect::>() + }); + + let list_of_crates_with_versions = + btree_map(name, list_of_versions, 1..=max_crates).prop_map(|mut vers| { + // root is the name of the thing being compiled + // so it would be confusing to have it in the index + vers.remove("root"); + // bad is a name reserved for a dep that won't work + vers.remove("bad"); + vers + }); + + // each version of each crate can depend on each crate smaller then it. + // In theory shrinkage should be 2, but in practice we get better trees with a larger value. + let max_deps = max_versions * (max_crates * (max_crates - 1)) / shrinkage; + + let raw_version_range = (any::(), any::()); + let raw_dependency = ( + any::(), + any::(), + raw_version_range, + 0..=1, + Just(false), + // TODO: ^ this needs to be set back to `any::()` and work before public & private dependencies can stabilize + ); + + fn order_index(a: Index, b: Index, size: usize) -> (usize, usize) { + let (a, b) = (a.index(size), b.index(size)); + (min(a, b), max(a, b)) + } + + let list_of_raw_dependency = vec(raw_dependency, ..=max_deps); + + // By default a package depends only on other packages that have a smaller name, + // this helps make sure that all things in the resulting index are DAGs. + // If this is true then the DAG is maintained with grater instead. 
+ let reverse_alphabetical = any::().no_shrink(); + + ( + list_of_crates_with_versions, + list_of_raw_dependency, + reverse_alphabetical, + ) + .prop_map( + |(crate_vers_by_name, raw_dependencies, reverse_alphabetical)| { + let list_of_pkgid: Vec<_> = crate_vers_by_name + .iter() + .flat_map(|(name, vers)| vers.iter().map(move |x| ((name.as_str(), &x.0), x.1))) + .collect(); + let len_all_pkgid = list_of_pkgid.len(); + let mut dependency_by_pkgid = vec![vec![]; len_all_pkgid]; + for (a, b, (c, d), k, p) in raw_dependencies { + let (a, b) = order_index(a, b, len_all_pkgid); + let (a, b) = if reverse_alphabetical { (b, a) } else { (a, b) }; + let ((dep_name, _), _) = list_of_pkgid[a]; + if (list_of_pkgid[b].0).0 == dep_name { + continue; + } + let s = &crate_vers_by_name[dep_name]; + let s_last_index = s.len() - 1; + let (c, d) = order_index(c, d, s.len()); + + dependency_by_pkgid[b].push(dep_req_kind( + dep_name, + &if c == 0 && d == s_last_index { + "*".to_string() + } else if c == 0 { + format!("<={}", s[d].0) + } else if d == s_last_index { + format!(">={}", s[c].0) + } else if c == d { + format!("={}", s[c].0) + } else { + format!(">={}, <={}", s[c].0, s[d].0) + }, + match k { + 0 => Kind::Normal, + 1 => Kind::Build, + // => Kind::Development, // Development has no impact so don't gen + _ => panic!("bad index for Kind"), + }, + p && k == 0, + )) + } + + let mut out: Vec = list_of_pkgid + .into_iter() + .zip(dependency_by_pkgid.into_iter()) + .map(|(((name, ver), allow_deps), deps)| { + pkg_dep( + (name, ver).to_pkgid(), + if !allow_deps { + vec![dep_req("bad", "*")] + } else { + let mut deps = deps; + deps.sort_by_key(|d| d.name_in_toml()); + deps.dedup_by_key(|d| d.name_in_toml()); + deps + }, + ) + }) + .collect(); + + if reverse_alphabetical { + // make sure the complicated cases are at the end + out.reverse(); + } + + PrettyPrintRegistry(out) + }, + ) +} + +/// This test is to test the generator to ensure +/// that it makes registries with large dependency trees +#[test] +fn meta_test_deep_trees_from_strategy() { + use proptest::strategy::ValueTree; + use proptest::test_runner::TestRunner; + + let mut dis = [0; 21]; + + let strategy = registry_strategy(50, 20, 60); + let mut test_runner = TestRunner::deterministic(); + for _ in 0..128 { + let PrettyPrintRegistry(input) = strategy + .new_tree(&mut TestRunner::new_with_rng( + Default::default(), + test_runner.new_rng(), + )) + .unwrap() + .current(); + let reg = registry(input.clone()); + for this in input.iter().rev().take(10) { + let res = resolve( + vec![dep_req(&this.name(), &format!("={}", this.version()))], + ®, + ); + dis[res + .as_ref() + .map(|x| min(x.len(), dis.len()) - 1) + .unwrap_or(0)] += 1; + if dis.iter().all(|&x| x > 0) { + return; + } + } + } + + panic!( + "In 1280 tries we did not see a wide enough distribution of dependency trees! 
dis: {:?}", + dis + ); +} + +/// This test is to test the generator to ensure +/// that it makes registries that include multiple versions of the same library +#[test] +fn meta_test_multiple_versions_strategy() { + use proptest::strategy::ValueTree; + use proptest::test_runner::TestRunner; + + let mut dis = [0; 10]; + + let strategy = registry_strategy(50, 20, 60); + let mut test_runner = TestRunner::deterministic(); + for _ in 0..128 { + let PrettyPrintRegistry(input) = strategy + .new_tree(&mut TestRunner::new_with_rng( + Default::default(), + test_runner.new_rng(), + )) + .unwrap() + .current(); + let reg = registry(input.clone()); + for this in input.iter().rev().take(10) { + let res = resolve( + vec![dep_req(&this.name(), &format!("={}", this.version()))], + ®, + ); + if let Ok(mut res) = res { + let res_len = res.len(); + res.sort_by_key(|s| s.name()); + res.dedup_by_key(|s| s.name()); + dis[min(res_len - res.len(), dis.len() - 1)] += 1; + } + if dis.iter().all(|&x| x > 0) { + return; + } + } + } + panic!( + "In 1280 tries we did not see a wide enough distribution of multiple versions of the same library! dis: {:?}", + dis + ); +} + +/// Assert `xs` contains `elems` +pub fn assert_contains(xs: &[A], elems: &[A]) { + for elem in elems { + assert!(xs.contains(elem)); + } +} + +pub fn assert_same(a: &[A], b: &[A]) { + assert_eq!(a.len(), b.len()); + assert_contains(b, a); +} diff --git a/src/cargo/core/compiler/build_context/mod.rs b/src/cargo/core/compiler/build_context/mod.rs index 18956632152..3627d3e6b0a 100644 --- a/src/cargo/core/compiler/build_context/mod.rs +++ b/src/cargo/core/compiler/build_context/mod.rs @@ -1,284 +1,290 @@ -use std::collections::HashMap; -use std::path::{Path, PathBuf}; -use std::str; - -use log::debug; - -use crate::core::compiler::unit::UnitInterner; -use crate::core::compiler::{BuildConfig, BuildOutput, Kind, Unit}; -use crate::core::profiles::Profiles; -use crate::core::{Dependency, Workspace}; -use crate::core::{PackageId, PackageSet, Resolve}; -use crate::util::errors::CargoResult; -use crate::util::{profile, Cfg, Config, Rustc}; - -mod target_info; -pub use self::target_info::{FileFlavor, TargetInfo}; - -/// The build context, containing all information about a build task. -pub struct BuildContext<'a, 'cfg> { - /// The workspace the build is for. - pub ws: &'a Workspace<'cfg>, - /// The cargo configuration. - pub config: &'cfg Config, - /// The dependency graph for our build. - pub resolve: &'a Resolve, - pub profiles: &'a Profiles, - pub build_config: &'a BuildConfig, - /// Extra compiler args for either `rustc` or `rustdoc`. - pub extra_compiler_args: HashMap, Vec>, - pub packages: &'a PackageSet<'cfg>, - - /// Information about the compiler. - pub rustc: Rustc, - /// Build information for the host arch. - pub host_config: TargetConfig, - /// Build information for the target. 
- pub target_config: TargetConfig, - pub target_info: TargetInfo, - pub host_info: TargetInfo, - pub units: &'a UnitInterner<'a>, -} - -impl<'a, 'cfg> BuildContext<'a, 'cfg> { - pub fn new( - ws: &'a Workspace<'cfg>, - resolve: &'a Resolve, - packages: &'a PackageSet<'cfg>, - config: &'cfg Config, - build_config: &'a BuildConfig, - profiles: &'a Profiles, - units: &'a UnitInterner<'a>, - extra_compiler_args: HashMap, Vec>, - ) -> CargoResult> { - let rustc = config.load_global_rustc(Some(ws))?; - - let host_config = TargetConfig::new(config, &rustc.host)?; - let target_config = match build_config.requested_target.as_ref() { - Some(triple) => TargetConfig::new(config, triple)?, - None => host_config.clone(), - }; - let (host_info, target_info) = { - let _p = profile::start("BuildContext::probe_target_info"); - debug!("probe_target_info"); - let host_info = - TargetInfo::new(config, &build_config.requested_target, &rustc, Kind::Host)?; - let target_info = - TargetInfo::new(config, &build_config.requested_target, &rustc, Kind::Target)?; - (host_info, target_info) - }; - - Ok(BuildContext { - ws, - resolve, - packages, - config, - rustc, - target_config, - target_info, - host_config, - host_info, - build_config, - profiles, - extra_compiler_args, - units, - }) - } - - pub fn extern_crate_name(&self, unit: &Unit<'a>, dep: &Unit<'a>) -> CargoResult { - self.resolve - .extern_crate_name(unit.pkg.package_id(), dep.pkg.package_id(), dep.target) - } - - pub fn is_public_dependency(&self, unit: &Unit<'a>, dep: &Unit<'a>) -> bool { - self.resolve - .is_public_dep(unit.pkg.package_id(), dep.pkg.package_id()) - } - - /// Whether a dependency should be compiled for the host or target platform, - /// specified by `Kind`. - pub fn dep_platform_activated(&self, dep: &Dependency, kind: Kind) -> bool { - // If this dependency is only available for certain platforms, - // make sure we're only enabling it for that platform. - let platform = match dep.platform() { - Some(p) => p, - None => return true, - }; - let (name, info) = match kind { - Kind::Host => (self.host_triple(), &self.host_info), - Kind::Target => (self.target_triple(), &self.target_info), - }; - platform.matches(name, info.cfg()) - } - - /// Gets the user-specified linker for a particular host or target. - pub fn linker(&self, kind: Kind) -> Option<&Path> { - self.target_config(kind).linker.as_ref().map(|s| s.as_ref()) - } - - /// Gets the user-specified `ar` program for a particular host or target. - pub fn ar(&self, kind: Kind) -> Option<&Path> { - self.target_config(kind).ar.as_ref().map(|s| s.as_ref()) - } - - /// Gets the list of `cfg`s printed out from the compiler for the specified kind. - pub fn cfg(&self, kind: Kind) -> &[Cfg] { - let info = match kind { - Kind::Host => &self.host_info, - Kind::Target => &self.target_info, - }; - info.cfg() - } - - /// Gets the host architecture triple. - /// - /// For example, x86_64-unknown-linux-gnu, would be - /// - machine: x86_64, - /// - hardware-platform: unknown, - /// - operating system: linux-gnu. - pub fn host_triple(&self) -> &str { - &self.rustc.host - } - - pub fn target_triple(&self) -> &str { - self.build_config - .requested_target - .as_ref() - .map(|s| s.as_str()) - .unwrap_or_else(|| self.host_triple()) - } - - /// Gets the target configuration for a particular host or target. - fn target_config(&self, kind: Kind) -> &TargetConfig { - match kind { - Kind::Host => &self.host_config, - Kind::Target => &self.target_config, - } - } - - /// Gets the number of jobs specified for this build. 
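`dep_platform_activated` above is where a platform-restricted dependency is accepted or skipped for a given compile `Kind`: the dependency's `Platform` is evaluated against the chosen triple and that triple's `cfg` values. A sketch of that check (illustrative only, assuming `Platform` and `Cfg` parse from strings as they do elsewhere in cargo):

    // A dependency gated on `cfg(unix)` is active when the target's cfg values
    // include `unix`, and inactive otherwise.
    let platform: Platform = "cfg(unix)".parse().unwrap();
    let unix_cfg: Vec<Cfg> = vec!["unix".parse().unwrap()];
    assert!(platform.matches("x86_64-unknown-linux-gnu", &unix_cfg));
    assert!(!platform.matches("x86_64-pc-windows-msvc", &[]));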
- pub fn jobs(&self) -> u32 { - self.build_config.jobs - } - - pub fn rustflags_args(&self, unit: &Unit<'_>) -> &[String] { - &self.info(unit.kind).rustflags - } - - pub fn rustdocflags_args(&self, unit: &Unit<'_>) -> &[String] { - &self.info(unit.kind).rustdocflags - } - - pub fn show_warnings(&self, pkg: PackageId) -> bool { - pkg.source_id().is_path() || self.config.extra_verbose() - } - - fn info(&self, kind: Kind) -> &TargetInfo { - match kind { - Kind::Host => &self.host_info, - Kind::Target => &self.target_info, - } - } - - pub fn extra_args_for(&self, unit: &Unit<'a>) -> Option<&Vec> { - self.extra_compiler_args.get(unit) - } -} - -/// Information required to build for a target. -#[derive(Clone, Default)] -pub struct TargetConfig { - /// The path of archiver (lib builder) for this target. - pub ar: Option, - /// The path of the linker for this target. - pub linker: Option, - /// Special build options for any necessary input files (filename -> options). - pub overrides: HashMap, -} - -impl TargetConfig { - pub fn new(config: &Config, triple: &str) -> CargoResult { - let key = format!("target.{}", triple); - let mut ret = TargetConfig { - ar: config.get_path(&format!("{}.ar", key))?.map(|v| v.val), - linker: config.get_path(&format!("{}.linker", key))?.map(|v| v.val), - overrides: HashMap::new(), - }; - let table = match config.get_table(&key)? { - Some(table) => table.val, - None => return Ok(ret), - }; - for (lib_name, value) in table { - match lib_name.as_str() { - "ar" | "linker" | "runner" | "rustflags" => continue, - _ => {} - } - - let mut output = BuildOutput { - library_paths: Vec::new(), - library_links: Vec::new(), - linker_args: Vec::new(), - cfgs: Vec::new(), - env: Vec::new(), - metadata: Vec::new(), - rerun_if_changed: Vec::new(), - rerun_if_env_changed: Vec::new(), - warnings: Vec::new(), - }; - // We require deterministic order of evaluation, so we must sort the pairs by key first. - let mut pairs = Vec::new(); - for (k, value) in value.table(&lib_name)?.0 { - pairs.push((k, value)); - } - pairs.sort_by_key(|p| p.0); - for (k, value) in pairs { - let key = format!("{}.{}", key, k); - match &k[..] 
{ - "rustc-flags" => { - let (flags, definition) = value.string(k)?; - let whence = format!("in `{}` (in {})", key, definition.display()); - let (paths, links) = BuildOutput::parse_rustc_flags(flags, &whence)?; - output.library_paths.extend(paths); - output.library_links.extend(links); - } - "rustc-link-lib" => { - let list = value.list(k)?; - output - .library_links - .extend(list.iter().map(|v| v.0.clone())); - } - "rustc-link-search" => { - let list = value.list(k)?; - output - .library_paths - .extend(list.iter().map(|v| PathBuf::from(&v.0))); - } - "rustc-cdylib-link-arg" => { - let args = value.list(k)?; - output.linker_args.extend(args.iter().map(|v| v.0.clone())); - } - "rustc-cfg" => { - let list = value.list(k)?; - output.cfgs.extend(list.iter().map(|v| v.0.clone())); - } - "rustc-env" => { - for (name, val) in value.table(k)?.0 { - let val = val.string(name)?.0; - output.env.push((name.clone(), val.to_string())); - } - } - "warning" | "rerun-if-changed" | "rerun-if-env-changed" => { - failure::bail!("`{}` is not supported in build script overrides", k); - } - _ => { - let val = value.string(k)?.0; - output.metadata.push((k.clone(), val.to_string())); - } - } - } - ret.overrides.insert(lib_name, output); - } - - Ok(ret) - } -} +use std::collections::HashMap; +use std::path::{Path, PathBuf}; +use std::str; + +use log::debug; + +use crate::core::compiler::unit::UnitInterner; +use crate::core::compiler::{BuildConfig, BuildOutput, Kind, Unit}; +use crate::core::profiles::Profiles; +use crate::core::{Dependency, Workspace}; +use crate::core::{PackageId, PackageSet, Resolve}; +use crate::util::errors::CargoResult; +use crate::util::{profile, Cfg, Config, Platform, Rustc}; + +mod target_info; +pub use self::target_info::{FileFlavor, TargetInfo}; + +/// The build context, containing all information about a build task. +pub struct BuildContext<'a, 'cfg> { + /// The workspace the build is for. + pub ws: &'a Workspace<'cfg>, + /// The cargo configuration. + pub config: &'cfg Config, + /// The dependency graph for our build. + pub resolve: &'a Resolve, + pub profiles: &'a Profiles, + pub build_config: &'a BuildConfig, + /// Extra compiler args for either `rustc` or `rustdoc`. + pub extra_compiler_args: HashMap, Vec>, + pub packages: &'a PackageSet<'cfg>, + + /// Information about the compiler. + pub rustc: Rustc, + /// Build information for the host arch. + pub host_config: TargetConfig, + /// Build information for the target. 
+ pub target_config: TargetConfig, + pub target_info: TargetInfo, + pub host_info: TargetInfo, + pub units: &'a UnitInterner<'a>, +} + +impl<'a, 'cfg> BuildContext<'a, 'cfg> { + pub fn new( + ws: &'a Workspace<'cfg>, + resolve: &'a Resolve, + packages: &'a PackageSet<'cfg>, + config: &'cfg Config, + build_config: &'a BuildConfig, + profiles: &'a Profiles, + units: &'a UnitInterner<'a>, + extra_compiler_args: HashMap, Vec>, + ) -> CargoResult> { + let rustc = config.load_global_rustc(Some(ws))?; + + let host_config = TargetConfig::new(config, &rustc.host)?; + let target_config = match build_config.requested_target.as_ref() { + Some(triple) => TargetConfig::new(config, triple)?, + None => host_config.clone(), + }; + let (host_info, target_info) = { + let _p = profile::start("BuildContext::probe_target_info"); + debug!("probe_target_info"); + let host_info = + TargetInfo::new(config, &build_config.requested_target, &rustc, Kind::Host)?; + let target_info = + TargetInfo::new(config, &build_config.requested_target, &rustc, Kind::Target)?; + (host_info, target_info) + }; + + Ok(BuildContext { + ws, + resolve, + packages, + config, + rustc, + target_config, + target_info, + host_config, + host_info, + build_config, + profiles, + extra_compiler_args, + units, + }) + } + + pub fn extern_crate_name(&self, unit: &Unit<'a>, dep: &Unit<'a>) -> CargoResult { + self.resolve + .extern_crate_name(unit.pkg.package_id(), dep.pkg.package_id(), dep.target) + } + + pub fn is_public_dependency(&self, unit: &Unit<'a>, dep: &Unit<'a>) -> bool { + self.resolve + .is_public_dep(unit.pkg.package_id(), dep.pkg.package_id()) + } + + /// Whether a given platform matches the host or target platform, + /// specified by `Kind`. + pub fn platform_activated(&self, platform: Option<&Platform>, kind: Kind) -> bool { + let platform = match platform { + Some(p) => p, + None => return true, + }; + let (name, info) = match kind { + Kind::Host => (self.host_triple(), &self.host_info), + Kind::Target => (self.target_triple(), &self.target_info), + }; + platform.matches(name, info.cfg()) + } + + /// Whether a dependency should be compiled for the host or target platform, + /// specified by `Kind`. + pub fn dep_platform_activated(&self, dep: &Dependency, kind: Kind) -> bool { + // If this dependency is only available for certain platforms, + // make sure we're only enabling it for that platform. + self.platform_activated(dep.platform(), kind) + } + + /// Gets the user-specified linker for a particular host or target + pub fn linker(&self, kind: Kind) -> Option<&Path> { + self.target_config(kind).linker.as_ref().map(|s| s.as_ref()) + } + + /// Gets the user-specified `ar` program for a particular host or target. + pub fn ar(&self, kind: Kind) -> Option<&Path> { + self.target_config(kind).ar.as_ref().map(|s| s.as_ref()) + } + + /// Gets the list of `cfg`s printed out from the compiler for the specified kind. + pub fn cfg(&self, kind: Kind) -> &[Cfg] { + let info = match kind { + Kind::Host => &self.host_info, + Kind::Target => &self.target_info, + }; + info.cfg() + } + + /// Gets the host architecture triple. + /// + /// For example, x86_64-unknown-linux-gnu, would be + /// - machine: x86_64, + /// - hardware-platform: unknown, + /// - operating system: linux-gnu. 
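The new `platform_activated` helper above takes the optional `Platform` directly, so the same check can back both `dep_platform_activated` and, later in this patch, platform-gated features. A standalone sketch of that rule, using hypothetical stand-in types rather than Cargo's `Platform`/`TargetInfo`:

```rust
// Standalone sketch (hypothetical types, not Cargo's): the dispatch behind
// `platform_activated`. A missing platform spec always matches; otherwise the
// spec is checked against whichever side `Kind` selects.
#[derive(Clone, Copy)]
enum Kind {
    Host,
    Target,
}

struct Info<'a> {
    triple: &'a str,
    cfgs: &'a [&'a str],
}

// Stand-in for `Platform::matches`; the real version also evaluates
// `cfg(...)` expressions against the probed target info.
fn platform_matches(spec: &str, triple: &str, _cfgs: &[&str]) -> bool {
    spec == triple
}

fn platform_activated(spec: Option<&str>, kind: Kind, host: &Info<'_>, target: &Info<'_>) -> bool {
    let spec = match spec {
        Some(s) => s,
        None => return true, // not restricted to any platform
    };
    let info = match kind {
        Kind::Host => host,
        Kind::Target => target,
    };
    platform_matches(spec, info.triple, info.cfgs)
}

fn main() {
    let host = Info { triple: "x86_64-unknown-linux-gnu", cfgs: &["unix"] };
    let target = Info { triple: "thumbv7em-none-eabihf", cfgs: &[] };
    assert!(platform_activated(None, Kind::Target, &host, &target));
    assert!(!platform_activated(Some("x86_64-unknown-linux-gnu"), Kind::Target, &host, &target));
    assert!(platform_activated(Some("x86_64-unknown-linux-gnu"), Kind::Host, &host, &target));
}
```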
+ pub fn host_triple(&self) -> &str { + &self.rustc.host + } + + pub fn target_triple(&self) -> &str { + self.build_config + .requested_target + .as_ref() + .map(|s| s.as_str()) + .unwrap_or_else(|| self.host_triple()) + } + + /// Gets the target configuration for a particular host or target. + fn target_config(&self, kind: Kind) -> &TargetConfig { + match kind { + Kind::Host => &self.host_config, + Kind::Target => &self.target_config, + } + } + + /// Gets the number of jobs specified for this build. + pub fn jobs(&self) -> u32 { + self.build_config.jobs + } + + pub fn rustflags_args(&self, unit: &Unit<'_>) -> &[String] { + &self.info(unit.kind).rustflags + } + + pub fn rustdocflags_args(&self, unit: &Unit<'_>) -> &[String] { + &self.info(unit.kind).rustdocflags + } + + pub fn show_warnings(&self, pkg: PackageId) -> bool { + pkg.source_id().is_path() || self.config.extra_verbose() + } + + fn info(&self, kind: Kind) -> &TargetInfo { + match kind { + Kind::Host => &self.host_info, + Kind::Target => &self.target_info, + } + } + + pub fn extra_args_for(&self, unit: &Unit<'a>) -> Option<&Vec> { + self.extra_compiler_args.get(unit) + } +} + +/// Information required to build for a target. +#[derive(Clone, Default)] +pub struct TargetConfig { + /// The path of archiver (lib builder) for this target. + pub ar: Option, + /// The path of the linker for this target. + pub linker: Option, + /// Special build options for any necessary input files (filename -> options). + pub overrides: HashMap, +} + +impl TargetConfig { + pub fn new(config: &Config, triple: &str) -> CargoResult { + let key = format!("target.{}", triple); + let mut ret = TargetConfig { + ar: config.get_path(&format!("{}.ar", key))?.map(|v| v.val), + linker: config.get_path(&format!("{}.linker", key))?.map(|v| v.val), + overrides: HashMap::new(), + }; + let table = match config.get_table(&key)? { + Some(table) => table.val, + None => return Ok(ret), + }; + for (lib_name, value) in table { + match lib_name.as_str() { + "ar" | "linker" | "runner" | "rustflags" => continue, + _ => {} + } + + let mut output = BuildOutput { + library_paths: Vec::new(), + library_links: Vec::new(), + linker_args: Vec::new(), + cfgs: Vec::new(), + env: Vec::new(), + metadata: Vec::new(), + rerun_if_changed: Vec::new(), + rerun_if_env_changed: Vec::new(), + warnings: Vec::new(), + }; + // We require deterministic order of evaluation, so we must sort the pairs by key first. + let mut pairs = Vec::new(); + for (k, value) in value.table(&lib_name)?.0 { + pairs.push((k, value)); + } + pairs.sort_by_key(|p| p.0); + for (k, value) in pairs { + let key = format!("{}.{}", key, k); + match &k[..] 
{ + "rustc-flags" => { + let (flags, definition) = value.string(k)?; + let whence = format!("in `{}` (in {})", key, definition.display()); + let (paths, links) = BuildOutput::parse_rustc_flags(flags, &whence)?; + output.library_paths.extend(paths); + output.library_links.extend(links); + } + "rustc-link-lib" => { + let list = value.list(k)?; + output + .library_links + .extend(list.iter().map(|v| v.0.clone())); + } + "rustc-link-search" => { + let list = value.list(k)?; + output + .library_paths + .extend(list.iter().map(|v| PathBuf::from(&v.0))); + } + "rustc-cdylib-link-arg" => { + let args = value.list(k)?; + output.linker_args.extend(args.iter().map(|v| v.0.clone())); + } + "rustc-cfg" => { + let list = value.list(k)?; + output.cfgs.extend(list.iter().map(|v| v.0.clone())); + } + "rustc-env" => { + for (name, val) in value.table(k)?.0 { + let val = val.string(name)?.0; + output.env.push((name.clone(), val.to_string())); + } + } + "warning" | "rerun-if-changed" | "rerun-if-env-changed" => { + failure::bail!("`{}` is not supported in build script overrides", k); + } + _ => { + let val = value.string(k)?.0; + output.metadata.push((k.clone(), val.to_string())); + } + } + } + ret.overrides.insert(lib_name, output); + } + + Ok(ret) + } +} diff --git a/src/cargo/core/compiler/context/mod.rs b/src/cargo/core/compiler/context/mod.rs index 9325c8c63e2..85771c572fe 100644 --- a/src/cargo/core/compiler/context/mod.rs +++ b/src/cargo/core/compiler/context/mod.rs @@ -1,561 +1,563 @@ -#![allow(deprecated)] -use std::collections::{HashMap, HashSet}; -use std::ffi::OsStr; -use std::fmt::Write; -use std::path::PathBuf; -use std::sync::Arc; - -use filetime::FileTime; -use jobserver::Client; - -use crate::core::compiler::compilation; -use crate::core::compiler::Unit; -use crate::core::{Package, PackageId, Resolve}; -use crate::util::errors::{CargoResult, CargoResultExt}; -use crate::util::{internal, profile, Config}; - -use super::build_plan::BuildPlan; -use super::custom_build::{self, BuildDeps, BuildScripts, BuildState}; -use super::fingerprint::Fingerprint; -use super::job_queue::JobQueue; -use super::layout::Layout; -use super::{BuildContext, Compilation, CompileMode, Executor, FileFlavor, Kind}; - -mod unit_dependencies; -use self::unit_dependencies::build_unit_dependencies; - -mod compilation_files; -use self::compilation_files::CompilationFiles; -pub use self::compilation_files::{Metadata, OutputFile}; - -pub struct Context<'a, 'cfg> { - pub bcx: &'a BuildContext<'a, 'cfg>, - pub compilation: Compilation<'cfg>, - pub build_state: Arc, - pub build_script_overridden: HashSet<(PackageId, Kind)>, - pub build_explicit_deps: HashMap, BuildDeps>, - pub fingerprints: HashMap, Arc>, - pub mtime_cache: HashMap, - pub compiled: HashSet>, - pub build_scripts: HashMap, Arc>, - pub links: Links, - pub jobserver: Client, - primary_packages: HashSet, - unit_dependencies: HashMap, Vec>>, - files: Option>, - package_cache: HashMap, - - /// A flag indicating whether pipelining is enabled for this compilation - /// session. Pipelining largely only affects the edges of the dependency - /// graph that we generate at the end, and otherwise it's pretty - /// straightforward. - pipelining: bool, - - /// A set of units which are compiling rlibs and are expected to produce - /// metadata files in addition to the rlib itself. This is only filled in - /// when `pipelining` above is enabled. 
- rmeta_required: HashSet>, -} - -impl<'a, 'cfg> Context<'a, 'cfg> { - pub fn new(config: &'cfg Config, bcx: &'a BuildContext<'a, 'cfg>) -> CargoResult { - // Load up the jobserver that we'll use to manage our parallelism. This - // is the same as the GNU make implementation of a jobserver, and - // intentionally so! It's hoped that we can interact with GNU make and - // all share the same jobserver. - // - // Note that if we don't have a jobserver in our environment then we - // create our own, and we create it with `n-1` tokens because one token - // is ourself, a running process. - let jobserver = match config.jobserver_from_env() { - Some(c) => c.clone(), - None => Client::new(bcx.build_config.jobs as usize - 1) - .chain_err(|| "failed to create jobserver")?, - }; - - let pipelining = bcx - .config - .get_bool("build.pipelining")? - .map(|t| t.val) - .unwrap_or(false); - - Ok(Self { - bcx, - compilation: Compilation::new(bcx)?, - build_state: Arc::new(BuildState::new(&bcx.host_config, &bcx.target_config)), - fingerprints: HashMap::new(), - mtime_cache: HashMap::new(), - compiled: HashSet::new(), - build_scripts: HashMap::new(), - build_explicit_deps: HashMap::new(), - links: Links::new(), - jobserver, - build_script_overridden: HashSet::new(), - - primary_packages: HashSet::new(), - unit_dependencies: HashMap::new(), - files: None, - package_cache: HashMap::new(), - rmeta_required: HashSet::new(), - pipelining, - }) - } - - // Returns a mapping of the root package plus its immediate dependencies to - // where the compiled libraries are all located. - pub fn compile( - mut self, - units: &[Unit<'a>], - export_dir: Option, - exec: &Arc, - ) -> CargoResult> { - let mut queue = JobQueue::new(self.bcx); - let mut plan = BuildPlan::new(); - let build_plan = self.bcx.build_config.build_plan; - self.prepare_units(export_dir, units)?; - self.prepare()?; - custom_build::build_map(&mut self, units)?; - self.check_collistions()?; - - for unit in units.iter() { - // Build up a list of pending jobs, each of which represent - // compiling a particular package. No actual work is executed as - // part of this, that's all done next as part of the `execute` - // function which will run everything in order with proper - // parallelism. - let force_rebuild = self.bcx.build_config.force_rebuild; - super::compile(&mut self, &mut queue, &mut plan, unit, exec, force_rebuild)?; - } - - // Now that we've figured out everything that we're going to do, do it! - queue.execute(&mut self, &mut plan)?; - - if build_plan { - plan.set_inputs(self.build_plan_inputs()?); - plan.output_plan(); - } - - for unit in units.iter() { - for output in self.outputs(unit)?.iter() { - if output.flavor == FileFlavor::DebugInfo { - continue; - } - - let bindst = output.bin_dst(); - - if unit.mode == CompileMode::Test { - self.compilation.tests.push(( - unit.pkg.clone(), - unit.target.clone(), - output.path.clone(), - )); - } else if unit.target.is_executable() { - self.compilation.binaries.push(bindst.clone()); - } - } - - for dep in self.dep_targets(unit).iter() { - if !unit.target.is_lib() { - continue; - } - - if dep.mode.is_run_custom_build() { - let out_dir = self.files().build_script_out_dir(dep).display().to_string(); - self.compilation - .extra_env - .entry(dep.pkg.package_id()) - .or_insert_with(Vec::new) - .push(("OUT_DIR".to_string(), out_dir)); - } - } - - if unit.mode.is_doc_test() { - // Note that we can *only* doc-test rlib outputs here. 
A - // staticlib output cannot be linked by the compiler (it just - // doesn't do that). A dylib output, however, can be linked by - // the compiler, but will always fail. Currently all dylibs are - // built as "static dylibs" where the standard library is - // statically linked into the dylib. The doc tests fail, - // however, for now as they try to link the standard library - // dynamically as well, causing problems. As a result we only - // pass `--extern` for rlib deps and skip out on all other - // artifacts. - let mut doctest_deps = Vec::new(); - for dep in self.dep_targets(unit) { - if dep.target.is_lib() && dep.mode == CompileMode::Build { - let outputs = self.outputs(&dep)?; - let outputs = outputs.iter().filter(|output| { - output.path.extension() == Some(OsStr::new("rlib")) - || dep.target.for_host() - }); - for output in outputs { - doctest_deps.push(( - self.bcx.extern_crate_name(unit, &dep)?, - output.path.clone(), - )); - } - } - } - // Help with tests to get a stable order with renamed deps. - doctest_deps.sort(); - self.compilation.to_doc_test.push(compilation::Doctest { - package: unit.pkg.clone(), - target: unit.target.clone(), - deps: doctest_deps, - }); - } - - let feats = self.bcx.resolve.features(unit.pkg.package_id()); - if !feats.is_empty() { - self.compilation - .cfgs - .entry(unit.pkg.package_id()) - .or_insert_with(|| { - feats - .iter() - .map(|feat| format!("feature=\"{}\"", feat)) - .collect() - }); - } - let rustdocflags = self.bcx.rustdocflags_args(unit); - if !rustdocflags.is_empty() { - self.compilation - .rustdocflags - .entry(unit.pkg.package_id()) - .or_insert_with(|| rustdocflags.to_vec()); - } - - super::output_depinfo(&mut self, unit)?; - } - - for (&(ref pkg, _), output) in self.build_state.outputs.lock().unwrap().iter() { - self.compilation - .cfgs - .entry(pkg.clone()) - .or_insert_with(HashSet::new) - .extend(output.cfgs.iter().cloned()); - - self.compilation - .extra_env - .entry(pkg.clone()) - .or_insert_with(Vec::new) - .extend(output.env.iter().cloned()); - - for dir in output.library_paths.iter() { - self.compilation.native_dirs.insert(dir.clone()); - } - } - Ok(self.compilation) - } - - /// Returns the executable for the specified unit (if any). - pub fn get_executable(&mut self, unit: &Unit<'a>) -> CargoResult> { - for output in self.outputs(unit)?.iter() { - if output.flavor == FileFlavor::DebugInfo { - continue; - } - - let is_binary = unit.target.is_executable(); - let is_test = unit.mode.is_any_test() && !unit.mode.is_check(); - - if is_binary || is_test { - return Ok(Option::Some(output.bin_dst().clone())); - } - } - Ok(None) - } - - pub fn prepare_units( - &mut self, - export_dir: Option, - units: &[Unit<'a>], - ) -> CargoResult<()> { - let dest = if self.bcx.build_config.release { - "release" - } else { - "debug" - }; - let host_layout = Layout::new(self.bcx.ws, None, dest)?; - let target_layout = match self.bcx.build_config.requested_target.as_ref() { - Some(target) => Some(Layout::new(self.bcx.ws, Some(target), dest)?), - None => None, - }; - self.primary_packages - .extend(units.iter().map(|u| u.pkg.package_id())); - - build_unit_dependencies(self, units)?; - let files = CompilationFiles::new( - units, - host_layout, - target_layout, - export_dir, - self.bcx.ws, - self, - ); - self.files = Some(files); - Ok(()) - } - - /// Prepare this context, ensuring that all filesystem directories are in - /// place. 
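As the comment above notes, only rlib artifacts can be handed to rustdoc via `--extern` for doc tests. A condensed sketch of that filter, assuming plain `PathBuf` outputs (the real code additionally lets host-side/proc-macro deps through):

```rust
use std::ffi::OsStr;
use std::path::PathBuf;

// Condensed sketch of the doctest filter above: keep only `.rlib` artifacts,
// since those are what rustdoc can link via `--extern`.
fn doctest_externs(outputs: &[PathBuf]) -> Vec<&PathBuf> {
    outputs
        .iter()
        .filter(|p| p.extension() == Some(OsStr::new("rlib")))
        .collect()
}
```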
- pub fn prepare(&mut self) -> CargoResult<()> { - let _p = profile::start("preparing layout"); - - self.files_mut() - .host - .prepare() - .chain_err(|| internal("couldn't prepare build directories"))?; - if let Some(ref mut target) = self.files.as_mut().unwrap().target { - target - .prepare() - .chain_err(|| internal("couldn't prepare build directories"))?; - } - - self.compilation.host_deps_output = self.files_mut().host.deps().to_path_buf(); - - let files = self.files.as_ref().unwrap(); - let layout = files.target.as_ref().unwrap_or(&files.host); - self.compilation.root_output = layout.dest().to_path_buf(); - self.compilation.deps_output = layout.deps().to_path_buf(); - Ok(()) - } - - pub fn files(&self) -> &CompilationFiles<'a, 'cfg> { - self.files.as_ref().unwrap() - } - - fn files_mut(&mut self) -> &mut CompilationFiles<'a, 'cfg> { - self.files.as_mut().unwrap() - } - - /// Returns the filenames that the given unit will generate. - pub fn outputs(&self, unit: &Unit<'a>) -> CargoResult>> { - self.files.as_ref().unwrap().outputs(unit, self.bcx) - } - - /// For a package, return all targets which are registered as dependencies - /// for that package. - // - // TODO: this ideally should be `-> &[Unit<'a>]`. - pub fn dep_targets(&self, unit: &Unit<'a>) -> Vec> { - // If this build script's execution has been overridden then we don't - // actually depend on anything, we've reached the end of the dependency - // chain as we've got all the info we're gonna get. - // - // Note there's a subtlety about this piece of code! The - // `build_script_overridden` map here is populated in - // `custom_build::build_map` which you need to call before inspecting - // dependencies. However, that code itself calls this method and - // gets a full pre-filtered set of dependencies. This is not super - // obvious, and clear, but it does work at the moment. - if unit.target.is_custom_build() { - let key = (unit.pkg.package_id(), unit.kind); - if self.build_script_overridden.contains(&key) { - return Vec::new(); - } - } - self.unit_dependencies[unit].clone() - } - - pub fn is_primary_package(&self, unit: &Unit<'a>) -> bool { - self.primary_packages.contains(&unit.pkg.package_id()) - } - - /// Gets a package for the given package ID. - pub fn get_package(&self, id: PackageId) -> CargoResult<&'a Package> { - self.package_cache - .get(&id) - .cloned() - .ok_or_else(|| failure::format_err!("failed to find {}", id)) - } - - /// Returns the list of filenames read by cargo to generate the `BuildContext` - /// (all `Cargo.toml`, etc.). - pub fn build_plan_inputs(&self) -> CargoResult> { - let mut inputs = Vec::new(); - // Note that we're using the `package_cache`, which should have been - // populated by `build_unit_dependencies`, and only those packages are - // considered as all the inputs. - // - // (Notably, we skip dev-deps here if they aren't present.) 
- for pkg in self.package_cache.values() { - inputs.push(pkg.manifest_path().to_path_buf()); - } - inputs.sort(); - Ok(inputs) - } - - fn check_collistions(&self) -> CargoResult<()> { - let mut output_collisions = HashMap::new(); - let describe_collision = - |unit: &Unit<'_>, other_unit: &Unit<'_>, path: &PathBuf| -> String { - format!( - "The {} target `{}` in package `{}` has the same output \ - filename as the {} target `{}` in package `{}`.\n\ - Colliding filename is: {}\n", - unit.target.kind().description(), - unit.target.name(), - unit.pkg.package_id(), - other_unit.target.kind().description(), - other_unit.target.name(), - other_unit.pkg.package_id(), - path.display() - ) - }; - let suggestion = - "Consider changing their names to be unique or compiling them separately.\n\ - This may become a hard error in the future; see \ - ."; - let report_collision = |unit: &Unit<'_>, - other_unit: &Unit<'_>, - path: &PathBuf| - -> CargoResult<()> { - if unit.target.name() == other_unit.target.name() { - self.bcx.config.shell().warn(format!( - "output filename collision.\n\ - {}\ - The targets should have unique names.\n\ - {}", - describe_collision(unit, other_unit, path), - suggestion - )) - } else { - self.bcx.config.shell().warn(format!( - "output filename collision.\n\ - {}\ - The output filenames should be unique.\n\ - {}\n\ - If this looks unexpected, it may be a bug in Cargo. Please file a bug report at\n\ - https://github.com/rust-lang/cargo/issues/ with as much information as you\n\ - can provide.\n\ - {} running on `{}` target `{}`\n\ - First unit: {:?}\n\ - Second unit: {:?}", - describe_collision(unit, other_unit, path), - suggestion, - crate::version(), self.bcx.host_triple(), self.bcx.target_triple(), - unit, other_unit)) - } - }; - let mut keys = self - .unit_dependencies - .keys() - .filter(|unit| !unit.mode.is_run_custom_build()) - .collect::>(); - // Sort for consistent error messages. - keys.sort_unstable(); - for unit in keys { - for output in self.outputs(unit)?.iter() { - if let Some(other_unit) = output_collisions.insert(output.path.clone(), unit) { - report_collision(unit, other_unit, &output.path)?; - } - if let Some(hardlink) = output.hardlink.as_ref() { - if let Some(other_unit) = output_collisions.insert(hardlink.clone(), unit) { - report_collision(unit, other_unit, hardlink)?; - } - } - if let Some(ref export_path) = output.export_path { - if let Some(other_unit) = output_collisions.insert(export_path.clone(), unit) { - self.bcx.config.shell().warn(format!( - "`--out-dir` filename collision.\n\ - {}\ - The exported filenames should be unique.\n\ - {}", - describe_collision(unit, other_unit, export_path), - suggestion - ))?; - } - } - } - } - Ok(()) - } - - /// Returns whether when `parent` depends on `dep` if it only requires the - /// metadata file from `dep`. - pub fn only_requires_rmeta(&self, parent: &Unit<'a>, dep: &Unit<'a>) -> bool { - // this is only enabled when pipelining is enabled - self.pipelining - // We're only a candidate for requiring an `rmeta` file if we - // ourselves are building an rlib, - && !parent.requires_upstream_objects() - && parent.mode == CompileMode::Build - // Our dependency must also be built as an rlib, otherwise the - // object code must be useful in some fashion - && !dep.requires_upstream_objects() - && dep.mode == CompileMode::Build - } - - /// Returns whether when `unit` is built whether it should emit metadata as - /// well because some compilations rely on that. 
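The collision check above leans on `HashMap::insert` returning the previous occupant to detect two units writing the same file. A minimal sketch of that pattern with simplified types standing in for units and output paths:

```rust
use std::collections::HashMap;
use std::path::PathBuf;

// Sketch of the collision check: insert each output path into a map keyed by
// path; a previous occupant means two units would write the same file, which
// the real code reports as a warning.
fn find_collisions<'a>(outputs: &'a [(String, PathBuf)]) -> Vec<(&'a str, &'a str, &'a PathBuf)> {
    let mut seen: HashMap<&PathBuf, &str> = HashMap::new();
    let mut collisions = Vec::new();
    for (unit, path) in outputs {
        if let Some(other) = seen.insert(path, unit.as_str()) {
            collisions.push((unit.as_str(), other, path));
        }
    }
    collisions
}
```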
- pub fn rmeta_required(&self, unit: &Unit<'a>) -> bool { - self.rmeta_required.contains(unit) - } -} - -#[derive(Default)] -pub struct Links { - validated: HashSet, - links: HashMap, -} - -impl Links { - pub fn new() -> Links { - Links { - validated: HashSet::new(), - links: HashMap::new(), - } - } - - pub fn validate(&mut self, resolve: &Resolve, unit: &Unit<'_>) -> CargoResult<()> { - if !self.validated.insert(unit.pkg.package_id()) { - return Ok(()); - } - let lib = match unit.pkg.manifest().links() { - Some(lib) => lib, - None => return Ok(()), - }; - if let Some(&prev) = self.links.get(lib) { - let pkg = unit.pkg.package_id(); - - let describe_path = |pkgid: PackageId| -> String { - let dep_path = resolve.path_to_top(&pkgid); - let mut dep_path_desc = format!("package `{}`", dep_path[0]); - for dep in dep_path.iter().skip(1) { - write!(dep_path_desc, "\n ... which is depended on by `{}`", dep).unwrap(); - } - dep_path_desc - }; - - failure::bail!( - "multiple packages link to native library `{}`, \ - but a native library can be linked only once\n\ - \n\ - {}\nlinks to native library `{}`\n\ - \n\ - {}\nalso links to native library `{}`", - lib, - describe_path(prev), - lib, - describe_path(pkg), - lib - ) - } - if !unit - .pkg - .manifest() - .targets() - .iter() - .any(|t| t.is_custom_build()) - { - failure::bail!( - "package `{}` specifies that it links to `{}` but does not \ - have a custom build script", - unit.pkg.package_id(), - lib - ) - } - self.links.insert(lib.to_string(), unit.pkg.package_id()); - Ok(()) - } -} +#![allow(deprecated)] +use std::collections::{HashMap, HashSet}; +use std::ffi::OsStr; +use std::fmt::Write; +use std::path::PathBuf; +use std::sync::Arc; + +use filetime::FileTime; +use jobserver::Client; + +use crate::core::compiler::compilation; +use crate::core::compiler::Unit; +use crate::core::{Package, PackageId, Resolve}; +use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::{internal, profile, Config}; + +use super::build_plan::BuildPlan; +use super::custom_build::{self, BuildDeps, BuildScripts, BuildState}; +use super::fingerprint::Fingerprint; +use super::job_queue::JobQueue; +use super::layout::Layout; +use super::{BuildContext, Compilation, CompileMode, Executor, FileFlavor, Kind}; + +mod unit_dependencies; +use self::unit_dependencies::build_unit_dependencies; + +mod compilation_files; +use self::compilation_files::CompilationFiles; +pub use self::compilation_files::{Metadata, OutputFile}; + +pub struct Context<'a, 'cfg> { + pub bcx: &'a BuildContext<'a, 'cfg>, + pub compilation: Compilation<'cfg>, + pub build_state: Arc, + pub build_script_overridden: HashSet<(PackageId, Kind)>, + pub build_explicit_deps: HashMap, BuildDeps>, + pub fingerprints: HashMap, Arc>, + pub mtime_cache: HashMap, + pub compiled: HashSet>, + pub build_scripts: HashMap, Arc>, + pub links: Links, + pub jobserver: Client, + primary_packages: HashSet, + unit_dependencies: HashMap, Vec>>, + files: Option>, + package_cache: HashMap, + + /// A flag indicating whether pipelining is enabled for this compilation + /// session. Pipelining largely only affects the edges of the dependency + /// graph that we generate at the end, and otherwise it's pretty + /// straightforward. + pipelining: bool, + + /// A set of units which are compiling rlibs and are expected to produce + /// metadata files in addition to the rlib itself. This is only filled in + /// when `pipelining` above is enabled. 
+ rmeta_required: HashSet>, +} + +impl<'a, 'cfg> Context<'a, 'cfg> { + pub fn new(config: &'cfg Config, bcx: &'a BuildContext<'a, 'cfg>) -> CargoResult { + // Load up the jobserver that we'll use to manage our parallelism. This + // is the same as the GNU make implementation of a jobserver, and + // intentionally so! It's hoped that we can interact with GNU make and + // all share the same jobserver. + // + // Note that if we don't have a jobserver in our environment then we + // create our own, and we create it with `n-1` tokens because one token + // is ourself, a running process. + let jobserver = match config.jobserver_from_env() { + Some(c) => c.clone(), + None => Client::new(bcx.build_config.jobs as usize - 1) + .chain_err(|| "failed to create jobserver")?, + }; + + let pipelining = bcx + .config + .get_bool("build.pipelining")? + .map(|t| t.val) + .unwrap_or(false); + + Ok(Self { + bcx, + compilation: Compilation::new(bcx)?, + build_state: Arc::new(BuildState::new(&bcx.host_config, &bcx.target_config)), + fingerprints: HashMap::new(), + mtime_cache: HashMap::new(), + compiled: HashSet::new(), + build_scripts: HashMap::new(), + build_explicit_deps: HashMap::new(), + links: Links::new(), + jobserver, + build_script_overridden: HashSet::new(), + + primary_packages: HashSet::new(), + unit_dependencies: HashMap::new(), + files: None, + package_cache: HashMap::new(), + rmeta_required: HashSet::new(), + pipelining, + }) + } + + // Returns a mapping of the root package plus its immediate dependencies to + // where the compiled libraries are all located. + pub fn compile( + mut self, + units: &[Unit<'a>], + export_dir: Option, + exec: &Arc, + ) -> CargoResult> { + let mut queue = JobQueue::new(self.bcx); + let mut plan = BuildPlan::new(); + let build_plan = self.bcx.build_config.build_plan; + self.prepare_units(export_dir, units)?; + self.prepare()?; + custom_build::build_map(&mut self, units)?; + self.check_collistions()?; + + for unit in units.iter() { + // Build up a list of pending jobs, each of which represent + // compiling a particular package. No actual work is executed as + // part of this, that's all done next as part of the `execute` + // function which will run everything in order with proper + // parallelism. + let force_rebuild = self.bcx.build_config.force_rebuild; + super::compile(&mut self, &mut queue, &mut plan, unit, exec, force_rebuild)?; + } + + // Now that we've figured out everything that we're going to do, do it! + queue.execute(&mut self, &mut plan)?; + + if build_plan { + plan.set_inputs(self.build_plan_inputs()?); + plan.output_plan(); + } + + for unit in units.iter() { + for output in self.outputs(unit)?.iter() { + if output.flavor == FileFlavor::DebugInfo { + continue; + } + + let bindst = output.bin_dst(); + + if unit.mode == CompileMode::Test { + self.compilation.tests.push(( + unit.pkg.clone(), + unit.target.clone(), + output.path.clone(), + )); + } else if unit.target.is_executable() { + self.compilation.binaries.push(bindst.clone()); + } + } + + for dep in self.dep_targets(unit).iter() { + if !unit.target.is_lib() { + continue; + } + + if dep.mode.is_run_custom_build() { + let out_dir = self.files().build_script_out_dir(dep).display().to_string(); + self.compilation + .extra_env + .entry(dep.pkg.package_id()) + .or_insert_with(Vec::new) + .push(("OUT_DIR".to_string(), out_dir)); + } + } + + if unit.mode.is_doc_test() { + // Note that we can *only* doc-test rlib outputs here. 
A + // staticlib output cannot be linked by the compiler (it just + // doesn't do that). A dylib output, however, can be linked by + // the compiler, but will always fail. Currently all dylibs are + // built as "static dylibs" where the standard library is + // statically linked into the dylib. The doc tests fail, + // however, for now as they try to link the standard library + // dynamically as well, causing problems. As a result we only + // pass `--extern` for rlib deps and skip out on all other + // artifacts. + let mut doctest_deps = Vec::new(); + for dep in self.dep_targets(unit) { + if dep.target.is_lib() && dep.mode == CompileMode::Build { + let outputs = self.outputs(&dep)?; + let outputs = outputs.iter().filter(|output| { + output.path.extension() == Some(OsStr::new("rlib")) + || dep.target.for_host() + }); + for output in outputs { + doctest_deps.push(( + self.bcx.extern_crate_name(unit, &dep)?, + output.path.clone(), + )); + } + } + } + // Help with tests to get a stable order with renamed deps. + doctest_deps.sort(); + self.compilation.to_doc_test.push(compilation::Doctest { + package: unit.pkg.clone(), + target: unit.target.clone(), + deps: doctest_deps, + }); + } + + let bcx = self.bcx; + let feats = bcx.resolve.features(unit.pkg.package_id()); + if !feats.is_empty() { + self.compilation + .cfgs + .entry(unit.pkg.package_id()) + .or_insert_with(|| { + feats + .iter() + .filter(|feat| bcx.platform_activated(feat.1.as_ref(), unit.kind)) + .map(|feat| format!("feature=\"{}\"", feat.0)) + .collect() + }); + } + let rustdocflags = self.bcx.rustdocflags_args(unit); + if !rustdocflags.is_empty() { + self.compilation + .rustdocflags + .entry(unit.pkg.package_id()) + .or_insert_with(|| rustdocflags.to_vec()); + } + + super::output_depinfo(&mut self, unit)?; + } + + for (&(ref pkg, _), output) in self.build_state.outputs.lock().unwrap().iter() { + self.compilation + .cfgs + .entry(pkg.clone()) + .or_insert_with(HashSet::new) + .extend(output.cfgs.iter().cloned()); + + self.compilation + .extra_env + .entry(pkg.clone()) + .or_insert_with(Vec::new) + .extend(output.env.iter().cloned()); + + for dir in output.library_paths.iter() { + self.compilation.native_dirs.insert(dir.clone()); + } + } + Ok(self.compilation) + } + + /// Returns the executable for the specified unit (if any). + pub fn get_executable(&mut self, unit: &Unit<'a>) -> CargoResult> { + for output in self.outputs(unit)?.iter() { + if output.flavor == FileFlavor::DebugInfo { + continue; + } + + let is_binary = unit.target.is_executable(); + let is_test = unit.mode.is_any_test() && !unit.mode.is_check(); + + if is_binary || is_test { + return Ok(Option::Some(output.bin_dst().clone())); + } + } + Ok(None) + } + + pub fn prepare_units( + &mut self, + export_dir: Option, + units: &[Unit<'a>], + ) -> CargoResult<()> { + let dest = if self.bcx.build_config.release { + "release" + } else { + "debug" + }; + let host_layout = Layout::new(self.bcx.ws, None, dest)?; + let target_layout = match self.bcx.build_config.requested_target.as_ref() { + Some(target) => Some(Layout::new(self.bcx.ws, Some(target), dest)?), + None => None, + }; + self.primary_packages + .extend(units.iter().map(|u| u.pkg.package_id())); + + build_unit_dependencies(self, units)?; + let files = CompilationFiles::new( + units, + host_layout, + target_layout, + export_dir, + self.bcx.ws, + self, + ); + self.files = Some(files); + Ok(()) + } + + /// Prepare this context, ensuring that all filesystem directories are in + /// place. 
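The feature-cfg loop above is where this change takes effect: resolved features now carry an optional platform, and only features whose platform matches the unit's kind become `feature="…"` cfgs. A simplified sketch, with plain strings standing in for Cargo's interned strings and `Platform`:

```rust
// Simplified sketch (plain strings stand in for Cargo's types): turn resolved
// features into `feature="..."` cfgs, skipping any feature whose platform
// gate does not match the current compile target.
fn feature_cfgs(
    features: &[(String, Option<String>)], // (feature name, optional platform spec)
    triple: &str,
) -> Vec<String> {
    features
        .iter()
        .filter(|(_, platform)| match platform {
            Some(spec) => spec == triple, // real Cargo also evaluates cfg expressions
            None => true,
        })
        .map(|(name, _)| format!("feature=\"{}\"", name))
        .collect()
}
```

For example, a feature gated on `x86_64-pc-windows-msvc` would be dropped when compiling for `x86_64-unknown-linux-gnu`, while ungated features always pass through.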
+ pub fn prepare(&mut self) -> CargoResult<()> { + let _p = profile::start("preparing layout"); + + self.files_mut() + .host + .prepare() + .chain_err(|| internal("couldn't prepare build directories"))?; + if let Some(ref mut target) = self.files.as_mut().unwrap().target { + target + .prepare() + .chain_err(|| internal("couldn't prepare build directories"))?; + } + + self.compilation.host_deps_output = self.files_mut().host.deps().to_path_buf(); + + let files = self.files.as_ref().unwrap(); + let layout = files.target.as_ref().unwrap_or(&files.host); + self.compilation.root_output = layout.dest().to_path_buf(); + self.compilation.deps_output = layout.deps().to_path_buf(); + Ok(()) + } + + pub fn files(&self) -> &CompilationFiles<'a, 'cfg> { + self.files.as_ref().unwrap() + } + + fn files_mut(&mut self) -> &mut CompilationFiles<'a, 'cfg> { + self.files.as_mut().unwrap() + } + + /// Returns the filenames that the given unit will generate. + pub fn outputs(&self, unit: &Unit<'a>) -> CargoResult>> { + self.files.as_ref().unwrap().outputs(unit, self.bcx) + } + + /// For a package, return all targets which are registered as dependencies + /// for that package. + // + // TODO: this ideally should be `-> &[Unit<'a>]`. + pub fn dep_targets(&self, unit: &Unit<'a>) -> Vec> { + // If this build script's execution has been overridden then we don't + // actually depend on anything, we've reached the end of the dependency + // chain as we've got all the info we're gonna get. + // + // Note there's a subtlety about this piece of code! The + // `build_script_overridden` map here is populated in + // `custom_build::build_map` which you need to call before inspecting + // dependencies. However, that code itself calls this method and + // gets a full pre-filtered set of dependencies. This is not super + // obvious, and clear, but it does work at the moment. + if unit.target.is_custom_build() { + let key = (unit.pkg.package_id(), unit.kind); + if self.build_script_overridden.contains(&key) { + return Vec::new(); + } + } + self.unit_dependencies[unit].clone() + } + + pub fn is_primary_package(&self, unit: &Unit<'a>) -> bool { + self.primary_packages.contains(&unit.pkg.package_id()) + } + + /// Gets a package for the given package ID. + pub fn get_package(&self, id: PackageId) -> CargoResult<&'a Package> { + self.package_cache + .get(&id) + .cloned() + .ok_or_else(|| failure::format_err!("failed to find {}", id)) + } + + /// Returns the list of filenames read by cargo to generate the `BuildContext` + /// (all `Cargo.toml`, etc.). + pub fn build_plan_inputs(&self) -> CargoResult> { + let mut inputs = Vec::new(); + // Note that we're using the `package_cache`, which should have been + // populated by `build_unit_dependencies`, and only those packages are + // considered as all the inputs. + // + // (Notably, we skip dev-deps here if they aren't present.) 
+ for pkg in self.package_cache.values() { + inputs.push(pkg.manifest_path().to_path_buf()); + } + inputs.sort(); + Ok(inputs) + } + + fn check_collistions(&self) -> CargoResult<()> { + let mut output_collisions = HashMap::new(); + let describe_collision = + |unit: &Unit<'_>, other_unit: &Unit<'_>, path: &PathBuf| -> String { + format!( + "The {} target `{}` in package `{}` has the same output \ + filename as the {} target `{}` in package `{}`.\n\ + Colliding filename is: {}\n", + unit.target.kind().description(), + unit.target.name(), + unit.pkg.package_id(), + other_unit.target.kind().description(), + other_unit.target.name(), + other_unit.pkg.package_id(), + path.display() + ) + }; + let suggestion = + "Consider changing their names to be unique or compiling them separately.\n\ + This may become a hard error in the future; see \ + ."; + let report_collision = |unit: &Unit<'_>, + other_unit: &Unit<'_>, + path: &PathBuf| + -> CargoResult<()> { + if unit.target.name() == other_unit.target.name() { + self.bcx.config.shell().warn(format!( + "output filename collision.\n\ + {}\ + The targets should have unique names.\n\ + {}", + describe_collision(unit, other_unit, path), + suggestion + )) + } else { + self.bcx.config.shell().warn(format!( + "output filename collision.\n\ + {}\ + The output filenames should be unique.\n\ + {}\n\ + If this looks unexpected, it may be a bug in Cargo. Please file a bug report at\n\ + https://github.com/rust-lang/cargo/issues/ with as much information as you\n\ + can provide.\n\ + {} running on `{}` target `{}`\n\ + First unit: {:?}\n\ + Second unit: {:?}", + describe_collision(unit, other_unit, path), + suggestion, + crate::version(), self.bcx.host_triple(), self.bcx.target_triple(), + unit, other_unit)) + } + }; + let mut keys = self + .unit_dependencies + .keys() + .filter(|unit| !unit.mode.is_run_custom_build()) + .collect::>(); + // Sort for consistent error messages. + keys.sort_unstable(); + for unit in keys { + for output in self.outputs(unit)?.iter() { + if let Some(other_unit) = output_collisions.insert(output.path.clone(), unit) { + report_collision(unit, other_unit, &output.path)?; + } + if let Some(hardlink) = output.hardlink.as_ref() { + if let Some(other_unit) = output_collisions.insert(hardlink.clone(), unit) { + report_collision(unit, other_unit, hardlink)?; + } + } + if let Some(ref export_path) = output.export_path { + if let Some(other_unit) = output_collisions.insert(export_path.clone(), unit) { + self.bcx.config.shell().warn(format!( + "`--out-dir` filename collision.\n\ + {}\ + The exported filenames should be unique.\n\ + {}", + describe_collision(unit, other_unit, export_path), + suggestion + ))?; + } + } + } + } + Ok(()) + } + + /// Returns whether when `parent` depends on `dep` if it only requires the + /// metadata file from `dep`. + pub fn only_requires_rmeta(&self, parent: &Unit<'a>, dep: &Unit<'a>) -> bool { + // this is only enabled when pipelining is enabled + self.pipelining + // We're only a candidate for requiring an `rmeta` file if we + // ourselves are building an rlib, + && !parent.requires_upstream_objects() + && parent.mode == CompileMode::Build + // Our dependency must also be built as an rlib, otherwise the + // object code must be useful in some fashion + && !dep.requires_upstream_objects() + && dep.mode == CompileMode::Build + } + + /// Returns whether when `unit` is built whether it should emit metadata as + /// well because some compilations rely on that. 
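`only_requires_rmeta` above restricts pipelining to rlib-to-rlib edges built in plain `Build` mode. A stripped-down sketch of the same predicate, with hypothetical flattened types:

```rust
// Stripped-down sketch of `only_requires_rmeta`: an edge can be satisfied by
// metadata alone only when pipelining is on and both ends are plain
// `Build`-mode units that do not need upstream object code (i.e. rlib-like).
#[derive(PartialEq)]
enum Mode {
    Build,
    Test,
    Check,
}

struct UnitInfo {
    mode: Mode,
    requires_upstream_objects: bool, // true for bins, dylibs, staticlibs, ...
}

fn only_requires_rmeta(pipelining: bool, parent: &UnitInfo, dep: &UnitInfo) -> bool {
    pipelining
        && !parent.requires_upstream_objects
        && parent.mode == Mode::Build
        && !dep.requires_upstream_objects
        && dep.mode == Mode::Build
}
```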
+ pub fn rmeta_required(&self, unit: &Unit<'a>) -> bool { + self.rmeta_required.contains(unit) + } +} + +#[derive(Default)] +pub struct Links { + validated: HashSet, + links: HashMap, +} + +impl Links { + pub fn new() -> Links { + Links { + validated: HashSet::new(), + links: HashMap::new(), + } + } + + pub fn validate(&mut self, resolve: &Resolve, unit: &Unit<'_>) -> CargoResult<()> { + if !self.validated.insert(unit.pkg.package_id()) { + return Ok(()); + } + let lib = match unit.pkg.manifest().links() { + Some(lib) => lib, + None => return Ok(()), + }; + if let Some(&prev) = self.links.get(lib) { + let pkg = unit.pkg.package_id(); + + let describe_path = |pkgid: PackageId| -> String { + let dep_path = resolve.path_to_top(&pkgid); + let mut dep_path_desc = format!("package `{}`", dep_path[0]); + for dep in dep_path.iter().skip(1) { + write!(dep_path_desc, "\n ... which is depended on by `{}`", dep).unwrap(); + } + dep_path_desc + }; + + failure::bail!( + "multiple packages link to native library `{}`, \ + but a native library can be linked only once\n\ + \n\ + {}\nlinks to native library `{}`\n\ + \n\ + {}\nalso links to native library `{}`", + lib, + describe_path(prev), + lib, + describe_path(pkg), + lib + ) + } + if !unit + .pkg + .manifest() + .targets() + .iter() + .any(|t| t.is_custom_build()) + { + failure::bail!( + "package `{}` specifies that it links to `{}` but does not \ + have a custom build script", + unit.pkg.package_id(), + lib + ) + } + self.links.insert(lib.to_string(), unit.pkg.package_id()); + Ok(()) + } +} diff --git a/src/cargo/core/compiler/context/unit_dependencies.rs b/src/cargo/core/compiler/context/unit_dependencies.rs index 6e80f31a195..570c8c3b294 100644 --- a/src/cargo/core/compiler/context/unit_dependencies.rs +++ b/src/cargo/core/compiler/context/unit_dependencies.rs @@ -1,556 +1,569 @@ -//! Constructs the dependency graph for compilation. -//! -//! Rust code is typically organized as a set of Cargo packages. The -//! dependencies between the packages themselves are stored in the -//! `Resolve` struct. However, we can't use that information as is for -//! compilation! A package typically contains several targets, or crates, -//! and these targets has inter-dependencies. For example, you need to -//! compile the `lib` target before the `bin` one, and you need to compile -//! `build.rs` before either of those. -//! -//! So, we need to lower the `Resolve`, which specifies dependencies between -//! *packages*, to a graph of dependencies between their *targets*, and this -//! is exactly what this module is doing! Well, almost exactly: another -//! complication is that we might want to compile the same target several times -//! (for example, with and without tests), so we actually build a dependency -//! graph of `Unit`s, which capture these properties. 
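The module documentation above describes lowering the package-level `Resolve` into a graph of `Unit`s. A toy illustration of why a single package expands into several nodes (all names and fields here are hypothetical):

```rust
use std::collections::HashMap;

// Toy illustration of the lowering described above: one package typically
// expands into several `Unit`-like nodes, and edges run between those nodes,
// not between packages.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
struct UnitKey {
    pkg: &'static str,
    target: &'static str, // "lib", "bin", "build.rs", ...
    mode: &'static str,   // "build", "test", "run-custom-build", ...
}

fn main() {
    let mut unit_deps: HashMap<UnitKey, Vec<UnitKey>> = HashMap::new();
    let lib = UnitKey { pkg: "foo", target: "lib", mode: "build" };
    let bin = UnitKey { pkg: "foo", target: "bin", mode: "build" };
    let script = UnitKey { pkg: "foo", target: "build.rs", mode: "run-custom-build" };

    // `bin` depends on `lib`, and both depend on the build script execution.
    unit_deps.insert(bin.clone(), vec![lib.clone(), script.clone()]);
    unit_deps.insert(lib, vec![script.clone()]);
    unit_deps.insert(script, vec![]);

    assert_eq!(unit_deps[&bin].len(), 2);
}
```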
- -use crate::core::compiler::Unit; -use crate::core::compiler::{BuildContext, CompileMode, Context, Kind}; -use crate::core::dependency::Kind as DepKind; -use crate::core::package::Downloads; -use crate::core::profiles::UnitFor; -use crate::core::{Package, PackageId, Target}; -use crate::CargoResult; -use log::trace; -use std::collections::{HashMap, HashSet}; - -struct State<'a, 'cfg, 'tmp> { - cx: &'tmp mut Context<'a, 'cfg>, - waiting_on_download: HashSet, - downloads: Downloads<'a, 'cfg>, -} - -pub fn build_unit_dependencies<'a, 'cfg>( - cx: &mut Context<'a, 'cfg>, - roots: &[Unit<'a>], -) -> CargoResult<()> { - assert!( - cx.unit_dependencies.is_empty(), - "can only build unit deps once" - ); - - let mut state = State { - downloads: cx.bcx.packages.enable_download()?, - cx, - waiting_on_download: HashSet::new(), - }; - - loop { - for unit in roots.iter() { - state.get(unit.pkg.package_id())?; - - // Dependencies of tests/benches should not have `panic` set. - // We check the global test mode to see if we are running in `cargo - // test` in which case we ensure all dependencies have `panic` - // cleared, and avoid building the lib thrice (once with `panic`, once - // without, once for `--test`). In particular, the lib included for - // Doc tests and examples are `Build` mode here. - let unit_for = if unit.mode.is_any_test() || state.cx.bcx.build_config.test() { - UnitFor::new_test() - } else if unit.target.is_custom_build() { - // This normally doesn't happen, except `clean` aggressively - // generates all units. - UnitFor::new_build() - } else if unit.target.for_host() { - // Proc macro / plugin should never have panic set. - UnitFor::new_compiler() - } else { - UnitFor::new_normal() - }; - deps_of(unit, &mut state, unit_for)?; - } - - if !state.waiting_on_download.is_empty() { - state.finish_some_downloads()?; - state.cx.unit_dependencies.clear(); - } else { - break; - } - } - - connect_run_custom_build_deps(&mut state); - - trace!("ALL UNIT DEPENDENCIES {:#?}", state.cx.unit_dependencies); - - record_units_requiring_metadata(state.cx); - - // Dependencies are used in tons of places throughout the backend, many of - // which affect the determinism of the build itself. As a result be sure - // that dependency lists are always sorted to ensure we've always got a - // deterministic output. - for list in state.cx.unit_dependencies.values_mut() { - list.sort(); - } - - Ok(()) -} - -fn deps_of<'a, 'cfg, 'tmp>( - unit: &Unit<'a>, - state: &mut State<'a, 'cfg, 'tmp>, - unit_for: UnitFor, -) -> CargoResult<()> { - // Currently the `unit_dependencies` map does not include `unit_for`. This should - // be safe for now. `TestDependency` only exists to clear the `panic` - // flag, and you'll never ask for a `unit` with `panic` set as a - // `TestDependency`. `CustomBuild` should also be fine since if the - // requested unit's settings are the same as `Any`, `CustomBuild` can't - // affect anything else in the hierarchy. - if !state.cx.unit_dependencies.contains_key(unit) { - let unit_deps = compute_deps(unit, state, unit_for)?; - let to_insert: Vec<_> = unit_deps.iter().map(|&(unit, _)| unit).collect(); - state.cx.unit_dependencies.insert(*unit, to_insert); - for (unit, unit_for) in unit_deps { - deps_of(&unit, state, unit_for)?; - } - } - Ok(()) -} - -/// For a package, returns all targets that are registered as dependencies -/// for that package. -/// This returns a `Vec` of `(Unit, UnitFor)` pairs. The `UnitFor` -/// is the profile type that should be used for dependencies of the unit. 
-fn compute_deps<'a, 'cfg, 'tmp>( - unit: &Unit<'a>, - state: &mut State<'a, 'cfg, 'tmp>, - unit_for: UnitFor, -) -> CargoResult, UnitFor)>> { - if unit.mode.is_run_custom_build() { - return compute_deps_custom_build(unit, state.cx.bcx); - } else if unit.mode.is_doc() { - // Note: this does not include doc test. - return compute_deps_doc(unit, state); - } - - let bcx = state.cx.bcx; - let id = unit.pkg.package_id(); - let deps = bcx.resolve.deps(id).filter(|&(_id, deps)| { - assert!(!deps.is_empty()); - deps.iter().any(|dep| { - // If this target is a build command, then we only want build - // dependencies, otherwise we want everything *other than* build - // dependencies. - if unit.target.is_custom_build() != dep.is_build() { - return false; - } - - // If this dependency is **not** a transitive dependency, then it - // only applies to test/example targets. - if !dep.is_transitive() - && !unit.target.is_test() - && !unit.target.is_example() - && !unit.mode.is_any_test() - { - return false; - } - - // If this dependency is only available for certain platforms, - // make sure we're only enabling it for that platform. - if !bcx.dep_platform_activated(dep, unit.kind) { - return false; - } - - // If we've gotten past all that, then this dependency is - // actually used! - true - }) - }); - - let mut ret = Vec::new(); - for (id, _) in deps { - let pkg = match state.get(id)? { - Some(pkg) => pkg, - None => continue, - }; - let lib = match pkg.targets().iter().find(|t| t.is_lib()) { - Some(t) => t, - None => continue, - }; - let mode = check_or_build_mode(unit.mode, lib); - let dep_unit_for = unit_for.with_for_host(lib.for_host()); - - if bcx.config.cli_unstable().dual_proc_macros - && lib.proc_macro() - && unit.kind == Kind::Target - { - let unit = new_unit(bcx, pkg, lib, dep_unit_for, Kind::Target, mode); - ret.push((unit, dep_unit_for)); - let unit = new_unit(bcx, pkg, lib, dep_unit_for, Kind::Host, mode); - ret.push((unit, dep_unit_for)); - } else { - let unit = new_unit(bcx, pkg, lib, dep_unit_for, unit.kind.for_target(lib), mode); - ret.push((unit, dep_unit_for)); - } - } - - // If this target is a build script, then what we've collected so far is - // all we need. If this isn't a build script, then it depends on the - // build script if there is one. - if unit.target.is_custom_build() { - return Ok(ret); - } - ret.extend(dep_build_script(unit, bcx)); - - // If this target is a binary, test, example, etc, then it depends on - // the library of the same package. The call to `resolve.deps` above - // didn't include `pkg` in the return values, so we need to special case - // it here and see if we need to push `(pkg, pkg_lib_target)`. - if unit.target.is_lib() && unit.mode != CompileMode::Doctest { - return Ok(ret); - } - ret.extend(maybe_lib(unit, bcx, unit_for)); - - // If any integration tests/benches are being run, make sure that - // binaries are built as well. - if !unit.mode.is_check() - && unit.mode.is_any_test() - && (unit.target.is_test() || unit.target.is_bench()) - { - ret.extend( - unit.pkg - .targets() - .iter() - .filter(|t| { - let no_required_features = Vec::new(); - - t.is_bin() && - // Skip binaries with required features that have not been selected. 
- t.required_features().unwrap_or(&no_required_features).iter().all(|f| { - bcx.resolve.features(id).contains(f) - }) - }) - .map(|t| { - ( - new_unit( - bcx, - unit.pkg, - t, - UnitFor::new_normal(), - unit.kind.for_target(t), - CompileMode::Build, - ), - UnitFor::new_normal(), - ) - }), - ); - } - - Ok(ret) -} - -/// Returns the dependencies needed to run a build script. -/// -/// The `unit` provided must represent an execution of a build script, and -/// the returned set of units must all be run before `unit` is run. -fn compute_deps_custom_build<'a, 'cfg>( - unit: &Unit<'a>, - bcx: &BuildContext<'a, 'cfg>, -) -> CargoResult, UnitFor)>> { - // When not overridden, then the dependencies to run a build script are: - // - // 1. Compiling the build script itself. - // 2. For each immediate dependency of our package which has a `links` - // key, the execution of that build script. - // - // We don't have a great way of handling (2) here right now so this is - // deferred until after the graph of all unit dependencies has been - // constructed. - let unit = new_unit( - bcx, - unit.pkg, - unit.target, - UnitFor::new_build(), - // Build scripts always compiled for the host. - Kind::Host, - CompileMode::Build, - ); - // All dependencies of this unit should use profiles for custom - // builds. - Ok(vec![(unit, UnitFor::new_build())]) -} - -/// Returns the dependencies necessary to document a package. -fn compute_deps_doc<'a, 'cfg, 'tmp>( - unit: &Unit<'a>, - state: &mut State<'a, 'cfg, 'tmp>, -) -> CargoResult, UnitFor)>> { - let bcx = state.cx.bcx; - let deps = bcx - .resolve - .deps(unit.pkg.package_id()) - .filter(|&(_id, deps)| { - deps.iter().any(|dep| match dep.kind() { - DepKind::Normal => bcx.dep_platform_activated(dep, unit.kind), - _ => false, - }) - }); - - // To document a library, we depend on dependencies actually being - // built. If we're documenting *all* libraries, then we also depend on - // the documentation of the library being built. - let mut ret = Vec::new(); - for (id, _deps) in deps { - let dep = match state.get(id)? { - Some(dep) => dep, - None => continue, - }; - let lib = match dep.targets().iter().find(|t| t.is_lib()) { - Some(lib) => lib, - None => continue, - }; - // Rustdoc only needs rmeta files for regular dependencies. - // However, for plugins/proc macros, deps should be built like normal. - let mode = check_or_build_mode(unit.mode, lib); - let dep_unit_for = UnitFor::new_normal().with_for_host(lib.for_host()); - let lib_unit = new_unit(bcx, dep, lib, dep_unit_for, unit.kind.for_target(lib), mode); - ret.push((lib_unit, dep_unit_for)); - if let CompileMode::Doc { deps: true } = unit.mode { - // Document this lib as well. - let doc_unit = new_unit( - bcx, - dep, - lib, - dep_unit_for, - unit.kind.for_target(lib), - unit.mode, - ); - ret.push((doc_unit, dep_unit_for)); - } - } - - // Be sure to build/run the build script for documented libraries. - ret.extend(dep_build_script(unit, bcx)); - - // If we document a binary/example, we need the library available. 
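The edge filter in `compute_deps` above applies three tests to each resolved dependency: build scripts only see build-dependencies, dev-dependencies only apply to tests/benches/examples, and platform-gated dependencies must match the compile target. A condensed sketch of that predicate with simplified types:

```rust
// Condensed sketch of the `compute_deps` filter (simplified types): keep a
// resolved dependency edge only if its kind, transitivity and platform all
// make sense for the unit being compiled.
struct DepInfo {
    is_build: bool,      // build-dependency?
    is_transitive: bool, // false for dev-dependencies
    platform_ok: bool,   // result of the platform check
}

struct ForUnit {
    is_custom_build: bool,
    is_test_or_example: bool,
}

fn dep_applies(unit: &ForUnit, dep: &DepInfo) -> bool {
    // Build scripts want only build-dependencies; everything else wants the rest.
    if unit.is_custom_build != dep.is_build {
        return false;
    }
    // Dev-dependencies only matter for tests, benches and examples.
    if !dep.is_transitive && !unit.is_test_or_example {
        return false;
    }
    // Platform-gated dependencies must match the compile target.
    dep.platform_ok
}
```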
- if unit.target.is_bin() || unit.target.is_example() { - ret.extend(maybe_lib(unit, bcx, UnitFor::new_normal())); - } - Ok(ret) -} - -fn maybe_lib<'a>( - unit: &Unit<'a>, - bcx: &BuildContext<'a, '_>, - unit_for: UnitFor, -) -> Option<(Unit<'a>, UnitFor)> { - unit.pkg.targets().iter().find(|t| t.linkable()).map(|t| { - let mode = check_or_build_mode(unit.mode, t); - let unit = new_unit(bcx, unit.pkg, t, unit_for, unit.kind.for_target(t), mode); - (unit, unit_for) - }) -} - -/// If a build script is scheduled to be run for the package specified by -/// `unit`, this function will return the unit to run that build script. -/// -/// Overriding a build script simply means that the running of the build -/// script itself doesn't have any dependencies, so even in that case a unit -/// of work is still returned. `None` is only returned if the package has no -/// build script. -fn dep_build_script<'a>( - unit: &Unit<'a>, - bcx: &BuildContext<'a, '_>, -) -> Option<(Unit<'a>, UnitFor)> { - unit.pkg - .targets() - .iter() - .find(|t| t.is_custom_build()) - .map(|t| { - // The profile stored in the Unit is the profile for the thing - // the custom build script is running for. - let unit = bcx.units.intern( - unit.pkg, - t, - bcx.profiles.get_profile_run_custom_build(&unit.profile), - unit.kind, - CompileMode::RunCustomBuild, - ); - - (unit, UnitFor::new_build()) - }) -} - -/// Choose the correct mode for dependencies. -fn check_or_build_mode(mode: CompileMode, target: &Target) -> CompileMode { - match mode { - CompileMode::Check { .. } | CompileMode::Doc { .. } => { - if target.for_host() { - // Plugin and proc macro targets should be compiled like - // normal. - CompileMode::Build - } else { - // Regular dependencies should not be checked with --test. - // Regular dependencies of doc targets should emit rmeta only. - CompileMode::Check { test: false } - } - } - _ => CompileMode::Build, - } -} - -fn new_unit<'a>( - bcx: &BuildContext<'a, '_>, - pkg: &'a Package, - target: &'a Target, - unit_for: UnitFor, - kind: Kind, - mode: CompileMode, -) -> Unit<'a> { - let profile = bcx.profiles.get_profile( - pkg.package_id(), - bcx.ws.is_member(pkg), - unit_for, - mode, - bcx.build_config.release, - ); - - bcx.units.intern(pkg, target, profile, kind, mode) -} - -/// Fill in missing dependencies for units of the `RunCustomBuild` -/// -/// As mentioned above in `compute_deps_custom_build` each build script -/// execution has two dependencies. The first is compiling the build script -/// itself (already added) and the second is that all crates the package of the -/// build script depends on with `links` keys, their build script execution. (a -/// bit confusing eh?) -/// -/// Here we take the entire `deps` map and add more dependencies from execution -/// of one build script to execution of another build script. -fn connect_run_custom_build_deps(state: &mut State<'_, '_, '_>) { - let mut new_deps = Vec::new(); - - { - // First up build a reverse dependency map. This is a mapping of all - // `RunCustomBuild` known steps to the unit which depends on them. For - // example a library might depend on a build script, so this map will - // have the build script as the key and the library would be in the - // value's set. 
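The reverse map described above is a plain inversion of the adjacency lists: each `RunCustomBuild` node ends up mapped to the set of units that depend on it. A generic sketch of that inversion:

```rust
use std::collections::{HashMap, HashSet};

// Generic sketch of the reverse-dependency map described above: invert an
// adjacency map so each node maps to the set of nodes that depend on it.
fn reverse_deps<'a>(
    deps: &'a HashMap<String, Vec<String>>,
) -> HashMap<&'a String, HashSet<&'a String>> {
    let mut rev: HashMap<&String, HashSet<&String>> = HashMap::new();
    for (unit, unit_deps) in deps {
        for dep in unit_deps {
            rev.entry(dep).or_insert_with(HashSet::new).insert(unit);
        }
    }
    rev
}
```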
- let mut reverse_deps = HashMap::new(); - for (unit, deps) in state.cx.unit_dependencies.iter() { - for dep in deps { - if dep.mode == CompileMode::RunCustomBuild { - reverse_deps - .entry(dep) - .or_insert_with(HashSet::new) - .insert(unit); - } - } - } - - // Next, we take a look at all build scripts executions listed in the - // dependency map. Our job here is to take everything that depends on - // this build script (from our reverse map above) and look at the other - // package dependencies of these parents. - // - // If we depend on a linkable target and the build script mentions - // `links`, then we depend on that package's build script! Here we use - // `dep_build_script` to manufacture an appropriate build script unit to - // depend on. - for unit in state - .cx - .unit_dependencies - .keys() - .filter(|k| k.mode == CompileMode::RunCustomBuild) - { - let reverse_deps = match reverse_deps.get(unit) { - Some(set) => set, - None => continue, - }; - - let to_add = reverse_deps - .iter() - .flat_map(|reverse_dep| state.cx.unit_dependencies[reverse_dep].iter()) - .filter(|other| { - other.pkg != unit.pkg - && other.target.linkable() - && other.pkg.manifest().links().is_some() - }) - .filter_map(|other| dep_build_script(other, state.cx.bcx).map(|p| p.0)) - .collect::>(); - - if !to_add.is_empty() { - new_deps.push((*unit, to_add)); - } - } - } - - // And finally, add in all the missing dependencies! - for (unit, new_deps) in new_deps { - state - .cx - .unit_dependencies - .get_mut(&unit) - .unwrap() - .extend(new_deps); - } -} - -/// Records the list of units which are required to emit metadata. -/// -/// Units which depend only on the metadata of others requires the others to -/// actually produce metadata, so we'll record that here. -fn record_units_requiring_metadata(cx: &mut Context<'_, '_>) { - for (key, deps) in cx.unit_dependencies.iter() { - for dep in deps { - if cx.only_requires_rmeta(key, dep) { - cx.rmeta_required.insert(*dep); - } - } - } -} - -impl<'a, 'cfg, 'tmp> State<'a, 'cfg, 'tmp> { - fn get(&mut self, id: PackageId) -> CargoResult> { - if let Some(pkg) = self.cx.package_cache.get(&id) { - return Ok(Some(pkg)); - } - if !self.waiting_on_download.insert(id) { - return Ok(None); - } - if let Some(pkg) = self.downloads.start(id)? { - self.cx.package_cache.insert(id, pkg); - self.waiting_on_download.remove(&id); - return Ok(Some(pkg)); - } - Ok(None) - } - - /// Completes at least one downloading, maybe waiting for more to complete. - /// - /// This function will block the current thread waiting for at least one - /// crate to finish downloading. The function may continue to download more - /// crates if it looks like there's a long enough queue of crates to keep - /// downloading. When only a handful of packages remain this function - /// returns, and it's hoped that by returning we'll be able to push more - /// packages to download into the queue. - fn finish_some_downloads(&mut self) -> CargoResult<()> { - assert!(self.downloads.remaining() > 0); - loop { - let pkg = self.downloads.wait()?; - self.waiting_on_download.remove(&pkg.package_id()); - self.cx.package_cache.insert(pkg.package_id(), pkg); - - // Arbitrarily choose that 5 or more packages concurrently download - // is a good enough number to "fill the network pipe". If we have - // less than this let's recompute the whole unit dependency graph - // again and try to find some more packages to download. - if self.downloads.remaining() < 5 { - break; - } - } - Ok(()) - } -} +//! 
Constructs the dependency graph for compilation.
+//!
+//! Rust code is typically organized as a set of Cargo packages. The
+//! dependencies between the packages themselves are stored in the
+//! `Resolve` struct. However, we can't use that information as is for
+//! compilation! A package typically contains several targets, or crates,
+//! and these targets have inter-dependencies. For example, you need to
+//! compile the `lib` target before the `bin` one, and you need to compile
+//! `build.rs` before either of those.
+//!
+//! So, we need to lower the `Resolve`, which specifies dependencies between
+//! *packages*, to a graph of dependencies between their *targets*, and this
+//! is exactly what this module is doing! Well, almost exactly: another
+//! complication is that we might want to compile the same target several times
+//! (for example, with and without tests), so we actually build a dependency
+//! graph of `Unit`s, which capture these properties.
+
+use crate::core::compiler::Unit;
+use crate::core::compiler::{BuildContext, CompileMode, Context, Kind};
+use crate::core::dependency::Kind as DepKind;
+use crate::core::package::Downloads;
+use crate::core::profiles::UnitFor;
+use crate::core::{Package, PackageId, Target};
+use crate::CargoResult;
+use log::trace;
+use std::collections::{HashMap, HashSet};
+
+struct State<'a, 'cfg, 'tmp> {
+    cx: &'tmp mut Context<'a, 'cfg>,
+    waiting_on_download: HashSet<PackageId>,
+    downloads: Downloads<'a, 'cfg>,
+}
+
+pub fn build_unit_dependencies<'a, 'cfg>(
+    cx: &mut Context<'a, 'cfg>,
+    roots: &[Unit<'a>],
+) -> CargoResult<()> {
+    assert!(
+        cx.unit_dependencies.is_empty(),
+        "can only build unit deps once"
+    );
+
+    let mut state = State {
+        downloads: cx.bcx.packages.enable_download()?,
+        cx,
+        waiting_on_download: HashSet::new(),
+    };
+
+    loop {
+        for unit in roots.iter() {
+            state.get(unit.pkg.package_id())?;
+
+            // Dependencies of tests/benches should not have `panic` set.
+            // We check the global test mode to see if we are running in `cargo
+            // test`, in which case we ensure all dependencies have `panic`
+            // cleared, and avoid building the lib thrice (once with `panic`, once
+            // without, once for `--test`). In particular, the lib included for
+            // doc tests and examples is built in `Build` mode here.
+            let unit_for = if unit.mode.is_any_test() || state.cx.bcx.build_config.test() {
+                UnitFor::new_test()
+            } else if unit.target.is_custom_build() {
+                // This normally doesn't happen, except `clean` aggressively
+                // generates all units.
+                UnitFor::new_build()
+            } else if unit.target.for_host() {
+                // Proc macros / plugins should never have `panic` set.
+                UnitFor::new_compiler()
+            } else {
+                UnitFor::new_normal()
+            };
+            deps_of(unit, &mut state, unit_for)?;
+        }
+
+        if !state.waiting_on_download.is_empty() {
+            state.finish_some_downloads()?;
+            state.cx.unit_dependencies.clear();
+        } else {
+            break;
+        }
+    }
+
+    connect_run_custom_build_deps(&mut state);
+
+    trace!("ALL UNIT DEPENDENCIES {:#?}", state.cx.unit_dependencies);
+
+    record_units_requiring_metadata(state.cx);
+
+    // Dependencies are used in tons of places throughout the backend, many of
+    // which affect the determinism of the build itself. As a result, be sure
+    // that dependency lists are always sorted to ensure we've always got a
+    // deterministic output.
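A minimal, self-contained sketch of the idea the module docs and the comment above describe: package-level edges are lowered into per-target units, and every adjacency list is sorted so hash-map iteration order never leaks into the build. The `Unit` struct here is a deliberately simplified stand-in, not Cargo's real type.

use std::collections::HashMap;

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct Unit {
    pkg: &'static str,
    target: &'static str, // e.g. "lib", "bin", "build.rs"
}

fn main() {
    let bin = Unit { pkg: "app", target: "bin" };
    let lib = Unit { pkg: "app", target: "lib" };
    let dep_lib = Unit { pkg: "dep", target: "lib" };

    let mut unit_dependencies: HashMap<Unit, Vec<Unit>> = HashMap::new();
    // The binary depends on its own lib and on `dep`'s lib; the lib depends on `dep`'s lib.
    unit_dependencies.insert(bin, vec![dep_lib.clone(), lib.clone()]);
    unit_dependencies.insert(lib, vec![dep_lib.clone()]);
    unit_dependencies.insert(dep_lib, vec![]);

    // Sort every dependency list so the resulting graph is deterministic.
    for list in unit_dependencies.values_mut() {
        list.sort();
    }
    println!("{:#?}", unit_dependencies);
}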
+ for list in state.cx.unit_dependencies.values_mut() { + list.sort(); + } + + Ok(()) +} + +fn deps_of<'a, 'cfg, 'tmp>( + unit: &Unit<'a>, + state: &mut State<'a, 'cfg, 'tmp>, + unit_for: UnitFor, +) -> CargoResult<()> { + // Currently the `unit_dependencies` map does not include `unit_for`. This should + // be safe for now. `TestDependency` only exists to clear the `panic` + // flag, and you'll never ask for a `unit` with `panic` set as a + // `TestDependency`. `CustomBuild` should also be fine since if the + // requested unit's settings are the same as `Any`, `CustomBuild` can't + // affect anything else in the hierarchy. + if !state.cx.unit_dependencies.contains_key(unit) { + let unit_deps = compute_deps(unit, state, unit_for)?; + let to_insert: Vec<_> = unit_deps.iter().map(|&(unit, _)| unit).collect(); + state.cx.unit_dependencies.insert(*unit, to_insert); + for (unit, unit_for) in unit_deps { + deps_of(&unit, state, unit_for)?; + } + } + Ok(()) +} + +/// For a package, returns all targets that are registered as dependencies +/// for that package. +/// This returns a `Vec` of `(Unit, UnitFor)` pairs. The `UnitFor` +/// is the profile type that should be used for dependencies of the unit. +fn compute_deps<'a, 'cfg, 'tmp>( + unit: &Unit<'a>, + state: &mut State<'a, 'cfg, 'tmp>, + unit_for: UnitFor, +) -> CargoResult, UnitFor)>> { + if unit.mode.is_run_custom_build() { + return compute_deps_custom_build(unit, state.cx.bcx); + } else if unit.mode.is_doc() { + // Note: this does not include doc test. + return compute_deps_doc(unit, state); + } + + let bcx = state.cx.bcx; + let id = unit.pkg.package_id(); + let deps = bcx.resolve.deps(id).filter(|&(_id, deps)| { + assert!(!deps.is_empty()); + deps.iter().any(|dep| { + // If this target is a build command, then we only want build + // dependencies, otherwise we want everything *other than* build + // dependencies. + if unit.target.is_custom_build() != dep.is_build() { + return false; + } + + // If this dependency is **not** a transitive dependency, then it + // only applies to test/example targets. + if !dep.is_transitive() + && !unit.target.is_test() + && !unit.target.is_example() + && !unit.mode.is_any_test() + { + return false; + } + + // If this dependency is only available for certain platforms, + // make sure we're only enabling it for that platform. + if !bcx.dep_platform_activated(dep, unit.kind) { + return false; + } + + // If the dependency is optional, then we're only activating it + // if the corresponding feature was activated + if dep.is_optional() { + // Same for features this dependency is referenced + if let Some(platform) = bcx.resolve.features(id).get(&*dep.name_in_toml()) { + if !bcx.platform_activated(platform.as_ref(), unit.kind) { + return false; + } + } else { + return false; + } + } + + // If we've gotten past all that, then this dependency is + // actually used! + true + }) + }); + + let mut ret = Vec::new(); + for (id, _) in deps { + let pkg = match state.get(id)? 
{ + Some(pkg) => pkg, + None => continue, + }; + let lib = match pkg.targets().iter().find(|t| t.is_lib()) { + Some(t) => t, + None => continue, + }; + let mode = check_or_build_mode(unit.mode, lib); + let dep_unit_for = unit_for.with_for_host(lib.for_host()); + + if bcx.config.cli_unstable().dual_proc_macros + && lib.proc_macro() + && unit.kind == Kind::Target + { + let unit = new_unit(bcx, pkg, lib, dep_unit_for, Kind::Target, mode); + ret.push((unit, dep_unit_for)); + let unit = new_unit(bcx, pkg, lib, dep_unit_for, Kind::Host, mode); + ret.push((unit, dep_unit_for)); + } else { + let unit = new_unit(bcx, pkg, lib, dep_unit_for, unit.kind.for_target(lib), mode); + ret.push((unit, dep_unit_for)); + } + } + + // If this target is a build script, then what we've collected so far is + // all we need. If this isn't a build script, then it depends on the + // build script if there is one. + if unit.target.is_custom_build() { + return Ok(ret); + } + ret.extend(dep_build_script(unit, bcx)); + + // If this target is a binary, test, example, etc, then it depends on + // the library of the same package. The call to `resolve.deps` above + // didn't include `pkg` in the return values, so we need to special case + // it here and see if we need to push `(pkg, pkg_lib_target)`. + if unit.target.is_lib() && unit.mode != CompileMode::Doctest { + return Ok(ret); + } + ret.extend(maybe_lib(unit, bcx, unit_for)); + + // If any integration tests/benches are being run, make sure that + // binaries are built as well. + if !unit.mode.is_check() + && unit.mode.is_any_test() + && (unit.target.is_test() || unit.target.is_bench()) + { + ret.extend( + unit.pkg + .targets() + .iter() + .filter(|t| { + let no_required_features = Vec::new(); + + t.is_bin() && + // Skip binaries with required features that have not been selected. + t.required_features().unwrap_or(&no_required_features).iter().all(|f| { + bcx.resolve.features(id).contains_key(f) && bcx.platform_activated(bcx.resolve.features(id).get(f).unwrap().as_ref(), unit.kind) + }) + }) + .map(|t| { + ( + new_unit( + bcx, + unit.pkg, + t, + UnitFor::new_normal(), + unit.kind.for_target(t), + CompileMode::Build, + ), + UnitFor::new_normal(), + ) + }), + ); + } + + Ok(ret) +} + +/// Returns the dependencies needed to run a build script. +/// +/// The `unit` provided must represent an execution of a build script, and +/// the returned set of units must all be run before `unit` is run. +fn compute_deps_custom_build<'a, 'cfg>( + unit: &Unit<'a>, + bcx: &BuildContext<'a, 'cfg>, +) -> CargoResult, UnitFor)>> { + // When not overridden, then the dependencies to run a build script are: + // + // 1. Compiling the build script itself. + // 2. For each immediate dependency of our package which has a `links` + // key, the execution of that build script. + // + // We don't have a great way of handling (2) here right now so this is + // deferred until after the graph of all unit dependencies has been + // constructed. + let unit = new_unit( + bcx, + unit.pkg, + unit.target, + UnitFor::new_build(), + // Build scripts always compiled for the host. + Kind::Host, + CompileMode::Build, + ); + // All dependencies of this unit should use profiles for custom + // builds. + Ok(vec![(unit, UnitFor::new_build())]) +} + +/// Returns the dependencies necessary to document a package. 
+fn compute_deps_doc<'a, 'cfg, 'tmp>( + unit: &Unit<'a>, + state: &mut State<'a, 'cfg, 'tmp>, +) -> CargoResult, UnitFor)>> { + let bcx = state.cx.bcx; + let deps = bcx + .resolve + .deps(unit.pkg.package_id()) + .filter(|&(_id, deps)| { + deps.iter().any(|dep| match dep.kind() { + DepKind::Normal => bcx.dep_platform_activated(dep, unit.kind), + _ => false, + }) + }); + + // To document a library, we depend on dependencies actually being + // built. If we're documenting *all* libraries, then we also depend on + // the documentation of the library being built. + let mut ret = Vec::new(); + for (id, _deps) in deps { + let dep = match state.get(id)? { + Some(dep) => dep, + None => continue, + }; + let lib = match dep.targets().iter().find(|t| t.is_lib()) { + Some(lib) => lib, + None => continue, + }; + // Rustdoc only needs rmeta files for regular dependencies. + // However, for plugins/proc macros, deps should be built like normal. + let mode = check_or_build_mode(unit.mode, lib); + let dep_unit_for = UnitFor::new_normal().with_for_host(lib.for_host()); + let lib_unit = new_unit(bcx, dep, lib, dep_unit_for, unit.kind.for_target(lib), mode); + ret.push((lib_unit, dep_unit_for)); + if let CompileMode::Doc { deps: true } = unit.mode { + // Document this lib as well. + let doc_unit = new_unit( + bcx, + dep, + lib, + dep_unit_for, + unit.kind.for_target(lib), + unit.mode, + ); + ret.push((doc_unit, dep_unit_for)); + } + } + + // Be sure to build/run the build script for documented libraries. + ret.extend(dep_build_script(unit, bcx)); + + // If we document a binary/example, we need the library available. + if unit.target.is_bin() || unit.target.is_example() { + ret.extend(maybe_lib(unit, bcx, UnitFor::new_normal())); + } + Ok(ret) +} + +fn maybe_lib<'a>( + unit: &Unit<'a>, + bcx: &BuildContext<'a, '_>, + unit_for: UnitFor, +) -> Option<(Unit<'a>, UnitFor)> { + unit.pkg.targets().iter().find(|t| t.linkable()).map(|t| { + let mode = check_or_build_mode(unit.mode, t); + let unit = new_unit(bcx, unit.pkg, t, unit_for, unit.kind.for_target(t), mode); + (unit, unit_for) + }) +} + +/// If a build script is scheduled to be run for the package specified by +/// `unit`, this function will return the unit to run that build script. +/// +/// Overriding a build script simply means that the running of the build +/// script itself doesn't have any dependencies, so even in that case a unit +/// of work is still returned. `None` is only returned if the package has no +/// build script. +fn dep_build_script<'a>( + unit: &Unit<'a>, + bcx: &BuildContext<'a, '_>, +) -> Option<(Unit<'a>, UnitFor)> { + unit.pkg + .targets() + .iter() + .find(|t| t.is_custom_build()) + .map(|t| { + // The profile stored in the Unit is the profile for the thing + // the custom build script is running for. + let unit = bcx.units.intern( + unit.pkg, + t, + bcx.profiles.get_profile_run_custom_build(&unit.profile), + unit.kind, + CompileMode::RunCustomBuild, + ); + + (unit, UnitFor::new_build()) + }) +} + +/// Choose the correct mode for dependencies. +fn check_or_build_mode(mode: CompileMode, target: &Target) -> CompileMode { + match mode { + CompileMode::Check { .. } | CompileMode::Doc { .. } => { + if target.for_host() { + // Plugin and proc macro targets should be compiled like + // normal. + CompileMode::Build + } else { + // Regular dependencies should not be checked with --test. + // Regular dependencies of doc targets should emit rmeta only. 
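Read as a decision table, the rule `check_or_build_mode` encodes is: when the top-level unit is a check or doc unit, host-side dependencies (plugins and proc macros) still get a full build, while every other dependency only needs a metadata-emitting check. A simplified, self-contained restatement of that rule, with boolean flags standing in for Cargo's `CompileMode` and `Target`:

#[derive(Debug, PartialEq)]
enum Mode { Build, Check }

fn mode_for_dependency(top_level_is_check_or_doc: bool, dep_is_for_host: bool) -> Mode {
    // Host targets are always built; everything else can stop at rmeta.
    if top_level_is_check_or_doc && !dep_is_for_host {
        Mode::Check
    } else {
        Mode::Build
    }
}

fn main() {
    assert_eq!(mode_for_dependency(true, false), Mode::Check);
    assert_eq!(mode_for_dependency(true, true), Mode::Build);
    assert_eq!(mode_for_dependency(false, false), Mode::Build);
}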
+ CompileMode::Check { test: false } + } + } + _ => CompileMode::Build, + } +} + +fn new_unit<'a>( + bcx: &BuildContext<'a, '_>, + pkg: &'a Package, + target: &'a Target, + unit_for: UnitFor, + kind: Kind, + mode: CompileMode, +) -> Unit<'a> { + let profile = bcx.profiles.get_profile( + pkg.package_id(), + bcx.ws.is_member(pkg), + unit_for, + mode, + bcx.build_config.release, + ); + + bcx.units.intern(pkg, target, profile, kind, mode) +} + +/// Fill in missing dependencies for units of the `RunCustomBuild` +/// +/// As mentioned above in `compute_deps_custom_build` each build script +/// execution has two dependencies. The first is compiling the build script +/// itself (already added) and the second is that all crates the package of the +/// build script depends on with `links` keys, their build script execution. (a +/// bit confusing eh?) +/// +/// Here we take the entire `deps` map and add more dependencies from execution +/// of one build script to execution of another build script. +fn connect_run_custom_build_deps(state: &mut State<'_, '_, '_>) { + let mut new_deps = Vec::new(); + + { + // First up build a reverse dependency map. This is a mapping of all + // `RunCustomBuild` known steps to the unit which depends on them. For + // example a library might depend on a build script, so this map will + // have the build script as the key and the library would be in the + // value's set. + let mut reverse_deps = HashMap::new(); + for (unit, deps) in state.cx.unit_dependencies.iter() { + for dep in deps { + if dep.mode == CompileMode::RunCustomBuild { + reverse_deps + .entry(dep) + .or_insert_with(HashSet::new) + .insert(unit); + } + } + } + + // Next, we take a look at all build scripts executions listed in the + // dependency map. Our job here is to take everything that depends on + // this build script (from our reverse map above) and look at the other + // package dependencies of these parents. + // + // If we depend on a linkable target and the build script mentions + // `links`, then we depend on that package's build script! Here we use + // `dep_build_script` to manufacture an appropriate build script unit to + // depend on. + for unit in state + .cx + .unit_dependencies + .keys() + .filter(|k| k.mode == CompileMode::RunCustomBuild) + { + let reverse_deps = match reverse_deps.get(unit) { + Some(set) => set, + None => continue, + }; + + let to_add = reverse_deps + .iter() + .flat_map(|reverse_dep| state.cx.unit_dependencies[reverse_dep].iter()) + .filter(|other| { + other.pkg != unit.pkg + && other.target.linkable() + && other.pkg.manifest().links().is_some() + }) + .filter_map(|other| dep_build_script(other, state.cx.bcx).map(|p| p.0)) + .collect::>(); + + if !to_add.is_empty() { + new_deps.push((*unit, to_add)); + } + } + } + + // And finally, add in all the missing dependencies! + for (unit, new_deps) in new_deps { + state + .cx + .unit_dependencies + .get_mut(&unit) + .unwrap() + .extend(new_deps); + } +} + +/// Records the list of units which are required to emit metadata. +/// +/// Units which depend only on the metadata of others requires the others to +/// actually produce metadata, so we'll record that here. 
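A rough, std-only sketch of the reverse-map construction used by `connect_run_custom_build_deps` above: invert a forward `unit -> deps` map so each build-script execution points back at the units that depend on it. The string keys are simplified placeholders, not Cargo's unit types.

use std::collections::{HashMap, HashSet};

fn reverse(forward: &HashMap<&'static str, Vec<&'static str>>) -> HashMap<&'static str, HashSet<&'static str>> {
    let mut reverse_deps: HashMap<&'static str, HashSet<&'static str>> = HashMap::new();
    for (unit, deps) in forward {
        for dep in deps {
            // Record that `unit` depends on `dep`.
            reverse_deps.entry(*dep).or_insert_with(HashSet::new).insert(*unit);
        }
    }
    reverse_deps
}

fn main() {
    let mut forward = HashMap::new();
    forward.insert("lib", vec!["run-build-script"]);
    forward.insert("bin", vec!["lib"]);
    let reverse = reverse(&forward);
    assert!(reverse["run-build-script"].contains("lib"));
}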
+fn record_units_requiring_metadata(cx: &mut Context<'_, '_>) { + for (key, deps) in cx.unit_dependencies.iter() { + for dep in deps { + if cx.only_requires_rmeta(key, dep) { + cx.rmeta_required.insert(*dep); + } + } + } +} + +impl<'a, 'cfg, 'tmp> State<'a, 'cfg, 'tmp> { + fn get(&mut self, id: PackageId) -> CargoResult> { + if let Some(pkg) = self.cx.package_cache.get(&id) { + return Ok(Some(pkg)); + } + if !self.waiting_on_download.insert(id) { + return Ok(None); + } + if let Some(pkg) = self.downloads.start(id)? { + self.cx.package_cache.insert(id, pkg); + self.waiting_on_download.remove(&id); + return Ok(Some(pkg)); + } + Ok(None) + } + + /// Completes at least one downloading, maybe waiting for more to complete. + /// + /// This function will block the current thread waiting for at least one + /// crate to finish downloading. The function may continue to download more + /// crates if it looks like there's a long enough queue of crates to keep + /// downloading. When only a handful of packages remain this function + /// returns, and it's hoped that by returning we'll be able to push more + /// packages to download into the queue. + fn finish_some_downloads(&mut self) -> CargoResult<()> { + assert!(self.downloads.remaining() > 0); + loop { + let pkg = self.downloads.wait()?; + self.waiting_on_download.remove(&pkg.package_id()); + self.cx.package_cache.insert(pkg.package_id(), pkg); + + // Arbitrarily choose that 5 or more packages concurrently download + // is a good enough number to "fill the network pipe". If we have + // less than this let's recompute the whole unit dependency graph + // again and try to find some more packages to download. + if self.downloads.remaining() < 5 { + break; + } + } + Ok(()) + } +} diff --git a/src/cargo/core/compiler/custom_build.rs b/src/cargo/core/compiler/custom_build.rs index 7978d1d480e..591f784bf7b 100644 --- a/src/cargo/core/compiler/custom_build.rs +++ b/src/cargo/core/compiler/custom_build.rs @@ -1,732 +1,732 @@ -use std::collections::hash_map::{Entry, HashMap}; -use std::collections::{BTreeSet, HashSet}; -use std::fs; -use std::path::{Path, PathBuf}; -use std::str; -use std::sync::{Arc, Mutex}; - -use crate::core::compiler::job_queue::JobState; -use crate::core::PackageId; -use crate::util::errors::{CargoResult, CargoResultExt}; -use crate::util::machine_message::{self, Message}; -use crate::util::Cfg; -use crate::util::{self, internal, paths, profile}; - -use super::job::{Freshness, Job, Work}; -use super::{fingerprint, Context, Kind, TargetConfig, Unit}; - -/// Contains the parsed output of a custom build script. -#[derive(Clone, Debug, Hash)] -pub struct BuildOutput { - /// Paths to pass to rustc with the `-L` flag. - pub library_paths: Vec, - /// Names and link kinds of libraries, suitable for the `-l` flag. - pub library_links: Vec, - /// Linker arguments suitable to be passed to `-C link-arg=` - pub linker_args: Vec, - /// Various `--cfg` flags to pass to the compiler. - pub cfgs: Vec, - /// Additional environment variables to run the compiler with. - pub env: Vec<(String, String)>, - /// Metadata to pass to the immediate dependencies. - pub metadata: Vec<(String, String)>, - /// Paths to trigger a rerun of this build script. - /// May be absolute or relative paths (relative to package root). - pub rerun_if_changed: Vec, - /// Environment variables which, when changed, will cause a rebuild. - pub rerun_if_env_changed: Vec, - /// Warnings generated by this build. - pub warnings: Vec, -} - -/// Map of packages to build info. 
-pub type BuildMap = HashMap<(PackageId, Kind), BuildOutput>; - -/// Build info and overrides. -pub struct BuildState { - pub outputs: Mutex, - overrides: HashMap<(String, Kind), BuildOutput>, -} - -#[derive(Default)] -pub struct BuildScripts { - // Cargo will use this `to_link` vector to add `-L` flags to compiles as we - // propagate them upwards towards the final build. Note, however, that we - // need to preserve the ordering of `to_link` to be topologically sorted. - // This will ensure that build scripts which print their paths properly will - // correctly pick up the files they generated (if there are duplicates - // elsewhere). - // - // To preserve this ordering, the (id, kind) is stored in two places, once - // in the `Vec` and once in `seen_to_link` for a fast lookup. We maintain - // this as we're building interactively below to ensure that the memory - // usage here doesn't blow up too much. - // - // For more information, see #2354. - pub to_link: Vec<(PackageId, Kind)>, - seen_to_link: HashSet<(PackageId, Kind)>, - pub plugins: BTreeSet, -} - -#[derive(Debug)] -pub struct BuildDeps { - pub build_script_output: PathBuf, - pub rerun_if_changed: Vec, - pub rerun_if_env_changed: Vec, -} - -/// Prepares a `Work` that executes the target as a custom build script. -/// -/// The `req` given is the requirement which this run of the build script will -/// prepare work for. If the requirement is specified as both the target and the -/// host platforms it is assumed that the two are equal and the build script is -/// only run once (not twice). -pub fn prepare<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult { - let _p = profile::start(format!( - "build script prepare: {}/{}", - unit.pkg, - unit.target.name() - )); - - let key = (unit.pkg.package_id(), unit.kind); - - if cx.build_script_overridden.contains(&key) { - fingerprint::prepare_target(cx, unit, false) - } else { - build_work(cx, unit) - } -} - -fn emit_build_output(state: &JobState<'_>, output: &BuildOutput, package_id: PackageId) { - let library_paths = output - .library_paths - .iter() - .map(|l| l.display().to_string()) - .collect::>(); - - let msg = machine_message::BuildScript { - package_id, - linked_libs: &output.library_links, - linked_paths: &library_paths, - cfgs: &output.cfgs, - env: &output.env, - } - .to_json_string(); - state.stdout(msg); -} - -fn build_work<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult { - assert!(unit.mode.is_run_custom_build()); - let bcx = &cx.bcx; - let dependencies = cx.dep_targets(unit); - let build_script_unit = dependencies - .iter() - .find(|d| !d.mode.is_run_custom_build() && d.target.is_custom_build()) - .expect("running a script not depending on an actual script"); - let script_dir = cx.files().build_script_dir(build_script_unit); - let script_out_dir = cx.files().build_script_out_dir(unit); - let script_run_dir = cx.files().build_script_run_dir(unit); - let build_plan = bcx.build_config.build_plan; - let invocation_name = unit.buildkey(); - - if let Some(deps) = unit.pkg.manifest().metabuild() { - prepare_metabuild(cx, build_script_unit, deps)?; - } - - // Building the command to execute - let to_exec = script_dir.join(unit.target.name()); - - // Start preparing the process to execute, starting out with some - // environment variables. Note that the profile-related environment - // variables are not set with this the build script's profile but rather the - // package's library profile. 
- // NOTE: if you add any profile flags, be sure to update - // `Profiles::get_profile_run_custom_build` so that those flags get - // carried over. - let to_exec = to_exec.into_os_string(); - let mut cmd = cx.compilation.host_process(to_exec, unit.pkg)?; - let debug = unit.profile.debuginfo.unwrap_or(0) != 0; - cmd.env("OUT_DIR", &script_out_dir) - .env("CARGO_MANIFEST_DIR", unit.pkg.root()) - .env("NUM_JOBS", &bcx.jobs().to_string()) - .env( - "TARGET", - &match unit.kind { - Kind::Host => bcx.host_triple(), - Kind::Target => bcx.target_triple(), - }, - ) - .env("DEBUG", debug.to_string()) - .env("OPT_LEVEL", &unit.profile.opt_level.to_string()) - .env( - "PROFILE", - if bcx.build_config.release { - "release" - } else { - "debug" - }, - ) - .env("HOST", &bcx.host_triple()) - .env("RUSTC", &bcx.rustc.path) - .env("RUSTDOC", &*bcx.config.rustdoc()?) - .inherit_jobserver(&cx.jobserver); - - if let Some(ref linker) = bcx.target_config.linker { - cmd.env("RUSTC_LINKER", linker); - } - - if let Some(links) = unit.pkg.manifest().links() { - cmd.env("CARGO_MANIFEST_LINKS", links); - } - - // Be sure to pass along all enabled features for this package, this is the - // last piece of statically known information that we have. - for feat in bcx.resolve.features(unit.pkg.package_id()).iter() { - cmd.env(&format!("CARGO_FEATURE_{}", super::envify(feat)), "1"); - } - - let mut cfg_map = HashMap::new(); - for cfg in bcx.cfg(unit.kind) { - match *cfg { - Cfg::Name(ref n) => { - cfg_map.insert(n.clone(), None); - } - Cfg::KeyPair(ref k, ref v) => { - if let Some(ref mut values) = - *cfg_map.entry(k.clone()).or_insert_with(|| Some(Vec::new())) - { - values.push(v.clone()) - } - } - } - } - for (k, v) in cfg_map { - let k = format!("CARGO_CFG_{}", super::envify(&k)); - match v { - Some(list) => { - cmd.env(&k, list.join(",")); - } - None => { - cmd.env(&k, ""); - } - } - } - - // Gather the set of native dependencies that this package has along with - // some other variables to close over. - // - // This information will be used at build-time later on to figure out which - // sorts of variables need to be discovered at that time. - let lib_deps = { - dependencies - .iter() - .filter_map(|unit| { - if unit.mode.is_run_custom_build() { - Some(( - unit.pkg.manifest().links().unwrap().to_string(), - unit.pkg.package_id(), - )) - } else { - None - } - }) - .collect::>() - }; - let pkg_name = unit.pkg.to_string(); - let build_state = Arc::clone(&cx.build_state); - let id = unit.pkg.package_id(); - let output_file = script_run_dir.join("output"); - let err_file = script_run_dir.join("stderr"); - let root_output_file = script_run_dir.join("root-output"); - let host_target_root = cx.files().host_root().to_path_buf(); - let all = ( - id, - pkg_name.clone(), - Arc::clone(&build_state), - output_file.clone(), - script_out_dir.clone(), - ); - let build_scripts = super::load_build_deps(cx, unit); - let kind = unit.kind; - let json_messages = bcx.build_config.emit_json(); - let extra_verbose = bcx.config.extra_verbose(); - let (prev_output, prev_script_out_dir) = prev_build_output(cx, unit); - - fs::create_dir_all(&script_dir)?; - fs::create_dir_all(&script_out_dir)?; - - // Prepare the unit of "dirty work" which will actually run the custom build - // command. - // - // Note that this has to do some extra work just before running the command - // to determine extra environment variables and such. - let dirty = Work::new(move |state| { - // Make sure that OUT_DIR exists. 
- // - // If we have an old build directory, then just move it into place, - // otherwise create it! - if fs::metadata(&script_out_dir).is_err() { - fs::create_dir(&script_out_dir).chain_err(|| { - internal( - "failed to create script output directory for \ - build command", - ) - })?; - } - - // For all our native lib dependencies, pick up their metadata to pass - // along to this custom build command. We're also careful to augment our - // dynamic library search path in case the build script depended on any - // native dynamic libraries. - if !build_plan { - let build_state = build_state.outputs.lock().unwrap(); - for (name, id) in lib_deps { - let key = (id, kind); - let state = build_state.get(&key).ok_or_else(|| { - internal(format!( - "failed to locate build state for env \ - vars: {}/{:?}", - id, kind - )) - })?; - let data = &state.metadata; - for &(ref key, ref value) in data.iter() { - cmd.env( - &format!("DEP_{}_{}", super::envify(&name), super::envify(key)), - value, - ); - } - } - if let Some(build_scripts) = build_scripts { - super::add_plugin_deps(&mut cmd, &build_state, &build_scripts, &host_target_root)?; - } - } - - if build_plan { - state.build_plan(invocation_name, cmd.clone(), Arc::new(Vec::new())); - return Ok(()); - } - - // And now finally, run the build command itself! - state.running(&cmd); - let timestamp = paths::set_invocation_time(&script_run_dir)?; - let prefix = format!("[{} {}] ", id.name(), id.version()); - let output = cmd - .exec_with_streaming( - &mut |stdout| { - if extra_verbose { - state.stdout(format!("{}{}", prefix, stdout)); - } - Ok(()) - }, - &mut |stderr| { - if extra_verbose { - state.stderr(format!("{}{}", prefix, stderr)); - } - Ok(()) - }, - true, - ) - .chain_err(|| format!("failed to run custom build command for `{}`", pkg_name))?; - - // After the build command has finished running, we need to be sure to - // remember all of its output so we can later discover precisely what it - // was, even if we don't run the build command again (due to freshness). - // - // This is also the location where we provide feedback into the build - // state informing what variables were discovered via our script as - // well. - paths::write(&output_file, &output.stdout)?; - filetime::set_file_times(output_file, timestamp, timestamp)?; - paths::write(&err_file, &output.stderr)?; - paths::write(&root_output_file, util::path2bytes(&script_out_dir)?)?; - let parsed_output = - BuildOutput::parse(&output.stdout, &pkg_name, &script_out_dir, &script_out_dir)?; - - if json_messages { - emit_build_output(state, &parsed_output, id); - } - build_state.insert(id, kind, parsed_output); - Ok(()) - }); - - // Now that we've prepared our work-to-do, we need to prepare the fresh work - // itself to run when we actually end up just discarding what we calculated - // above. - let fresh = Work::new(move |state| { - let (id, pkg_name, build_state, output_file, script_out_dir) = all; - let output = match prev_output { - Some(output) => output, - None => BuildOutput::parse_file( - &output_file, - &pkg_name, - &prev_script_out_dir, - &script_out_dir, - )?, - }; - - if json_messages { - emit_build_output(state, &output, id); - } - - build_state.insert(id, kind, output); - Ok(()) - }); - - let mut job = if cx.bcx.build_config.build_plan { - Job::new(Work::noop(), Freshness::Dirty) - } else { - fingerprint::prepare_target(cx, unit, false)? 
- }; - if job.freshness() == Freshness::Dirty { - job.before(dirty); - } else { - job.before(fresh); - } - Ok(job) -} - -impl BuildState { - pub fn new(host_config: &TargetConfig, target_config: &TargetConfig) -> BuildState { - let mut overrides = HashMap::new(); - let i1 = host_config.overrides.iter().map(|p| (p, Kind::Host)); - let i2 = target_config.overrides.iter().map(|p| (p, Kind::Target)); - for ((name, output), kind) in i1.chain(i2) { - overrides.insert((name.clone(), kind), output.clone()); - } - BuildState { - outputs: Mutex::new(HashMap::new()), - overrides, - } - } - - fn insert(&self, id: PackageId, kind: Kind, output: BuildOutput) { - self.outputs.lock().unwrap().insert((id, kind), output); - } -} - -impl BuildOutput { - pub fn parse_file( - path: &Path, - pkg_name: &str, - script_out_dir_when_generated: &Path, - script_out_dir: &Path, - ) -> CargoResult { - let contents = paths::read_bytes(path)?; - BuildOutput::parse( - &contents, - pkg_name, - script_out_dir_when_generated, - script_out_dir, - ) - } - - // Parses the output of a script. - // The `pkg_name` is used for error messages. - pub fn parse( - input: &[u8], - pkg_name: &str, - script_out_dir_when_generated: &Path, - script_out_dir: &Path, - ) -> CargoResult { - let mut library_paths = Vec::new(); - let mut library_links = Vec::new(); - let mut linker_args = Vec::new(); - let mut cfgs = Vec::new(); - let mut env = Vec::new(); - let mut metadata = Vec::new(); - let mut rerun_if_changed = Vec::new(); - let mut rerun_if_env_changed = Vec::new(); - let mut warnings = Vec::new(); - let whence = format!("build script of `{}`", pkg_name); - - for line in input.split(|b| *b == b'\n') { - let line = match str::from_utf8(line) { - Ok(line) => line.trim(), - Err(..) => continue, - }; - let mut iter = line.splitn(2, ':'); - if iter.next() != Some("cargo") { - // skip this line since it doesn't start with "cargo:" - continue; - } - let data = match iter.next() { - Some(val) => val, - None => continue, - }; - - // getting the `key=value` part of the line - let mut iter = data.splitn(2, '='); - let key = iter.next(); - let value = iter.next(); - let (key, value) = match (key, value) { - (Some(a), Some(b)) => (a, b.trim_end()), - // Line started with `cargo:` but didn't match `key=value`. - _ => failure::bail!("Wrong output in {}: `{}`", whence, line), - }; - - // This will rewrite paths if the target directory has been moved. 
- let value = value.replace( - script_out_dir_when_generated.to_str().unwrap(), - script_out_dir.to_str().unwrap(), - ); - - match key { - "rustc-flags" => { - let (paths, links) = BuildOutput::parse_rustc_flags(&value, &whence)?; - library_links.extend(links.into_iter()); - library_paths.extend(paths.into_iter()); - } - "rustc-link-lib" => library_links.push(value.to_string()), - "rustc-link-search" => library_paths.push(PathBuf::from(value)), - "rustc-cdylib-link-arg" => linker_args.push(value.to_string()), - "rustc-cfg" => cfgs.push(value.to_string()), - "rustc-env" => env.push(BuildOutput::parse_rustc_env(&value, &whence)?), - "warning" => warnings.push(value.to_string()), - "rerun-if-changed" => rerun_if_changed.push(PathBuf::from(value)), - "rerun-if-env-changed" => rerun_if_env_changed.push(value.to_string()), - _ => metadata.push((key.to_string(), value.to_string())), - } - } - - Ok(BuildOutput { - library_paths, - library_links, - linker_args, - cfgs, - env, - metadata, - rerun_if_changed, - rerun_if_env_changed, - warnings, - }) - } - - pub fn parse_rustc_flags( - value: &str, - whence: &str, - ) -> CargoResult<(Vec, Vec)> { - let value = value.trim(); - let mut flags_iter = value - .split(|c: char| c.is_whitespace()) - .filter(|w| w.chars().any(|c| !c.is_whitespace())); - let (mut library_paths, mut library_links) = (Vec::new(), Vec::new()); - while let Some(flag) = flags_iter.next() { - if flag != "-l" && flag != "-L" { - failure::bail!( - "Only `-l` and `-L` flags are allowed in {}: `{}`", - whence, - value - ) - } - let value = match flags_iter.next() { - Some(v) => v, - None => failure::bail!( - "Flag in rustc-flags has no value in {}: `{}`", - whence, - value - ), - }; - match flag { - "-l" => library_links.push(value.to_string()), - "-L" => library_paths.push(PathBuf::from(value)), - - // was already checked above - _ => failure::bail!("only -l and -L flags are allowed"), - }; - } - Ok((library_paths, library_links)) - } - - pub fn parse_rustc_env(value: &str, whence: &str) -> CargoResult<(String, String)> { - let mut iter = value.splitn(2, '='); - let name = iter.next(); - let val = iter.next(); - match (name, val) { - (Some(n), Some(v)) => Ok((n.to_owned(), v.to_owned())), - _ => failure::bail!("Variable rustc-env has no value in {}: {}", whence, value), - } - } -} - -fn prepare_metabuild<'a, 'cfg>( - cx: &Context<'a, 'cfg>, - unit: &Unit<'a>, - deps: &[String], -) -> CargoResult<()> { - let mut output = Vec::new(); - let available_deps = cx.dep_targets(unit); - // Filter out optional dependencies, and look up the actual lib name. 
- let meta_deps: Vec<_> = deps - .iter() - .filter_map(|name| { - available_deps - .iter() - .find(|u| u.pkg.name().as_str() == name.as_str()) - .map(|dep| dep.target.crate_name()) - }) - .collect(); - for dep in &meta_deps { - output.push(format!("use {};\n", dep)); - } - output.push("fn main() {\n".to_string()); - for dep in &meta_deps { - output.push(format!(" {}::metabuild();\n", dep)); - } - output.push("}\n".to_string()); - let output = output.join(""); - let path = unit.pkg.manifest().metabuild_path(cx.bcx.ws.target_dir()); - fs::create_dir_all(path.parent().unwrap())?; - paths::write_if_changed(path, &output)?; - Ok(()) -} - -impl BuildDeps { - pub fn new(output_file: &Path, output: Option<&BuildOutput>) -> BuildDeps { - BuildDeps { - build_script_output: output_file.to_path_buf(), - rerun_if_changed: output - .map(|p| &p.rerun_if_changed) - .cloned() - .unwrap_or_default(), - rerun_if_env_changed: output - .map(|p| &p.rerun_if_env_changed) - .cloned() - .unwrap_or_default(), - } - } -} - -/// Computes the `build_scripts` map in the `Context` which tracks what build -/// scripts each package depends on. -/// -/// The global `build_scripts` map lists for all (package, kind) tuples what set -/// of packages' build script outputs must be considered. For example this lists -/// all dependencies' `-L` flags which need to be propagated transitively. -/// -/// The given set of targets to this function is the initial set of -/// targets/profiles which are being built. -pub fn build_map<'b, 'cfg>(cx: &mut Context<'b, 'cfg>, units: &[Unit<'b>]) -> CargoResult<()> { - let mut ret = HashMap::new(); - for unit in units { - build(&mut ret, cx, unit)?; - } - cx.build_scripts - .extend(ret.into_iter().map(|(k, v)| (k, Arc::new(v)))); - return Ok(()); - - // Recursive function to build up the map we're constructing. This function - // memoizes all of its return values as it goes along. - fn build<'a, 'b, 'cfg>( - out: &'a mut HashMap, BuildScripts>, - cx: &mut Context<'b, 'cfg>, - unit: &Unit<'b>, - ) -> CargoResult<&'a BuildScripts> { - // Do a quick pre-flight check to see if we've already calculated the - // set of dependencies. - if out.contains_key(unit) { - return Ok(&out[unit]); - } - - let key = unit - .pkg - .manifest() - .links() - .map(|l| (l.to_string(), unit.kind)); - let build_state = &cx.build_state; - if let Some(output) = key.and_then(|k| build_state.overrides.get(&k)) { - let key = (unit.pkg.package_id(), unit.kind); - cx.build_script_overridden.insert(key); - build_state - .outputs - .lock() - .unwrap() - .insert(key, output.clone()); - } - - let mut ret = BuildScripts::default(); - - if !unit.target.is_custom_build() && unit.pkg.has_custom_build() { - add_to_link(&mut ret, unit.pkg.package_id(), unit.kind); - } - - if unit.mode.is_run_custom_build() { - parse_previous_explicit_deps(cx, unit)?; - } - - // We want to invoke the compiler deterministically to be cache-friendly - // to rustc invocation caching schemes, so be sure to generate the same - // set of build script dependency orderings via sorting the targets that - // come out of the `Context`. 
- let mut targets = cx.dep_targets(unit); - targets.sort_by_key(|u| u.pkg.package_id()); - - for unit in targets.iter() { - let dep_scripts = build(out, cx, unit)?; - - if unit.target.for_host() { - ret.plugins - .extend(dep_scripts.to_link.iter().map(|p| &p.0).cloned()); - } else if unit.target.linkable() { - for &(pkg, kind) in dep_scripts.to_link.iter() { - add_to_link(&mut ret, pkg, kind); - } - } - } - - match out.entry(*unit) { - Entry::Vacant(entry) => Ok(entry.insert(ret)), - Entry::Occupied(_) => panic!("cyclic dependencies in `build_map`"), - } - } - - // When adding an entry to 'to_link' we only actually push it on if the - // script hasn't seen it yet (e.g., we don't push on duplicates). - fn add_to_link(scripts: &mut BuildScripts, pkg: PackageId, kind: Kind) { - if scripts.seen_to_link.insert((pkg, kind)) { - scripts.to_link.push((pkg, kind)); - } - } - - fn parse_previous_explicit_deps<'a, 'cfg>( - cx: &mut Context<'a, 'cfg>, - unit: &Unit<'a>, - ) -> CargoResult<()> { - let script_run_dir = cx.files().build_script_run_dir(unit); - let output_file = script_run_dir.join("output"); - let (prev_output, _) = prev_build_output(cx, unit); - let deps = BuildDeps::new(&output_file, prev_output.as_ref()); - cx.build_explicit_deps.insert(*unit, deps); - Ok(()) - } -} - -/// Returns the previous parsed `BuildOutput`, if any, from a previous -/// execution. -/// -/// Also returns the directory containing the output, typically used later in -/// processing. -fn prev_build_output<'a, 'cfg>( - cx: &mut Context<'a, 'cfg>, - unit: &Unit<'a>, -) -> (Option, PathBuf) { - let script_out_dir = cx.files().build_script_out_dir(unit); - let script_run_dir = cx.files().build_script_run_dir(unit); - let root_output_file = script_run_dir.join("root-output"); - let output_file = script_run_dir.join("output"); - - let prev_script_out_dir = paths::read_bytes(&root_output_file) - .and_then(|bytes| util::bytes2path(&bytes)) - .unwrap_or_else(|_| script_out_dir.clone()); - - ( - BuildOutput::parse_file( - &output_file, - &unit.pkg.to_string(), - &prev_script_out_dir, - &script_out_dir, - ) - .ok(), - prev_script_out_dir, - ) -} +use std::collections::hash_map::{Entry, HashMap}; +use std::collections::{BTreeSet, HashSet}; +use std::fs; +use std::path::{Path, PathBuf}; +use std::str; +use std::sync::{Arc, Mutex}; + +use crate::core::compiler::job_queue::JobState; +use crate::core::PackageId; +use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::machine_message::{self, Message}; +use crate::util::Cfg; +use crate::util::{self, internal, paths, profile}; + +use super::job::{Freshness, Job, Work}; +use super::{fingerprint, Context, Kind, TargetConfig, Unit}; + +/// Contains the parsed output of a custom build script. +#[derive(Clone, Debug, Hash)] +pub struct BuildOutput { + /// Paths to pass to rustc with the `-L` flag. + pub library_paths: Vec, + /// Names and link kinds of libraries, suitable for the `-l` flag. + pub library_links: Vec, + /// Linker arguments suitable to be passed to `-C link-arg=` + pub linker_args: Vec, + /// Various `--cfg` flags to pass to the compiler. + pub cfgs: Vec, + /// Additional environment variables to run the compiler with. + pub env: Vec<(String, String)>, + /// Metadata to pass to the immediate dependencies. + pub metadata: Vec<(String, String)>, + /// Paths to trigger a rerun of this build script. + /// May be absolute or relative paths (relative to package root). 
+ pub rerun_if_changed: Vec, + /// Environment variables which, when changed, will cause a rebuild. + pub rerun_if_env_changed: Vec, + /// Warnings generated by this build. + pub warnings: Vec, +} + +/// Map of packages to build info. +pub type BuildMap = HashMap<(PackageId, Kind), BuildOutput>; + +/// Build info and overrides. +pub struct BuildState { + pub outputs: Mutex, + overrides: HashMap<(String, Kind), BuildOutput>, +} + +#[derive(Default)] +pub struct BuildScripts { + // Cargo will use this `to_link` vector to add `-L` flags to compiles as we + // propagate them upwards towards the final build. Note, however, that we + // need to preserve the ordering of `to_link` to be topologically sorted. + // This will ensure that build scripts which print their paths properly will + // correctly pick up the files they generated (if there are duplicates + // elsewhere). + // + // To preserve this ordering, the (id, kind) is stored in two places, once + // in the `Vec` and once in `seen_to_link` for a fast lookup. We maintain + // this as we're building interactively below to ensure that the memory + // usage here doesn't blow up too much. + // + // For more information, see #2354. + pub to_link: Vec<(PackageId, Kind)>, + seen_to_link: HashSet<(PackageId, Kind)>, + pub plugins: BTreeSet, +} + +#[derive(Debug)] +pub struct BuildDeps { + pub build_script_output: PathBuf, + pub rerun_if_changed: Vec, + pub rerun_if_env_changed: Vec, +} + +/// Prepares a `Work` that executes the target as a custom build script. +/// +/// The `req` given is the requirement which this run of the build script will +/// prepare work for. If the requirement is specified as both the target and the +/// host platforms it is assumed that the two are equal and the build script is +/// only run once (not twice). 
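To make the `to_link` / `seen_to_link` invariant described in `BuildScripts` above concrete, here is a tiny stand-alone sketch with a simplified key type (not Cargo's `(PackageId, Kind)`): the `Vec` keeps insertion order while the `HashSet` suppresses duplicates.

use std::collections::HashSet;

#[derive(Default)]
struct Scripts {
    to_link: Vec<(&'static str, &'static str)>, // (package, kind), in insertion order
    seen_to_link: HashSet<(&'static str, &'static str)>,
}

impl Scripts {
    fn add_to_link(&mut self, pkg: &'static str, kind: &'static str) {
        // Only push the first occurrence so ordering is preserved without duplicates.
        if self.seen_to_link.insert((pkg, kind)) {
            self.to_link.push((pkg, kind));
        }
    }
}

fn main() {
    let mut s = Scripts::default();
    s.add_to_link("foo-sys", "target");
    s.add_to_link("bar-sys", "target");
    s.add_to_link("foo-sys", "target"); // duplicate, ignored
    assert_eq!(s.to_link, vec![("foo-sys", "target"), ("bar-sys", "target")]);
}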
+pub fn prepare<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult { + let _p = profile::start(format!( + "build script prepare: {}/{}", + unit.pkg, + unit.target.name() + )); + + let key = (unit.pkg.package_id(), unit.kind); + + if cx.build_script_overridden.contains(&key) { + fingerprint::prepare_target(cx, unit, false) + } else { + build_work(cx, unit) + } +} + +fn emit_build_output(state: &JobState<'_>, output: &BuildOutput, package_id: PackageId) { + let library_paths = output + .library_paths + .iter() + .map(|l| l.display().to_string()) + .collect::>(); + + let msg = machine_message::BuildScript { + package_id, + linked_libs: &output.library_links, + linked_paths: &library_paths, + cfgs: &output.cfgs, + env: &output.env, + } + .to_json_string(); + state.stdout(msg); +} + +fn build_work<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult { + assert!(unit.mode.is_run_custom_build()); + let bcx = &cx.bcx; + let dependencies = cx.dep_targets(unit); + let build_script_unit = dependencies + .iter() + .find(|d| !d.mode.is_run_custom_build() && d.target.is_custom_build()) + .expect("running a script not depending on an actual script"); + let script_dir = cx.files().build_script_dir(build_script_unit); + let script_out_dir = cx.files().build_script_out_dir(unit); + let script_run_dir = cx.files().build_script_run_dir(unit); + let build_plan = bcx.build_config.build_plan; + let invocation_name = unit.buildkey(); + + if let Some(deps) = unit.pkg.manifest().metabuild() { + prepare_metabuild(cx, build_script_unit, deps)?; + } + + // Building the command to execute + let to_exec = script_dir.join(unit.target.name()); + + // Start preparing the process to execute, starting out with some + // environment variables. Note that the profile-related environment + // variables are not set with this the build script's profile but rather the + // package's library profile. + // NOTE: if you add any profile flags, be sure to update + // `Profiles::get_profile_run_custom_build` so that those flags get + // carried over. + let to_exec = to_exec.into_os_string(); + let mut cmd = cx.compilation.host_process(to_exec, unit.pkg)?; + let debug = unit.profile.debuginfo.unwrap_or(0) != 0; + cmd.env("OUT_DIR", &script_out_dir) + .env("CARGO_MANIFEST_DIR", unit.pkg.root()) + .env("NUM_JOBS", &bcx.jobs().to_string()) + .env( + "TARGET", + &match unit.kind { + Kind::Host => bcx.host_triple(), + Kind::Target => bcx.target_triple(), + }, + ) + .env("DEBUG", debug.to_string()) + .env("OPT_LEVEL", &unit.profile.opt_level.to_string()) + .env( + "PROFILE", + if bcx.build_config.release { + "release" + } else { + "debug" + }, + ) + .env("HOST", &bcx.host_triple()) + .env("RUSTC", &bcx.rustc.path) + .env("RUSTDOC", &*bcx.config.rustdoc()?) + .inherit_jobserver(&cx.jobserver); + + if let Some(ref linker) = bcx.target_config.linker { + cmd.env("RUSTC_LINKER", linker); + } + + if let Some(links) = unit.pkg.manifest().links() { + cmd.env("CARGO_MANIFEST_LINKS", links); + } + + // Be sure to pass along all enabled features for this package, this is the + // last piece of statically known information that we have. 
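The loop that follows turns every enabled feature into a `CARGO_FEATURE_*` environment variable. The real name conversion lives in `super::envify`, which is not shown in this diff; the helper below is a hedged, self-contained approximation of that behavior, assuming it uppercases and replaces `-` with `_`.

fn envify(s: &str) -> String {
    // e.g. "serde-derive" -> "SERDE_DERIVE"
    s.chars()
        .flat_map(|c| c.to_uppercase())
        .map(|c| if c == '-' { '_' } else { c })
        .collect()
}

fn main() {
    for feature in ["default", "serde-derive"] {
        println!("CARGO_FEATURE_{}=1", envify(feature));
    }
}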
+ for feat in bcx.resolve.features(unit.pkg.package_id()).iter() { + cmd.env(&format!("CARGO_FEATURE_{}", super::envify(feat.0)), "1"); + } + + let mut cfg_map = HashMap::new(); + for cfg in bcx.cfg(unit.kind) { + match *cfg { + Cfg::Name(ref n) => { + cfg_map.insert(n.clone(), None); + } + Cfg::KeyPair(ref k, ref v) => { + if let Some(ref mut values) = + *cfg_map.entry(k.clone()).or_insert_with(|| Some(Vec::new())) + { + values.push(v.clone()) + } + } + } + } + for (k, v) in cfg_map { + let k = format!("CARGO_CFG_{}", super::envify(&k)); + match v { + Some(list) => { + cmd.env(&k, list.join(",")); + } + None => { + cmd.env(&k, ""); + } + } + } + + // Gather the set of native dependencies that this package has along with + // some other variables to close over. + // + // This information will be used at build-time later on to figure out which + // sorts of variables need to be discovered at that time. + let lib_deps = { + dependencies + .iter() + .filter_map(|unit| { + if unit.mode.is_run_custom_build() { + Some(( + unit.pkg.manifest().links().unwrap().to_string(), + unit.pkg.package_id(), + )) + } else { + None + } + }) + .collect::>() + }; + let pkg_name = unit.pkg.to_string(); + let build_state = Arc::clone(&cx.build_state); + let id = unit.pkg.package_id(); + let output_file = script_run_dir.join("output"); + let err_file = script_run_dir.join("stderr"); + let root_output_file = script_run_dir.join("root-output"); + let host_target_root = cx.files().host_root().to_path_buf(); + let all = ( + id, + pkg_name.clone(), + Arc::clone(&build_state), + output_file.clone(), + script_out_dir.clone(), + ); + let build_scripts = super::load_build_deps(cx, unit); + let kind = unit.kind; + let json_messages = bcx.build_config.emit_json(); + let extra_verbose = bcx.config.extra_verbose(); + let (prev_output, prev_script_out_dir) = prev_build_output(cx, unit); + + fs::create_dir_all(&script_dir)?; + fs::create_dir_all(&script_out_dir)?; + + // Prepare the unit of "dirty work" which will actually run the custom build + // command. + // + // Note that this has to do some extra work just before running the command + // to determine extra environment variables and such. + let dirty = Work::new(move |state| { + // Make sure that OUT_DIR exists. + // + // If we have an old build directory, then just move it into place, + // otherwise create it! + if fs::metadata(&script_out_dir).is_err() { + fs::create_dir(&script_out_dir).chain_err(|| { + internal( + "failed to create script output directory for \ + build command", + ) + })?; + } + + // For all our native lib dependencies, pick up their metadata to pass + // along to this custom build command. We're also careful to augment our + // dynamic library search path in case the build script depended on any + // native dynamic libraries. 
+ if !build_plan { + let build_state = build_state.outputs.lock().unwrap(); + for (name, id) in lib_deps { + let key = (id, kind); + let state = build_state.get(&key).ok_or_else(|| { + internal(format!( + "failed to locate build state for env \ + vars: {}/{:?}", + id, kind + )) + })?; + let data = &state.metadata; + for &(ref key, ref value) in data.iter() { + cmd.env( + &format!("DEP_{}_{}", super::envify(&name), super::envify(key)), + value, + ); + } + } + if let Some(build_scripts) = build_scripts { + super::add_plugin_deps(&mut cmd, &build_state, &build_scripts, &host_target_root)?; + } + } + + if build_plan { + state.build_plan(invocation_name, cmd.clone(), Arc::new(Vec::new())); + return Ok(()); + } + + // And now finally, run the build command itself! + state.running(&cmd); + let timestamp = paths::set_invocation_time(&script_run_dir)?; + let prefix = format!("[{} {}] ", id.name(), id.version()); + let output = cmd + .exec_with_streaming( + &mut |stdout| { + if extra_verbose { + state.stdout(format!("{}{}", prefix, stdout)); + } + Ok(()) + }, + &mut |stderr| { + if extra_verbose { + state.stderr(format!("{}{}", prefix, stderr)); + } + Ok(()) + }, + true, + ) + .chain_err(|| format!("failed to run custom build command for `{}`", pkg_name))?; + + // After the build command has finished running, we need to be sure to + // remember all of its output so we can later discover precisely what it + // was, even if we don't run the build command again (due to freshness). + // + // This is also the location where we provide feedback into the build + // state informing what variables were discovered via our script as + // well. + paths::write(&output_file, &output.stdout)?; + filetime::set_file_times(output_file, timestamp, timestamp)?; + paths::write(&err_file, &output.stderr)?; + paths::write(&root_output_file, util::path2bytes(&script_out_dir)?)?; + let parsed_output = + BuildOutput::parse(&output.stdout, &pkg_name, &script_out_dir, &script_out_dir)?; + + if json_messages { + emit_build_output(state, &parsed_output, id); + } + build_state.insert(id, kind, parsed_output); + Ok(()) + }); + + // Now that we've prepared our work-to-do, we need to prepare the fresh work + // itself to run when we actually end up just discarding what we calculated + // above. + let fresh = Work::new(move |state| { + let (id, pkg_name, build_state, output_file, script_out_dir) = all; + let output = match prev_output { + Some(output) => output, + None => BuildOutput::parse_file( + &output_file, + &pkg_name, + &prev_script_out_dir, + &script_out_dir, + )?, + }; + + if json_messages { + emit_build_output(state, &output, id); + } + + build_state.insert(id, kind, output); + Ok(()) + }); + + let mut job = if cx.bcx.build_config.build_plan { + Job::new(Work::noop(), Freshness::Dirty) + } else { + fingerprint::prepare_target(cx, unit, false)? 
+ }; + if job.freshness() == Freshness::Dirty { + job.before(dirty); + } else { + job.before(fresh); + } + Ok(job) +} + +impl BuildState { + pub fn new(host_config: &TargetConfig, target_config: &TargetConfig) -> BuildState { + let mut overrides = HashMap::new(); + let i1 = host_config.overrides.iter().map(|p| (p, Kind::Host)); + let i2 = target_config.overrides.iter().map(|p| (p, Kind::Target)); + for ((name, output), kind) in i1.chain(i2) { + overrides.insert((name.clone(), kind), output.clone()); + } + BuildState { + outputs: Mutex::new(HashMap::new()), + overrides, + } + } + + fn insert(&self, id: PackageId, kind: Kind, output: BuildOutput) { + self.outputs.lock().unwrap().insert((id, kind), output); + } +} + +impl BuildOutput { + pub fn parse_file( + path: &Path, + pkg_name: &str, + script_out_dir_when_generated: &Path, + script_out_dir: &Path, + ) -> CargoResult { + let contents = paths::read_bytes(path)?; + BuildOutput::parse( + &contents, + pkg_name, + script_out_dir_when_generated, + script_out_dir, + ) + } + + // Parses the output of a script. + // The `pkg_name` is used for error messages. + pub fn parse( + input: &[u8], + pkg_name: &str, + script_out_dir_when_generated: &Path, + script_out_dir: &Path, + ) -> CargoResult { + let mut library_paths = Vec::new(); + let mut library_links = Vec::new(); + let mut linker_args = Vec::new(); + let mut cfgs = Vec::new(); + let mut env = Vec::new(); + let mut metadata = Vec::new(); + let mut rerun_if_changed = Vec::new(); + let mut rerun_if_env_changed = Vec::new(); + let mut warnings = Vec::new(); + let whence = format!("build script of `{}`", pkg_name); + + for line in input.split(|b| *b == b'\n') { + let line = match str::from_utf8(line) { + Ok(line) => line.trim(), + Err(..) => continue, + }; + let mut iter = line.splitn(2, ':'); + if iter.next() != Some("cargo") { + // skip this line since it doesn't start with "cargo:" + continue; + } + let data = match iter.next() { + Some(val) => val, + None => continue, + }; + + // getting the `key=value` part of the line + let mut iter = data.splitn(2, '='); + let key = iter.next(); + let value = iter.next(); + let (key, value) = match (key, value) { + (Some(a), Some(b)) => (a, b.trim_end()), + // Line started with `cargo:` but didn't match `key=value`. + _ => failure::bail!("Wrong output in {}: `{}`", whence, line), + }; + + // This will rewrite paths if the target directory has been moved. 
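The parsing above boils down to: accept lines of the form `cargo:KEY=VALUE`, and rewrite any embedded out-dir path in case the target directory has moved since the output was recorded. A compact, std-only sketch of that shape (an approximation with hypothetical inputs, not Cargo's actual parser):

fn parse_directive(line: &str, old_out_dir: &str, new_out_dir: &str) -> Option<(String, String)> {
    // Only lines starting with `cargo:` are directives; everything else is skipped.
    let rest = line.trim().strip_prefix("cargo:")?;
    let (key, value) = rest.split_once('=')?;
    // Rewrite stale paths if the target directory has moved.
    Some((key.to_string(), value.trim_end().replace(old_out_dir, new_out_dir)))
}

fn main() {
    let line = "cargo:rustc-link-search=/old/target/out";
    assert_eq!(
        parse_directive(line, "/old/target", "/new/target"),
        Some(("rustc-link-search".to_string(), "/new/target/out".to_string()))
    );
}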
+ let value = value.replace( + script_out_dir_when_generated.to_str().unwrap(), + script_out_dir.to_str().unwrap(), + ); + + match key { + "rustc-flags" => { + let (paths, links) = BuildOutput::parse_rustc_flags(&value, &whence)?; + library_links.extend(links.into_iter()); + library_paths.extend(paths.into_iter()); + } + "rustc-link-lib" => library_links.push(value.to_string()), + "rustc-link-search" => library_paths.push(PathBuf::from(value)), + "rustc-cdylib-link-arg" => linker_args.push(value.to_string()), + "rustc-cfg" => cfgs.push(value.to_string()), + "rustc-env" => env.push(BuildOutput::parse_rustc_env(&value, &whence)?), + "warning" => warnings.push(value.to_string()), + "rerun-if-changed" => rerun_if_changed.push(PathBuf::from(value)), + "rerun-if-env-changed" => rerun_if_env_changed.push(value.to_string()), + _ => metadata.push((key.to_string(), value.to_string())), + } + } + + Ok(BuildOutput { + library_paths, + library_links, + linker_args, + cfgs, + env, + metadata, + rerun_if_changed, + rerun_if_env_changed, + warnings, + }) + } + + pub fn parse_rustc_flags( + value: &str, + whence: &str, + ) -> CargoResult<(Vec, Vec)> { + let value = value.trim(); + let mut flags_iter = value + .split(|c: char| c.is_whitespace()) + .filter(|w| w.chars().any(|c| !c.is_whitespace())); + let (mut library_paths, mut library_links) = (Vec::new(), Vec::new()); + while let Some(flag) = flags_iter.next() { + if flag != "-l" && flag != "-L" { + failure::bail!( + "Only `-l` and `-L` flags are allowed in {}: `{}`", + whence, + value + ) + } + let value = match flags_iter.next() { + Some(v) => v, + None => failure::bail!( + "Flag in rustc-flags has no value in {}: `{}`", + whence, + value + ), + }; + match flag { + "-l" => library_links.push(value.to_string()), + "-L" => library_paths.push(PathBuf::from(value)), + + // was already checked above + _ => failure::bail!("only -l and -L flags are allowed"), + }; + } + Ok((library_paths, library_links)) + } + + pub fn parse_rustc_env(value: &str, whence: &str) -> CargoResult<(String, String)> { + let mut iter = value.splitn(2, '='); + let name = iter.next(); + let val = iter.next(); + match (name, val) { + (Some(n), Some(v)) => Ok((n.to_owned(), v.to_owned())), + _ => failure::bail!("Variable rustc-env has no value in {}: {}", whence, value), + } + } +} + +fn prepare_metabuild<'a, 'cfg>( + cx: &Context<'a, 'cfg>, + unit: &Unit<'a>, + deps: &[String], +) -> CargoResult<()> { + let mut output = Vec::new(); + let available_deps = cx.dep_targets(unit); + // Filter out optional dependencies, and look up the actual lib name. 
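[Editorial aside] As a standalone illustration of the `cargo:key=value` protocol that `BuildOutput::parse` handles above, the sketch below reproduces the two-stage splitting (first on `:`, then on `=`). The helper name and sample lines are illustrative only and are not part of this patch.

// Minimal sketch of the `cargo:key=value` splitting performed by
// `BuildOutput::parse` above. Assumption: simplified, no out-dir path
// rewriting and no error reporting; unknown keys become DEP_* metadata.
fn parse_cargo_line(line: &str) -> Option<(&str, &str)> {
    let mut iter = line.trim().splitn(2, ':');
    if iter.next() != Some("cargo") {
        return None; // not addressed to Cargo, ignored by the parser
    }
    let data = iter.next()?;
    let mut kv = data.splitn(2, '=');
    match (kv.next(), kv.next()) {
        (Some(k), Some(v)) => Some((k, v.trim_end())),
        _ => None, // the real parser bails with "Wrong output in ..." here
    }
}

fn main() {
    assert_eq!(
        parse_cargo_line("cargo:rustc-link-lib=git2"),
        Some(("rustc-link-lib", "git2"))
    );
    assert_eq!(
        parse_cargo_line("cargo:root=/path/to/out"), // unknown key -> metadata
        Some(("root", "/path/to/out"))
    );
    assert_eq!(parse_cargo_line("warning: not for cargo"), None);
}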
+ let meta_deps: Vec<_> = deps + .iter() + .filter_map(|name| { + available_deps + .iter() + .find(|u| u.pkg.name().as_str() == name.as_str()) + .map(|dep| dep.target.crate_name()) + }) + .collect(); + for dep in &meta_deps { + output.push(format!("use {};\n", dep)); + } + output.push("fn main() {\n".to_string()); + for dep in &meta_deps { + output.push(format!(" {}::metabuild();\n", dep)); + } + output.push("}\n".to_string()); + let output = output.join(""); + let path = unit.pkg.manifest().metabuild_path(cx.bcx.ws.target_dir()); + fs::create_dir_all(path.parent().unwrap())?; + paths::write_if_changed(path, &output)?; + Ok(()) +} + +impl BuildDeps { + pub fn new(output_file: &Path, output: Option<&BuildOutput>) -> BuildDeps { + BuildDeps { + build_script_output: output_file.to_path_buf(), + rerun_if_changed: output + .map(|p| &p.rerun_if_changed) + .cloned() + .unwrap_or_default(), + rerun_if_env_changed: output + .map(|p| &p.rerun_if_env_changed) + .cloned() + .unwrap_or_default(), + } + } +} + +/// Computes the `build_scripts` map in the `Context` which tracks what build +/// scripts each package depends on. +/// +/// The global `build_scripts` map lists for all (package, kind) tuples what set +/// of packages' build script outputs must be considered. For example this lists +/// all dependencies' `-L` flags which need to be propagated transitively. +/// +/// The given set of targets to this function is the initial set of +/// targets/profiles which are being built. +pub fn build_map<'b, 'cfg>(cx: &mut Context<'b, 'cfg>, units: &[Unit<'b>]) -> CargoResult<()> { + let mut ret = HashMap::new(); + for unit in units { + build(&mut ret, cx, unit)?; + } + cx.build_scripts + .extend(ret.into_iter().map(|(k, v)| (k, Arc::new(v)))); + return Ok(()); + + // Recursive function to build up the map we're constructing. This function + // memoizes all of its return values as it goes along. + fn build<'a, 'b, 'cfg>( + out: &'a mut HashMap, BuildScripts>, + cx: &mut Context<'b, 'cfg>, + unit: &Unit<'b>, + ) -> CargoResult<&'a BuildScripts> { + // Do a quick pre-flight check to see if we've already calculated the + // set of dependencies. + if out.contains_key(unit) { + return Ok(&out[unit]); + } + + let key = unit + .pkg + .manifest() + .links() + .map(|l| (l.to_string(), unit.kind)); + let build_state = &cx.build_state; + if let Some(output) = key.and_then(|k| build_state.overrides.get(&k)) { + let key = (unit.pkg.package_id(), unit.kind); + cx.build_script_overridden.insert(key); + build_state + .outputs + .lock() + .unwrap() + .insert(key, output.clone()); + } + + let mut ret = BuildScripts::default(); + + if !unit.target.is_custom_build() && unit.pkg.has_custom_build() { + add_to_link(&mut ret, unit.pkg.package_id(), unit.kind); + } + + if unit.mode.is_run_custom_build() { + parse_previous_explicit_deps(cx, unit)?; + } + + // We want to invoke the compiler deterministically to be cache-friendly + // to rustc invocation caching schemes, so be sure to generate the same + // set of build script dependency orderings via sorting the targets that + // come out of the `Context`. 
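[Editorial aside] For reference, this is roughly the source text that `prepare_metabuild` assembles. The dependency names `cc` and `pkg_config` below are hypothetical examples, and the helper is a simplified re-statement rather than Cargo's own code.

// Reproduces the string assembled by `prepare_metabuild` above for a
// hypothetical `metabuild = ["cc", "pkg_config"]` manifest entry.
fn metabuild_source(deps: &[&str]) -> String {
    let mut out = Vec::new();
    for dep in deps {
        out.push(format!("use {};\n", dep));
    }
    out.push("fn main() {\n".to_string());
    for dep in deps {
        out.push(format!("    {}::metabuild();\n", dep));
    }
    out.push("}\n".to_string());
    out.join("")
}

fn main() {
    let expected = "use cc;\nuse pkg_config;\nfn main() {\n    cc::metabuild();\n    pkg_config::metabuild();\n}\n";
    assert_eq!(metabuild_source(&["cc", "pkg_config"]), expected);
}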
+ let mut targets = cx.dep_targets(unit); + targets.sort_by_key(|u| u.pkg.package_id()); + + for unit in targets.iter() { + let dep_scripts = build(out, cx, unit)?; + + if unit.target.for_host() { + ret.plugins + .extend(dep_scripts.to_link.iter().map(|p| &p.0).cloned()); + } else if unit.target.linkable() { + for &(pkg, kind) in dep_scripts.to_link.iter() { + add_to_link(&mut ret, pkg, kind); + } + } + } + + match out.entry(*unit) { + Entry::Vacant(entry) => Ok(entry.insert(ret)), + Entry::Occupied(_) => panic!("cyclic dependencies in `build_map`"), + } + } + + // When adding an entry to 'to_link' we only actually push it on if the + // script hasn't seen it yet (e.g., we don't push on duplicates). + fn add_to_link(scripts: &mut BuildScripts, pkg: PackageId, kind: Kind) { + if scripts.seen_to_link.insert((pkg, kind)) { + scripts.to_link.push((pkg, kind)); + } + } + + fn parse_previous_explicit_deps<'a, 'cfg>( + cx: &mut Context<'a, 'cfg>, + unit: &Unit<'a>, + ) -> CargoResult<()> { + let script_run_dir = cx.files().build_script_run_dir(unit); + let output_file = script_run_dir.join("output"); + let (prev_output, _) = prev_build_output(cx, unit); + let deps = BuildDeps::new(&output_file, prev_output.as_ref()); + cx.build_explicit_deps.insert(*unit, deps); + Ok(()) + } +} + +/// Returns the previous parsed `BuildOutput`, if any, from a previous +/// execution. +/// +/// Also returns the directory containing the output, typically used later in +/// processing. +fn prev_build_output<'a, 'cfg>( + cx: &mut Context<'a, 'cfg>, + unit: &Unit<'a>, +) -> (Option, PathBuf) { + let script_out_dir = cx.files().build_script_out_dir(unit); + let script_run_dir = cx.files().build_script_run_dir(unit); + let root_output_file = script_run_dir.join("root-output"); + let output_file = script_run_dir.join("output"); + + let prev_script_out_dir = paths::read_bytes(&root_output_file) + .and_then(|bytes| util::bytes2path(&bytes)) + .unwrap_or_else(|_| script_out_dir.clone()); + + ( + BuildOutput::parse_file( + &output_file, + &unit.pkg.to_string(), + &prev_script_out_dir, + &script_out_dir, + ) + .ok(), + prev_script_out_dir, + ) +} diff --git a/src/cargo/core/compiler/mod.rs b/src/cargo/core/compiler/mod.rs index 4962e774506..0797424b2e5 100644 --- a/src/cargo/core/compiler/mod.rs +++ b/src/cargo/core/compiler/mod.rs @@ -1,1329 +1,1343 @@ -mod build_config; -mod build_context; -mod build_plan; -mod compilation; -mod context; -mod custom_build; -mod fingerprint; -mod job; -mod job_queue; -mod layout; -mod output_depinfo; -mod unit; - -use std::env; -use std::ffi::{OsStr, OsString}; -use std::fs::{self, File}; -use std::io::Write; -use std::path::{Path, PathBuf}; -use std::sync::Arc; - -use failure::{bail, Error}; -use lazycell::LazyCell; -use log::debug; -use same_file::is_same_file; -use serde::Serialize; - -pub use self::build_config::{BuildConfig, CompileMode, MessageFormat}; -pub use self::build_context::{BuildContext, FileFlavor, TargetConfig, TargetInfo}; -use self::build_plan::BuildPlan; -pub use self::compilation::{Compilation, Doctest}; -pub use self::context::Context; -pub use self::custom_build::{BuildMap, BuildOutput, BuildScripts}; -pub use self::job::Freshness; -use self::job::{Job, Work}; -use self::job_queue::{JobQueue, JobState}; -pub use self::layout::is_bad_artifact_name; -use self::output_depinfo::output_depinfo; -pub use crate::core::compiler::unit::{Unit, UnitInterner}; -use crate::core::manifest::TargetSourcePath; -use crate::core::profiles::{Lto, PanicStrategy, Profile}; -use 
crate::core::Feature; -use crate::core::{PackageId, Target}; -use crate::util::errors::{CargoResult, CargoResultExt, Internal, ProcessError}; -use crate::util::machine_message::Message; -use crate::util::paths; -use crate::util::{self, machine_message, ProcessBuilder}; -use crate::util::{internal, join_paths, profile}; - -/// Indicates whether an object is for the host architcture or the target architecture. -/// -/// These will be the same unless cross-compiling. -#[derive(PartialEq, Eq, Hash, Debug, Clone, Copy, PartialOrd, Ord, Serialize)] -pub enum Kind { - Host, - Target, -} - -/// A glorified callback for executing calls to rustc. Rather than calling rustc -/// directly, we'll use an `Executor`, giving clients an opportunity to intercept -/// the build calls. -pub trait Executor: Send + Sync + 'static { - /// Called after a rustc process invocation is prepared up-front for a given - /// unit of work (may still be modified for runtime-known dependencies, when - /// the work is actually executed). - fn init<'a, 'cfg>(&self, _cx: &Context<'a, 'cfg>, _unit: &Unit<'a>) {} - - /// In case of an `Err`, Cargo will not continue with the build process for - /// this package. - fn exec( - &self, - cmd: ProcessBuilder, - id: PackageId, - target: &Target, - mode: CompileMode, - on_stdout_line: &mut dyn FnMut(&str) -> CargoResult<()>, - on_stderr_line: &mut dyn FnMut(&str) -> CargoResult<()>, - ) -> CargoResult<()>; - - /// Queried when queuing each unit of work. If it returns true, then the - /// unit will always be rebuilt, independent of whether it needs to be. - fn force_rebuild(&self, _unit: &Unit<'_>) -> bool { - false - } -} - -/// A `DefaultExecutor` calls rustc without doing anything else. It is Cargo's -/// default behaviour. -#[derive(Copy, Clone)] -pub struct DefaultExecutor; - -impl Executor for DefaultExecutor { - fn exec( - &self, - cmd: ProcessBuilder, - _id: PackageId, - _target: &Target, - _mode: CompileMode, - on_stdout_line: &mut dyn FnMut(&str) -> CargoResult<()>, - on_stderr_line: &mut dyn FnMut(&str) -> CargoResult<()>, - ) -> CargoResult<()> { - cmd.exec_with_streaming(on_stdout_line, on_stderr_line, false) - .map(drop) - } -} - -fn compile<'a, 'cfg: 'a>( - cx: &mut Context<'a, 'cfg>, - jobs: &mut JobQueue<'a, 'cfg>, - plan: &mut BuildPlan, - unit: &Unit<'a>, - exec: &Arc, - force_rebuild: bool, -) -> CargoResult<()> { - let bcx = cx.bcx; - let build_plan = bcx.build_config.build_plan; - if !cx.compiled.insert(*unit) { - return Ok(()); - } - - // Build up the work to be done to compile this unit, enqueuing it once - // we've got everything constructed. - let p = profile::start(format!("preparing: {}/{}", unit.pkg, unit.target.name())); - fingerprint::prepare_init(cx, unit)?; - cx.links.validate(bcx.resolve, unit)?; - - let job = if unit.mode.is_run_custom_build() { - custom_build::prepare(cx, unit)? - } else if unit.mode.is_doc_test() { - // We run these targets later, so this is just a no-op for now. - Job::new(Work::noop(), Freshness::Fresh) - } else if build_plan { - Job::new(rustc(cx, unit, &exec.clone())?, Freshness::Dirty) - } else { - let force = exec.force_rebuild(unit) || force_rebuild; - let mut job = fingerprint::prepare_target(cx, unit, force)?; - job.before(if job.freshness() == Freshness::Dirty { - let work = if unit.mode.is_doc() { - rustdoc(cx, unit)? - } else { - rustc(cx, unit, exec)? - }; - work.then(link_targets(cx, unit, false)?) 
- } else { - let work = if cx.bcx.build_config.cache_messages() - && cx.bcx.show_warnings(unit.pkg.package_id()) - { - replay_output_cache( - unit.pkg.package_id(), - unit.target, - cx.files().message_cache_path(unit), - cx.bcx.build_config.message_format, - cx.bcx.config.shell().supports_color(), - ) - } else { - Work::noop() - }; - // Need to link targets on both the dirty and fresh. - work.then(link_targets(cx, unit, true)?) - }); - - job - }; - jobs.enqueue(cx, unit, job)?; - drop(p); - - // Be sure to compile all dependencies of this target as well. - for unit in cx.dep_targets(unit).iter() { - compile(cx, jobs, plan, unit, exec, false)?; - } - if build_plan { - plan.add(cx, unit)?; - } - - Ok(()) -} - -fn rustc<'a, 'cfg>( - cx: &mut Context<'a, 'cfg>, - unit: &Unit<'a>, - exec: &Arc, -) -> CargoResult { - let mut rustc = prepare_rustc(cx, &unit.target.rustc_crate_types(), unit)?; - let build_plan = cx.bcx.build_config.build_plan; - - let name = unit.pkg.name().to_string(); - let buildkey = unit.buildkey(); - - add_cap_lints(cx.bcx, unit, &mut rustc); - - let outputs = cx.outputs(unit)?; - let root = cx.files().out_dir(unit); - let kind = unit.kind; - - // Prepare the native lib state (extra `-L` and `-l` flags). - let build_state = cx.build_state.clone(); - let current_id = unit.pkg.package_id(); - let build_deps = load_build_deps(cx, unit); - - // If we are a binary and the package also contains a library, then we - // don't pass the `-l` flags. - let pass_l_flag = unit.target.is_lib() || !unit.pkg.targets().iter().any(|t| t.is_lib()); - let pass_cdylib_link_args = unit.target.is_cdylib(); - let do_rename = unit.target.allows_underscores() && !unit.mode.is_any_test(); - let real_name = unit.target.name().to_string(); - let crate_name = unit.target.crate_name(); - - // Rely on `target_filenames` iterator as source of truth rather than rederiving filestem. - let rustc_dep_info_loc = if do_rename && cx.files().metadata(unit).is_none() { - root.join(&crate_name) - } else { - root.join(&cx.files().file_stem(unit)) - } - .with_extension("d"); - let dep_info_loc = fingerprint::dep_info_loc(cx, unit); - - rustc.args(cx.bcx.rustflags_args(unit)); - if cx.bcx.config.cli_unstable().binary_dep_depinfo { - rustc.arg("-Zbinary-dep-depinfo"); - } - let mut output_options = OutputOptions::new(cx, unit); - let package_id = unit.pkg.package_id(); - let target = unit.target.clone(); - let mode = unit.mode; - - exec.init(cx, unit); - let exec = exec.clone(); - - let root_output = cx.files().host_root().to_path_buf(); - let target_dir = cx.bcx.ws.target_dir().into_path_unlocked(); - let pkg_root = unit.pkg.root().to_path_buf(); - let cwd = rustc - .get_cwd() - .unwrap_or_else(|| cx.bcx.config.cwd()) - .to_path_buf(); - let fingerprint_dir = cx.files().fingerprint_dir(unit); - - return Ok(Work::new(move |state| { - // Only at runtime have we discovered what the extra -L and -l - // arguments are for native libraries, so we process those here. We - // also need to be sure to add any -L paths for our plugins to the - // dynamic library load path as a plugin's dynamic library may be - // located somewhere in there. - // Finally, if custom environment variables have been produced by - // previous build scripts, we include them in the rustc invocation. 
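[Editorial aside] The runtime flag injection described in the comment above boils down to a small mapping from a build script's parsed output to rustc arguments. The struct and values below are a simplified, illustrative stand-in for `BuildOutput`; real Cargo keys this state by `(PackageId, Kind)` as shown in `add_native_deps` and `add_custom_env` nearby.

// Simplified stand-in for the `-L` / `-l` / `--cfg` / env injection done by
// `add_native_deps` and `add_custom_env` (assumption: flattened into one
// struct; env pairs are exported to the rustc process, not passed as args).
struct ScriptOutput {
    library_paths: Vec<String>,
    library_links: Vec<String>,
    cfgs: Vec<String>,
    env: Vec<(String, String)>,
}

fn rustc_args(out: &ScriptOutput, pass_l_flag: bool) -> Vec<String> {
    let mut args = Vec::new();
    for p in &out.library_paths {
        args.push("-L".to_string());
        args.push(p.clone());
    }
    for cfg in &out.cfgs {
        args.push("--cfg".to_string());
        args.push(cfg.clone());
    }
    if pass_l_flag {
        for l in &out.library_links {
            args.push("-l".to_string());
            args.push(l.clone());
        }
    }
    args
}

fn main() {
    let out = ScriptOutput {
        library_paths: vec!["/opt/libgit2/lib".into()],
        library_links: vec!["git2".into()],
        cfgs: vec!["have_libgit2".into()],
        env: vec![("LIBGIT2_DEMO_VAR".into(), "1".into())], // illustrative name
    };
    assert_eq!(
        rustc_args(&out, true),
        vec!["-L", "/opt/libgit2/lib", "--cfg", "have_libgit2", "-l", "git2"]
    );
    // The env pair would be applied via `rustc.env(name, value)` instead.
    assert_eq!(out.env[0].0, "LIBGIT2_DEMO_VAR");
}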
- if let Some(build_deps) = build_deps { - let build_state = build_state.outputs.lock().unwrap(); - if !build_plan { - add_native_deps( - &mut rustc, - &build_state, - &build_deps, - pass_l_flag, - pass_cdylib_link_args, - current_id, - )?; - add_plugin_deps(&mut rustc, &build_state, &build_deps, &root_output)?; - } - add_custom_env(&mut rustc, &build_state, current_id, kind)?; - } - - for output in outputs.iter() { - // If there is both an rmeta and rlib, rustc will prefer to use the - // rlib, even if it is older. Therefore, we must delete the rlib to - // force using the new rmeta. - if output.path.extension() == Some(OsStr::new("rmeta")) { - let dst = root.join(&output.path).with_extension("rlib"); - if dst.exists() { - paths::remove_file(&dst)?; - } - } - } - - fn internal_if_simple_exit_code(err: Error) -> Error { - // If a signal on unix (`code == None`) or an abnormal termination - // on Windows (codes like `0xC0000409`), don't hide the error details. - match err - .downcast_ref::() - .as_ref() - .and_then(|perr| perr.exit.and_then(|e| e.code())) - { - Some(n) if n < 128 => Internal::new(err).into(), - _ => err, - } - } - - state.running(&rustc); - let timestamp = paths::set_invocation_time(&fingerprint_dir)?; - if build_plan { - state.build_plan(buildkey, rustc.clone(), outputs.clone()); - } else { - exec.exec( - rustc, - package_id, - &target, - mode, - &mut |line| on_stdout_line(state, line, package_id, &target), - &mut |line| on_stderr_line(state, line, package_id, &target, &mut output_options), - ) - .map_err(internal_if_simple_exit_code) - .chain_err(|| format!("Could not compile `{}`.", name))?; - } - - if do_rename && real_name != crate_name { - let dst = &outputs[0].path; - let src = dst.with_file_name( - dst.file_name() - .unwrap() - .to_str() - .unwrap() - .replace(&real_name, &crate_name), - ); - if src.exists() && src.file_name() != dst.file_name() { - fs::rename(&src, &dst) - .chain_err(|| internal(format!("could not rename crate {:?}", src)))?; - } - } - - if rustc_dep_info_loc.exists() { - fingerprint::translate_dep_info( - &rustc_dep_info_loc, - &dep_info_loc, - &cwd, - &pkg_root, - &target_dir, - // Do not track source files in the fingerprint for registry dependencies. - current_id.source_id().is_path(), - ) - .chain_err(|| { - internal(format!( - "could not parse/generate dep info at: {}", - rustc_dep_info_loc.display() - )) - })?; - filetime::set_file_times(dep_info_loc, timestamp, timestamp)?; - } - - Ok(()) - })); - - // Add all relevant `-L` and `-l` flags from dependencies (now calculated and - // present in `state`) to the command provided. 
- fn add_native_deps( - rustc: &mut ProcessBuilder, - build_state: &BuildMap, - build_scripts: &BuildScripts, - pass_l_flag: bool, - pass_cdylib_link_args: bool, - current_id: PackageId, - ) -> CargoResult<()> { - for key in build_scripts.to_link.iter() { - let output = build_state.get(key).ok_or_else(|| { - internal(format!( - "couldn't find build state for {}/{:?}", - key.0, key.1 - )) - })?; - for path in output.library_paths.iter() { - rustc.arg("-L").arg(path); - } - if key.0 == current_id { - for cfg in &output.cfgs { - rustc.arg("--cfg").arg(cfg); - } - if pass_l_flag { - for name in output.library_links.iter() { - rustc.arg("-l").arg(name); - } - } - if pass_cdylib_link_args { - for arg in output.linker_args.iter() { - let link_arg = format!("link-arg={}", arg); - rustc.arg("-C").arg(link_arg); - } - } - } - } - Ok(()) - } - - // Add all custom environment variables present in `state` (after they've - // been put there by one of the `build_scripts`) to the command provided. - fn add_custom_env( - rustc: &mut ProcessBuilder, - build_state: &BuildMap, - current_id: PackageId, - kind: Kind, - ) -> CargoResult<()> { - let key = (current_id, kind); - if let Some(output) = build_state.get(&key) { - for &(ref name, ref value) in output.env.iter() { - rustc.env(name, value); - } - } - Ok(()) - } -} - -/// Link the compiled target (often of form `foo-{metadata_hash}`) to the -/// final target. This must happen during both "Fresh" and "Compile". -fn link_targets<'a, 'cfg>( - cx: &mut Context<'a, 'cfg>, - unit: &Unit<'a>, - fresh: bool, -) -> CargoResult { - let bcx = cx.bcx; - let outputs = cx.outputs(unit)?; - let export_dir = cx.files().export_dir(); - let package_id = unit.pkg.package_id(); - let profile = unit.profile; - let unit_mode = unit.mode; - let features = bcx - .resolve - .features_sorted(package_id) - .into_iter() - .map(|s| s.to_owned()) - .collect(); - let json_messages = bcx.build_config.emit_json(); - let executable = cx.get_executable(unit)?; - let mut target = unit.target.clone(); - if let TargetSourcePath::Metabuild = target.src_path() { - // Give it something to serialize. - let path = unit.pkg.manifest().metabuild_path(cx.bcx.ws.target_dir()); - target.set_src_path(TargetSourcePath::Path(path)); - } - - Ok(Work::new(move |state| { - // If we're a "root crate", e.g., the target of this compilation, then we - // hard link our outputs out of the `deps` directory into the directory - // above. This means that `cargo build` will produce binaries in - // `target/debug` which one probably expects. - let mut destinations = vec![]; - for output in outputs.iter() { - let src = &output.path; - // This may have been a `cargo rustc` command which changes the - // output, so the source may not actually exist. 
- if !src.exists() { - continue; - } - let dst = match output.hardlink.as_ref() { - Some(dst) => dst, - None => { - destinations.push(src.clone()); - continue; - } - }; - destinations.push(dst.clone()); - hardlink_or_copy(src, dst)?; - if let Some(ref path) = output.export_path { - let export_dir = export_dir.as_ref().unwrap(); - if !export_dir.exists() { - fs::create_dir_all(export_dir)?; - } - - hardlink_or_copy(src, path)?; - } - } - - if json_messages { - let art_profile = machine_message::ArtifactProfile { - opt_level: profile.opt_level.as_str(), - debuginfo: profile.debuginfo, - debug_assertions: profile.debug_assertions, - overflow_checks: profile.overflow_checks, - test: unit_mode.is_any_test(), - }; - - let msg = machine_message::Artifact { - package_id, - target: &target, - profile: art_profile, - features, - filenames: destinations, - executable, - fresh, - } - .to_json_string(); - state.stdout(msg); - } - Ok(()) - })) -} - -fn hardlink_or_copy(src: &Path, dst: &Path) -> CargoResult<()> { - debug!("linking {} to {}", src.display(), dst.display()); - if is_same_file(src, dst).unwrap_or(false) { - return Ok(()); - } - if dst.exists() { - paths::remove_file(&dst)?; - } - - let link_result = if src.is_dir() { - #[cfg(target_os = "redox")] - use std::os::redox::fs::symlink; - #[cfg(unix)] - use std::os::unix::fs::symlink; - #[cfg(windows)] - use std::os::windows::fs::symlink_dir as symlink; - - let dst_dir = dst.parent().unwrap(); - let src = if src.starts_with(dst_dir) { - src.strip_prefix(dst_dir).unwrap() - } else { - src - }; - symlink(src, dst) - } else { - fs::hard_link(src, dst) - }; - link_result - .or_else(|err| { - debug!("link failed {}. falling back to fs::copy", err); - fs::copy(src, dst).map(|_| ()) - }) - .chain_err(|| { - format!( - "failed to link or copy `{}` to `{}`", - src.display(), - dst.display() - ) - })?; - Ok(()) -} - -fn load_build_deps(cx: &Context<'_, '_>, unit: &Unit<'_>) -> Option> { - cx.build_scripts.get(unit).cloned() -} - -// For all plugin dependencies, add their -L paths (now calculated and -// present in `state`) to the dynamic library load path for the command to -// execute. -fn add_plugin_deps( - rustc: &mut ProcessBuilder, - build_state: &BuildMap, - build_scripts: &BuildScripts, - root_output: &PathBuf, -) -> CargoResult<()> { - let var = util::dylib_path_envvar(); - let search_path = rustc.get_env(var).unwrap_or_default(); - let mut search_path = env::split_paths(&search_path).collect::>(); - for &id in build_scripts.plugins.iter() { - let output = build_state - .get(&(id, Kind::Host)) - .ok_or_else(|| internal(format!("couldn't find libs for plugin dep {}", id)))?; - search_path.append(&mut filter_dynamic_search_path( - output.library_paths.iter(), - root_output, - )); - } - let search_path = join_paths(&search_path, var)?; - rustc.env(var, &search_path); - Ok(()) -} - -// Determine paths to add to the dynamic search path from -L entries -// -// Strip off prefixes like "native=" or "framework=" and filter out directories -// **not** inside our output directory since they are likely spurious and can cause -// clashes with system shared libraries (issue #3366). 
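[Editorial aside] The hard-link-with-copy-fallback strategy in `hardlink_or_copy` above can be sketched as follows, assuming a regular file and ignoring the same-file and directory/symlink cases; the file names are illustrative.

use std::fs;
use std::io;
use std::path::Path;

// Sketch of the hard-link-then-copy fallback used by `hardlink_or_copy`,
// minus the is_same_file check and the directory/symlink branch.
fn link_or_copy(src: &Path, dst: &Path) -> io::Result<()> {
    if dst.exists() {
        fs::remove_file(dst)?;
    }
    fs::hard_link(src, dst).or_else(|err| {
        // e.g. a cross-device link or an unsupported filesystem: copy instead
        eprintln!("hard link failed ({}), falling back to copy", err);
        fs::copy(src, dst).map(|_| ())
    })
}

fn main() -> io::Result<()> {
    let dir = std::env::temp_dir();
    let src = dir.join("cargo_link_demo_src.txt");
    let dst = dir.join("cargo_link_demo_dst.txt");
    fs::write(&src, b"demo")?;
    link_or_copy(&src, &dst)?;
    assert_eq!(fs::read(&dst)?, b"demo");
    Ok(())
}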
-fn filter_dynamic_search_path<'a, I>(paths: I, root_output: &PathBuf) -> Vec -where - I: Iterator, -{ - let mut search_path = vec![]; - for dir in paths { - let dir = match dir.to_str() { - Some(s) => { - let mut parts = s.splitn(2, '='); - match (parts.next(), parts.next()) { - (Some("native"), Some(path)) - | (Some("crate"), Some(path)) - | (Some("dependency"), Some(path)) - | (Some("framework"), Some(path)) - | (Some("all"), Some(path)) => path.into(), - _ => dir.clone(), - } - } - None => dir.clone(), - }; - if dir.starts_with(&root_output) { - search_path.push(dir); - } else { - debug!( - "Not including path {} in runtime library search path because it is \ - outside target root {}", - dir.display(), - root_output.display() - ); - } - } - search_path -} - -fn prepare_rustc<'a, 'cfg>( - cx: &mut Context<'a, 'cfg>, - crate_types: &[&str], - unit: &Unit<'a>, -) -> CargoResult { - let is_primary = cx.is_primary_package(unit); - - let mut base = cx - .compilation - .rustc_process(unit.pkg, unit.target, is_primary)?; - base.inherit_jobserver(&cx.jobserver); - build_base_args(cx, &mut base, unit, crate_types)?; - build_deps_args(&mut base, cx, unit)?; - Ok(base) -} - -fn rustdoc<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult { - let bcx = cx.bcx; - let mut rustdoc = cx.compilation.rustdoc_process(unit.pkg, unit.target)?; - rustdoc.inherit_jobserver(&cx.jobserver); - rustdoc.arg("--crate-name").arg(&unit.target.crate_name()); - add_path_args(bcx, unit, &mut rustdoc); - add_cap_lints(bcx, unit, &mut rustdoc); - add_color(bcx, &mut rustdoc); - - if unit.kind != Kind::Host { - if let Some(ref target) = bcx.build_config.requested_target { - rustdoc.arg("--target").arg(target); - } - } - - let doc_dir = cx.files().out_dir(unit); - - // Create the documentation directory ahead of time as rustdoc currently has - // a bug where concurrent invocations will race to create this directory if - // it doesn't already exist. - fs::create_dir_all(&doc_dir)?; - - rustdoc.arg("-o").arg(doc_dir); - - for feat in bcx.resolve.features_sorted(unit.pkg.package_id()) { - rustdoc.arg("--cfg").arg(&format!("feature=\"{}\"", feat)); - } - - add_error_format(cx, &mut rustdoc, false, false)?; - - if let Some(args) = bcx.extra_args_for(unit) { - rustdoc.args(args); - } - - build_deps_args(&mut rustdoc, cx, unit)?; - - rustdoc.args(bcx.rustdocflags_args(unit)); - - let name = unit.pkg.name().to_string(); - let build_state = cx.build_state.clone(); - let key = (unit.pkg.package_id(), unit.kind); - let package_id = unit.pkg.package_id(); - let target = unit.target.clone(); - let mut output_options = OutputOptions::new(cx, unit); - - Ok(Work::new(move |state| { - if let Some(output) = build_state.outputs.lock().unwrap().get(&key) { - for cfg in output.cfgs.iter() { - rustdoc.arg("--cfg").arg(cfg); - } - for &(ref name, ref value) in output.env.iter() { - rustdoc.env(name, value); - } - } - state.running(&rustdoc); - - rustdoc - .exec_with_streaming( - &mut |line| on_stdout_line(state, line, package_id, &target), - &mut |line| on_stderr_line(state, line, package_id, &target, &mut output_options), - false, - ) - .chain_err(|| format!("Could not document `{}`.", name))?; - Ok(()) - })) -} - -// The path that we pass to rustc is actually fairly important because it will -// show up in error messages (important for readability), debug information -// (important for caching), etc. As a result we need to be pretty careful how we -// actually invoke rustc. 
-// -// In general users don't expect `cargo build` to cause rebuilds if you change -// directories. That could be if you just change directories in the package or -// if you literally move the whole package wholesale to a new directory. As a -// result we mostly don't factor in `cwd` to this calculation. Instead we try to -// track the workspace as much as possible and we update the current directory -// of rustc/rustdoc where appropriate. -// -// The first returned value here is the argument to pass to rustc, and the -// second is the cwd that rustc should operate in. -fn path_args(bcx: &BuildContext<'_, '_>, unit: &Unit<'_>) -> (PathBuf, PathBuf) { - let ws_root = bcx.ws.root(); - let src = match unit.target.src_path() { - TargetSourcePath::Path(path) => path.to_path_buf(), - TargetSourcePath::Metabuild => unit.pkg.manifest().metabuild_path(bcx.ws.target_dir()), - }; - assert!(src.is_absolute()); - if unit.pkg.package_id().source_id().is_path() { - if let Ok(path) = src.strip_prefix(ws_root) { - return (path.to_path_buf(), ws_root.to_path_buf()); - } - } - (src, unit.pkg.root().to_path_buf()) -} - -fn add_path_args(bcx: &BuildContext<'_, '_>, unit: &Unit<'_>, cmd: &mut ProcessBuilder) { - let (arg, cwd) = path_args(bcx, unit); - cmd.arg(arg); - cmd.cwd(cwd); -} - -fn add_cap_lints(bcx: &BuildContext<'_, '_>, unit: &Unit<'_>, cmd: &mut ProcessBuilder) { - // If this is an upstream dep we don't want warnings from, turn off all - // lints. - if !bcx.show_warnings(unit.pkg.package_id()) { - cmd.arg("--cap-lints").arg("allow"); - - // If this is an upstream dep but we *do* want warnings, make sure that they - // don't fail compilation. - } else if !unit.pkg.package_id().source_id().is_path() { - cmd.arg("--cap-lints").arg("warn"); - } -} - -fn add_color(bcx: &BuildContext<'_, '_>, cmd: &mut ProcessBuilder) { - let shell = bcx.config.shell(); - let color = if shell.supports_color() { - "always" - } else { - "never" - }; - cmd.args(&["--color", color]); -} - -/// Add error-format flags to the command. -/// -/// This is rather convoluted right now. The general overview is: -/// - If -Zcache-messages or `build.pipelining` is enabled, Cargo always uses -/// JSON output. This has several benefits, such as being easier to parse, -/// handles changing formats (for replaying cached messages), ensures -/// atomic output (so messages aren't interleaved), etc. -/// - `supports_termcolor` is a temporary flag. rustdoc does not yet support -/// the `--json-rendered` flag, but it is intended to fix that soon. -/// - `short` output is not yet supported for JSON output. We haven't yet -/// decided how this problem will be resolved. Probably either adding -/// "short" to the JSON output, or more ambitiously moving diagnostic -/// rendering to an external library that Cargo can share with rustc. -/// -/// It is intended in the future that Cargo *always* uses the JSON output, and -/// this function can be simplified. The above issues need to be resolved, the -/// flags need to be stabilized, and we need more testing to ensure there -/// aren't any regressions. -fn add_error_format( - cx: &Context<'_, '_>, - cmd: &mut ProcessBuilder, - pipelined: bool, - supports_termcolor: bool, -) -> CargoResult<()> { - // If this unit is producing a required rmeta file then we need to know - // when the rmeta file is ready so we can signal to the rest of Cargo that - // it can continue dependent compilations. 
To do this we are currently - // required to switch the compiler into JSON message mode, but we still - // want to present human readable errors as well. (this rabbit hole just - // goes and goes) - // - // All that means is that if we're not already in JSON mode we need to - // switch to JSON mode, ensure that rustc error messages can be rendered - // prettily, and then when parsing JSON messages from rustc we need to - // internally understand that we should extract the `rendered` field and - // present it if we can. - if cx.bcx.build_config.cache_messages() || pipelined { - cmd.arg("--error-format=json").arg("-Zunstable-options"); - if supports_termcolor { - cmd.arg("--json-rendered=termcolor"); - } - if cx.bcx.build_config.message_format == MessageFormat::Short { - // FIXME(rust-lang/rust#60419): right now we have no way of - // turning on JSON messages from the compiler and also asking - // the rendered field to be in the `short` format. - bail!( - "currently `--message-format short` is incompatible with {}", - if pipelined { - "pipelined compilation" - } else { - "cached output" - } - ); - } - if pipelined { - cmd.arg("-Zemit-artifact-notifications"); - } - } else { - match cx.bcx.build_config.message_format { - MessageFormat::Human => (), - MessageFormat::Json => { - cmd.arg("--error-format").arg("json"); - } - MessageFormat::Short => { - cmd.arg("--error-format").arg("short"); - } - } - } - Ok(()) -} - -fn build_base_args<'a, 'cfg>( - cx: &mut Context<'a, 'cfg>, - cmd: &mut ProcessBuilder, - unit: &Unit<'a>, - crate_types: &[&str], -) -> CargoResult<()> { - assert!(!unit.mode.is_run_custom_build()); - - let bcx = cx.bcx; - let Profile { - ref opt_level, - ref lto, - codegen_units, - debuginfo, - debug_assertions, - overflow_checks, - rpath, - ref panic, - incremental, - .. - } = unit.profile; - let test = unit.mode.is_any_test(); - - cmd.arg("--crate-name").arg(&unit.target.crate_name()); - - add_path_args(bcx, unit, cmd); - add_color(bcx, cmd); - add_error_format(cx, cmd, cx.rmeta_required(unit), true)?; - - if !test { - for crate_type in crate_types.iter() { - cmd.arg("--crate-type").arg(crate_type); - } - } - - if unit.mode.is_check() { - cmd.arg("--emit=dep-info,metadata"); - } else if !unit.requires_upstream_objects() { - // Always produce metdata files for rlib outputs. Metadata may be used - // in this session for a pipelined compilation, or it may be used in a - // future Cargo session as part of a pipelined compile. - cmd.arg("--emit=dep-info,metadata,link"); - } else { - cmd.arg("--emit=dep-info,link"); - } - - let prefer_dynamic = (unit.target.for_host() && !unit.target.is_custom_build()) - || (crate_types.contains(&"dylib") && bcx.ws.members().any(|p| p != unit.pkg)); - if prefer_dynamic { - cmd.arg("-C").arg("prefer-dynamic"); - } - - if opt_level.as_str() != "0" { - cmd.arg("-C").arg(&format!("opt-level={}", opt_level)); - } - - if *panic != PanicStrategy::Unwind { - cmd.arg("-C").arg(format!("panic={}", panic)); - } - - // Disable LTO for host builds as prefer_dynamic and it are mutually - // exclusive. - if unit.target.can_lto() && !unit.target.for_host() { - match *lto { - Lto::Bool(false) => {} - Lto::Bool(true) => { - cmd.args(&["-C", "lto"]); - } - Lto::Named(ref s) => { - cmd.arg("-C").arg(format!("lto={}", s)); - } - } - } - - if let Some(n) = codegen_units { - // There are some restrictions with LTO and codegen-units, so we - // only add codegen units when LTO is not used. 
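[Editorial aside] The `--emit` selection in `build_base_args` condenses to the decision below; the booleans stand in for `unit.mode.is_check()` and `unit.requires_upstream_objects()`, and this is a re-statement for clarity, not Cargo's code.

// Condensed view of the `--emit` selection in `build_base_args`.
fn emit_flag(is_check: bool, requires_upstream_objects: bool) -> &'static str {
    if is_check {
        "--emit=dep-info,metadata"
    } else if !requires_upstream_objects {
        // rlib-like outputs also get metadata so dependants can start from
        // the .rmeta alone, in this session (pipelining) or a later one
        "--emit=dep-info,metadata,link"
    } else {
        "--emit=dep-info,link"
    }
}

fn main() {
    assert_eq!(emit_flag(true, false), "--emit=dep-info,metadata");
    assert_eq!(emit_flag(false, false), "--emit=dep-info,metadata,link");
    assert_eq!(emit_flag(false, true), "--emit=dep-info,link");
}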
- cmd.arg("-C").arg(&format!("codegen-units={}", n)); - } - - if let Some(debuginfo) = debuginfo { - cmd.arg("-C").arg(format!("debuginfo={}", debuginfo)); - } - - if let Some(args) = bcx.extra_args_for(unit) { - cmd.args(args); - } - - // `-C overflow-checks` is implied by the setting of `-C debug-assertions`, - // so we only need to provide `-C overflow-checks` if it differs from - // the value of `-C debug-assertions` we would provide. - if opt_level.as_str() != "0" { - if debug_assertions { - cmd.args(&["-C", "debug-assertions=on"]); - if !overflow_checks { - cmd.args(&["-C", "overflow-checks=off"]); - } - } else if overflow_checks { - cmd.args(&["-C", "overflow-checks=on"]); - } - } else if !debug_assertions { - cmd.args(&["-C", "debug-assertions=off"]); - if overflow_checks { - cmd.args(&["-C", "overflow-checks=on"]); - } - } else if !overflow_checks { - cmd.args(&["-C", "overflow-checks=off"]); - } - - if test && unit.target.harness() { - cmd.arg("--test"); - } else if test { - cmd.arg("--cfg").arg("test"); - } - - // We ideally want deterministic invocations of rustc to ensure that - // rustc-caching strategies like sccache are able to cache more, so sort the - // feature list here. - for feat in bcx.resolve.features_sorted(unit.pkg.package_id()) { - cmd.arg("--cfg").arg(&format!("feature=\"{}\"", feat)); - } - - match cx.files().metadata(unit) { - Some(m) => { - cmd.arg("-C").arg(&format!("metadata={}", m)); - cmd.arg("-C").arg(&format!("extra-filename=-{}", m)); - } - None => { - cmd.arg("-C") - .arg(&format!("metadata={}", cx.files().target_short_hash(unit))); - } - } - - if rpath { - cmd.arg("-C").arg("rpath"); - } - - cmd.arg("--out-dir").arg(&cx.files().out_dir(unit)); - - fn opt(cmd: &mut ProcessBuilder, key: &str, prefix: &str, val: Option<&OsStr>) { - if let Some(val) = val { - let mut joined = OsString::from(prefix); - joined.push(val); - cmd.arg(key).arg(joined); - } - } - - if unit.kind == Kind::Target { - opt( - cmd, - "--target", - "", - bcx.build_config - .requested_target - .as_ref() - .map(|s| s.as_ref()), - ); - } - - opt(cmd, "-C", "ar=", bcx.ar(unit.kind).map(|s| s.as_ref())); - opt( - cmd, - "-C", - "linker=", - bcx.linker(unit.kind).map(|s| s.as_ref()), - ); - if incremental { - let dir = cx.files().layout(unit.kind).incremental().as_os_str(); - opt(cmd, "-C", "incremental=", Some(dir)); - } - Ok(()) -} - -fn build_deps_args<'a, 'cfg>( - cmd: &mut ProcessBuilder, - cx: &mut Context<'a, 'cfg>, - unit: &Unit<'a>, -) -> CargoResult<()> { - let bcx = cx.bcx; - cmd.arg("-L").arg(&{ - let mut deps = OsString::from("dependency="); - deps.push(cx.files().deps_dir(unit)); - deps - }); - - // Be sure that the host path is also listed. This'll ensure that proc macro - // dependencies are correctly found (for reexported macros). - if let Kind::Target = unit.kind { - cmd.arg("-L").arg(&{ - let mut deps = OsString::from("dependency="); - deps.push(cx.files().host_deps()); - deps - }); - } - - let dep_targets = cx.dep_targets(unit); - - // If there is not one linkable target but should, rustc fails later - // on if there is an `extern crate` for it. This may turn into a hard - // error in the future (see PR #4797). - if !dep_targets - .iter() - .any(|u| !u.mode.is_doc() && u.target.linkable()) - { - if let Some(u) = dep_targets - .iter() - .find(|u| !u.mode.is_doc() && u.target.is_lib()) - { - bcx.config.shell().warn(format!( - "The package `{}` \ - provides no linkable target. The compiler might raise an error while compiling \ - `{}`. 
Consider adding 'dylib' or 'rlib' to key `crate-type` in `{}`'s \ - Cargo.toml. This warning might turn into a hard error in the future.", - u.target.crate_name(), - unit.target.crate_name(), - u.target.crate_name() - ))?; - } - } - - let mut unstable_opts = false; - - for dep in dep_targets { - if dep.mode.is_run_custom_build() { - cmd.env("OUT_DIR", &cx.files().build_script_out_dir(&dep)); - } - if dep.target.linkable() && !dep.mode.is_doc() { - link_to(cmd, cx, unit, &dep, &mut unstable_opts)?; - } - } - - // This will only be set if we're already using a feature - // requiring nightly rust - if unstable_opts { - cmd.arg("-Z").arg("unstable-options"); - } - - return Ok(()); - - fn link_to<'a, 'cfg>( - cmd: &mut ProcessBuilder, - cx: &mut Context<'a, 'cfg>, - current: &Unit<'a>, - dep: &Unit<'a>, - need_unstable_opts: &mut bool, - ) -> CargoResult<()> { - let bcx = cx.bcx; - - let mut value = OsString::new(); - value.push(bcx.extern_crate_name(current, dep)?); - value.push("="); - - let mut pass = |file| { - let mut value = value.clone(); - value.push(file); - - if current - .pkg - .manifest() - .features() - .require(Feature::public_dependency()) - .is_ok() - && !bcx.is_public_dependency(current, dep) - { - cmd.arg("--extern-private"); - *need_unstable_opts = true; - } else { - cmd.arg("--extern"); - } - - cmd.arg(&value); - }; - - let outputs = cx.outputs(dep)?; - let mut outputs = outputs.iter().filter_map(|output| match output.flavor { - FileFlavor::Linkable { rmeta } => Some((output, rmeta)), - _ => None, - }); - - if cx.only_requires_rmeta(current, dep) { - let (output, _rmeta) = outputs - .find(|(_output, rmeta)| *rmeta) - .expect("failed to find rlib dep for pipelined dep"); - pass(&output.path); - } else { - for (output, rmeta) in outputs { - if !rmeta { - pass(&output.path); - } - } - } - Ok(()) - } -} - -fn envify(s: &str) -> String { - s.chars() - .flat_map(|c| c.to_uppercase()) - .map(|c| if c == '-' { '_' } else { c }) - .collect() -} - -impl Kind { - fn for_target(self, target: &Target) -> Kind { - // Once we start compiling for the `Host` kind we continue doing so, but - // if we are a `Target` kind and then we start compiling for a target - // that needs to be on the host we lift ourselves up to `Host`. - match self { - Kind::Host => Kind::Host, - Kind::Target if target.for_host() => Kind::Host, - Kind::Target => Kind::Target, - } - } -} - -struct OutputOptions { - /// Get the `"rendered"` field from the JSON output and display it on - /// stderr instead of the JSON message. - extract_rendered_messages: bool, - /// Look for JSON message that indicates .rmeta file is available for - /// pipelined compilation. - look_for_metadata_directive: bool, - /// Whether or not to display messages in color. - color: bool, - /// Where to write the JSON messages to support playback later if the unit - /// is fresh. The file is created lazily so that in the normal case, lots - /// of empty files are not created. This is None if caching is disabled. - cache_cell: Option<(PathBuf, LazyCell)>, -} - -impl OutputOptions { - fn new<'a>(cx: &Context<'a, '_>, unit: &Unit<'a>) -> OutputOptions { - let extract_rendered_messages = cx.bcx.build_config.message_format != MessageFormat::Json; - let look_for_metadata_directive = cx.rmeta_required(unit); - let color = cx.bcx.config.shell().supports_color(); - let cache_cell = if cx.bcx.build_config.cache_messages() { - let path = cx.files().message_cache_path(unit); - // Remove old cache, ignore ENOENT, which is the common case. 
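[Editorial aside] `envify` above is what turns a `links` key and a metadata key into the `DEP_<LINKS>_<KEY>` environment variables passed to dependent build scripts earlier in this patch. A minimal sketch, with illustrative package and key names:

// Same transformation as `envify` above: uppercase and map `-` to `_`.
fn envify(s: &str) -> String {
    s.chars()
        .flat_map(|c| c.to_uppercase())
        .map(|c| if c == '-' { '_' } else { c })
        .collect()
}

fn main() {
    assert_eq!(envify("libgit2-sys"), "LIBGIT2_SYS");
    // A `cargo:root=/x` line from a crate with `links = "git2"` would surface
    // to dependents as the variable below (illustrative values).
    assert_eq!(
        format!("DEP_{}_{}", envify("git2"), envify("root")),
        "DEP_GIT2_ROOT"
    );
}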
- drop(fs::remove_file(&path)); - Some((path, LazyCell::new())) - } else { - None - }; - OutputOptions { - extract_rendered_messages, - look_for_metadata_directive, - color, - cache_cell, - } - } -} - -fn on_stdout_line( - state: &JobState<'_>, - line: &str, - _package_id: PackageId, - _target: &Target, -) -> CargoResult<()> { - state.stdout(line.to_string()); - Ok(()) -} - -fn on_stderr_line( - state: &JobState<'_>, - line: &str, - package_id: PackageId, - target: &Target, - options: &mut OutputOptions, -) -> CargoResult<()> { - // Check if caching is enabled. - if let Some((path, cell)) = &mut options.cache_cell { - // Cache the output, which will be replayed later when Fresh. - let f = cell.try_borrow_mut_with(|| File::create(path))?; - debug_assert!(!line.contains('\n')); - f.write_all(line.as_bytes())?; - f.write_all(&[b'\n'])?; - } - - // We primarily want to use this function to process JSON messages from - // rustc. The compiler should always print one JSON message per line, and - // otherwise it may have other output intermingled (think RUST_LOG or - // something like that), so skip over everything that doesn't look like a - // JSON message. - if !line.starts_with('{') { - state.stderr(line.to_string()); - return Ok(()); - } - - let mut compiler_message: Box = match serde_json::from_str(line) { - Ok(msg) => msg, - - // If the compiler produced a line that started with `{` but it wasn't - // valid JSON, maybe it wasn't JSON in the first place! Forward it along - // to stderr. - Err(e) => { - debug!("failed to parse json: {:?}", e); - state.stderr(line.to_string()); - return Ok(()); - } - }; - - // In some modes of compilation Cargo switches the compiler to JSON mode - // but the user didn't request that so we still want to print pretty rustc - // colorized diagnostics. In those cases (`extract_rendered_messages`) we - // take a look at the JSON blob we go, see if it's a relevant diagnostics, - // and if so forward just that diagnostic for us to print. - if options.extract_rendered_messages { - #[derive(serde::Deserialize)] - struct CompilerMessage { - rendered: String, - } - if let Ok(mut error) = serde_json::from_str::(compiler_message.get()) { - // state.stderr will add a newline - if error.rendered.ends_with('\n') { - error.rendered.pop(); - } - let rendered = if options.color { - error.rendered - } else { - // Strip only fails if the the Writer fails, which is Cursor - // on a Vec, which should never fail. - strip_ansi_escapes::strip(&error.rendered) - .map(|v| String::from_utf8(v).expect("utf8")) - .expect("strip should never fail") - }; - state.stderr(rendered); - return Ok(()); - } - } else { - // Remove color information from the rendered string. rustc has not - // included color in the past, so to avoid breaking anything, strip it - // out when --json-rendered=termcolor is used. This runs - // unconditionally under the assumption that Cargo will eventually - // move to this as the default mode. Perhaps in the future, cargo - // could allow the user to enable/disable color (such as with a - // `--json-rendered` or `--color` or `--message-format` flag). 
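[Editorial aside] A minimal version of the `rendered`-field extraction above, using `serde_json::Value` rather than a derived struct; this assumes `serde_json` as a dependency (which the surrounding code already uses), and the sample diagnostic is abbreviated and illustrative.

// Pull the human-readable `rendered` text out of a rustc JSON diagnostic,
// trimming the trailing newline because the caller adds its own.
fn rendered(line: &str) -> Option<String> {
    let msg: serde_json::Value = serde_json::from_str(line).ok()?;
    let mut text = msg.get("rendered")?.as_str()?.to_string();
    if text.ends_with('\n') {
        text.pop();
    }
    Some(text)
}

fn main() {
    let line = r#"{"message":"unused variable: `x`","rendered":"warning: unused variable: `x`\n"}"#;
    assert_eq!(
        rendered(line).as_deref(),
        Some("warning: unused variable: `x`")
    );
    assert_eq!(rendered("not json"), None);
}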
- #[derive(serde::Deserialize, serde::Serialize)] - struct CompilerMessage { - rendered: String, - #[serde(flatten)] - other: std::collections::BTreeMap, - } - if let Ok(mut error) = serde_json::from_str::(compiler_message.get()) { - error.rendered = strip_ansi_escapes::strip(&error.rendered) - .map(|v| String::from_utf8(v).expect("utf8")) - .unwrap_or(error.rendered); - let new_line = serde_json::to_string(&error)?; - let new_msg: Box = serde_json::from_str(&new_line)?; - compiler_message = new_msg; - } - } - - // In some modes of execution we will execute rustc with `-Z - // emit-artifact-notifications` to look for metadata files being produced. When this - // happens we may be able to start subsequent compilations more quickly than - // waiting for an entire compile to finish, possibly using more parallelism - // available to complete a compilation session more quickly. - // - // In these cases look for a matching directive and inform Cargo internally - // that a metadata file has been produced. - if options.look_for_metadata_directive { - #[derive(serde::Deserialize)] - struct ArtifactNotification { - artifact: String, - } - if let Ok(artifact) = serde_json::from_str::(compiler_message.get()) { - log::trace!("found directive from rustc: `{}`", artifact.artifact); - if artifact.artifact.ends_with(".rmeta") { - log::debug!("looks like metadata finished early!"); - state.rmeta_produced(); - } - return Ok(()); - } - } - - // And failing all that above we should have a legitimate JSON diagnostic - // from the compiler, so wrap it in an external Cargo JSON message - // indicating which package it came from and then emit it. - let msg = machine_message::FromCompiler { - package_id, - target, - message: compiler_message, - } - .to_json_string(); - - // Switch json lines from rustc/rustdoc that appear on stderr to stdout - // instead. We want the stdout of Cargo to always be machine parseable as - // stderr has our colorized human-readable messages. - state.stdout(msg); - Ok(()) -} - -fn replay_output_cache( - package_id: PackageId, - target: &Target, - path: PathBuf, - format: MessageFormat, - color: bool, -) -> Work { - let target = target.clone(); - let extract_rendered_messages = match format { - MessageFormat::Human => true, - MessageFormat::Json => false, - // FIXME: short not supported. - MessageFormat::Short => false, - }; - let mut options = OutputOptions { - extract_rendered_messages, - look_for_metadata_directive: false, - color, - cache_cell: None, - }; - Work::new(move |state| { - if !path.exists() { - // No cached output, probably didn't emit anything. 
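[Editorial aside] The artifact-notification handling above reduces to looking for a JSON line whose `artifact` path ends in `.rmeta`, meaning dependants that only need metadata can start early. A sketch under the same `serde_json` assumption, with an illustrative path:

// Detect an `-Z emit-artifact-notifications` message announcing an .rmeta.
fn is_rmeta_notification(line: &str) -> bool {
    serde_json::from_str::<serde_json::Value>(line)
        .ok()
        .and_then(|v| v.get("artifact")?.as_str().map(|p| p.ends_with(".rmeta")))
        .unwrap_or(false)
}

fn main() {
    assert!(is_rmeta_notification(
        r#"{"artifact":"target/debug/deps/libfoo-0123.rmeta"}"#
    ));
    assert!(!is_rmeta_notification(r#"{"message":"warning: ..."}"#));
}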
- return Ok(()); - } - let contents = fs::read_to_string(&path)?; - for line in contents.lines() { - on_stderr_line(state, line, package_id, &target, &mut options)?; - } - Ok(()) - }) -} +mod build_config; +mod build_context; +mod build_plan; +mod compilation; +mod context; +mod custom_build; +mod fingerprint; +mod job; +mod job_queue; +mod layout; +mod output_depinfo; +mod unit; + +use std::env; +use std::ffi::{OsStr, OsString}; +use std::fs::{self, File}; +use std::io::Write; +use std::path::{Path, PathBuf}; +use std::sync::Arc; + +use failure::{bail, Error}; +use lazycell::LazyCell; +use log::debug; +use same_file::is_same_file; +use serde::Serialize; + +pub use self::build_config::{BuildConfig, CompileMode, MessageFormat}; +pub use self::build_context::{BuildContext, FileFlavor, TargetConfig, TargetInfo}; +use self::build_plan::BuildPlan; +pub use self::compilation::{Compilation, Doctest}; +pub use self::context::Context; +pub use self::custom_build::{BuildMap, BuildOutput, BuildScripts}; +pub use self::job::Freshness; +use self::job::{Job, Work}; +use self::job_queue::{JobQueue, JobState}; +pub use self::layout::is_bad_artifact_name; +use self::output_depinfo::output_depinfo; +pub use crate::core::compiler::unit::{Unit, UnitInterner}; +use crate::core::manifest::TargetSourcePath; +use crate::core::profiles::{Lto, PanicStrategy, Profile}; +use crate::core::Feature; +use crate::core::{PackageId, Target}; +use crate::util::errors::{CargoResult, CargoResultExt, Internal, ProcessError}; +use crate::util::machine_message::Message; +use crate::util::paths; +use crate::util::{self, machine_message, ProcessBuilder}; +use crate::util::{internal, join_paths, profile}; + +/// Indicates whether an object is for the host architcture or the target architecture. +/// +/// These will be the same unless cross-compiling. +#[derive(PartialEq, Eq, Hash, Debug, Clone, Copy, PartialOrd, Ord, Serialize)] +pub enum Kind { + Host, + Target, +} + +/// A glorified callback for executing calls to rustc. Rather than calling rustc +/// directly, we'll use an `Executor`, giving clients an opportunity to intercept +/// the build calls. +pub trait Executor: Send + Sync + 'static { + /// Called after a rustc process invocation is prepared up-front for a given + /// unit of work (may still be modified for runtime-known dependencies, when + /// the work is actually executed). + fn init<'a, 'cfg>(&self, _cx: &Context<'a, 'cfg>, _unit: &Unit<'a>) {} + + /// In case of an `Err`, Cargo will not continue with the build process for + /// this package. + fn exec( + &self, + cmd: ProcessBuilder, + id: PackageId, + target: &Target, + mode: CompileMode, + on_stdout_line: &mut dyn FnMut(&str) -> CargoResult<()>, + on_stderr_line: &mut dyn FnMut(&str) -> CargoResult<()>, + ) -> CargoResult<()>; + + /// Queried when queuing each unit of work. If it returns true, then the + /// unit will always be rebuilt, independent of whether it needs to be. + fn force_rebuild(&self, _unit: &Unit<'_>) -> bool { + false + } +} + +/// A `DefaultExecutor` calls rustc without doing anything else. It is Cargo's +/// default behaviour. 
+#[derive(Copy, Clone)] +pub struct DefaultExecutor; + +impl Executor for DefaultExecutor { + fn exec( + &self, + cmd: ProcessBuilder, + _id: PackageId, + _target: &Target, + _mode: CompileMode, + on_stdout_line: &mut dyn FnMut(&str) -> CargoResult<()>, + on_stderr_line: &mut dyn FnMut(&str) -> CargoResult<()>, + ) -> CargoResult<()> { + cmd.exec_with_streaming(on_stdout_line, on_stderr_line, false) + .map(drop) + } +} + +fn compile<'a, 'cfg: 'a>( + cx: &mut Context<'a, 'cfg>, + jobs: &mut JobQueue<'a, 'cfg>, + plan: &mut BuildPlan, + unit: &Unit<'a>, + exec: &Arc, + force_rebuild: bool, +) -> CargoResult<()> { + let bcx = cx.bcx; + let build_plan = bcx.build_config.build_plan; + if !cx.compiled.insert(*unit) { + return Ok(()); + } + + // Build up the work to be done to compile this unit, enqueuing it once + // we've got everything constructed. + let p = profile::start(format!("preparing: {}/{}", unit.pkg, unit.target.name())); + fingerprint::prepare_init(cx, unit)?; + cx.links.validate(bcx.resolve, unit)?; + + let job = if unit.mode.is_run_custom_build() { + custom_build::prepare(cx, unit)? + } else if unit.mode.is_doc_test() { + // We run these targets later, so this is just a no-op for now. + Job::new(Work::noop(), Freshness::Fresh) + } else if build_plan { + Job::new(rustc(cx, unit, &exec.clone())?, Freshness::Dirty) + } else { + let force = exec.force_rebuild(unit) || force_rebuild; + let mut job = fingerprint::prepare_target(cx, unit, force)?; + job.before(if job.freshness() == Freshness::Dirty { + let work = if unit.mode.is_doc() { + rustdoc(cx, unit)? + } else { + rustc(cx, unit, exec)? + }; + work.then(link_targets(cx, unit, false)?) + } else { + let work = if cx.bcx.build_config.cache_messages() + && cx.bcx.show_warnings(unit.pkg.package_id()) + { + replay_output_cache( + unit.pkg.package_id(), + unit.target, + cx.files().message_cache_path(unit), + cx.bcx.build_config.message_format, + cx.bcx.config.shell().supports_color(), + ) + } else { + Work::noop() + }; + // Need to link targets on both the dirty and fresh. + work.then(link_targets(cx, unit, true)?) + }); + + job + }; + jobs.enqueue(cx, unit, job)?; + drop(p); + + // Be sure to compile all dependencies of this target as well. + for unit in cx.dep_targets(unit).iter() { + compile(cx, jobs, plan, unit, exec, false)?; + } + if build_plan { + plan.add(cx, unit)?; + } + + Ok(()) +} + +fn rustc<'a, 'cfg>( + cx: &mut Context<'a, 'cfg>, + unit: &Unit<'a>, + exec: &Arc, +) -> CargoResult { + let mut rustc = prepare_rustc(cx, &unit.target.rustc_crate_types(), unit)?; + let build_plan = cx.bcx.build_config.build_plan; + + let name = unit.pkg.name().to_string(); + let buildkey = unit.buildkey(); + + add_cap_lints(cx.bcx, unit, &mut rustc); + + let outputs = cx.outputs(unit)?; + let root = cx.files().out_dir(unit); + let kind = unit.kind; + + // Prepare the native lib state (extra `-L` and `-l` flags). + let build_state = cx.build_state.clone(); + let current_id = unit.pkg.package_id(); + let build_deps = load_build_deps(cx, unit); + + // If we are a binary and the package also contains a library, then we + // don't pass the `-l` flags. 
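[Editorial aside] Downstream users can plug in their own `Executor`. Below is a sketch of a logging executor that delegates to `DefaultExecutor`; it assumes the `cargo` crate of this vintage as a library dependency, and the `compile_with_exec` wiring mentioned in the comment is how such an executor would typically be used.

use std::sync::Arc;

use cargo::core::compiler::{CompileMode, DefaultExecutor, Executor};
use cargo::core::{PackageId, Target};
use cargo::util::{CargoResult, ProcessBuilder};

// Logs every rustc invocation, then falls back to the stock behaviour.
struct LoggingExecutor;

impl Executor for LoggingExecutor {
    fn exec(
        &self,
        cmd: ProcessBuilder,
        id: PackageId,
        target: &Target,
        mode: CompileMode,
        on_stdout_line: &mut dyn FnMut(&str) -> CargoResult<()>,
        on_stderr_line: &mut dyn FnMut(&str) -> CargoResult<()>,
    ) -> CargoResult<()> {
        eprintln!("compiling {} ({:?}): {}", id, mode, cmd);
        DefaultExecutor.exec(cmd, id, target, mode, on_stdout_line, on_stderr_line)
    }
}

fn main() {
    let _exec: Arc<dyn Executor> = Arc::new(LoggingExecutor);
    // `_exec` would be handed to `cargo::ops::compile_with_exec(...)`.
}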
+ let pass_l_flag = unit.target.is_lib() || !unit.pkg.targets().iter().any(|t| t.is_lib()); + let pass_cdylib_link_args = unit.target.is_cdylib(); + let do_rename = unit.target.allows_underscores() && !unit.mode.is_any_test(); + let real_name = unit.target.name().to_string(); + let crate_name = unit.target.crate_name(); + + // Rely on `target_filenames` iterator as source of truth rather than rederiving filestem. + let rustc_dep_info_loc = if do_rename && cx.files().metadata(unit).is_none() { + root.join(&crate_name) + } else { + root.join(&cx.files().file_stem(unit)) + } + .with_extension("d"); + let dep_info_loc = fingerprint::dep_info_loc(cx, unit); + + rustc.args(cx.bcx.rustflags_args(unit)); + if cx.bcx.config.cli_unstable().binary_dep_depinfo { + rustc.arg("-Zbinary-dep-depinfo"); + } + let mut output_options = OutputOptions::new(cx, unit); + let package_id = unit.pkg.package_id(); + let target = unit.target.clone(); + let mode = unit.mode; + + exec.init(cx, unit); + let exec = exec.clone(); + + let root_output = cx.files().host_root().to_path_buf(); + let target_dir = cx.bcx.ws.target_dir().into_path_unlocked(); + let pkg_root = unit.pkg.root().to_path_buf(); + let cwd = rustc + .get_cwd() + .unwrap_or_else(|| cx.bcx.config.cwd()) + .to_path_buf(); + let fingerprint_dir = cx.files().fingerprint_dir(unit); + + return Ok(Work::new(move |state| { + // Only at runtime have we discovered what the extra -L and -l + // arguments are for native libraries, so we process those here. We + // also need to be sure to add any -L paths for our plugins to the + // dynamic library load path as a plugin's dynamic library may be + // located somewhere in there. + // Finally, if custom environment variables have been produced by + // previous build scripts, we include them in the rustc invocation. + if let Some(build_deps) = build_deps { + let build_state = build_state.outputs.lock().unwrap(); + if !build_plan { + add_native_deps( + &mut rustc, + &build_state, + &build_deps, + pass_l_flag, + pass_cdylib_link_args, + current_id, + )?; + add_plugin_deps(&mut rustc, &build_state, &build_deps, &root_output)?; + } + add_custom_env(&mut rustc, &build_state, current_id, kind)?; + } + + for output in outputs.iter() { + // If there is both an rmeta and rlib, rustc will prefer to use the + // rlib, even if it is older. Therefore, we must delete the rlib to + // force using the new rmeta. + if output.path.extension() == Some(OsStr::new("rmeta")) { + let dst = root.join(&output.path).with_extension("rlib"); + if dst.exists() { + paths::remove_file(&dst)?; + } + } + } + + fn internal_if_simple_exit_code(err: Error) -> Error { + // If a signal on unix (`code == None`) or an abnormal termination + // on Windows (codes like `0xC0000409`), don't hide the error details. 
+ match err + .downcast_ref::() + .as_ref() + .and_then(|perr| perr.exit.and_then(|e| e.code())) + { + Some(n) if n < 128 => Internal::new(err).into(), + _ => err, + } + } + + state.running(&rustc); + let timestamp = paths::set_invocation_time(&fingerprint_dir)?; + if build_plan { + state.build_plan(buildkey, rustc.clone(), outputs.clone()); + } else { + exec.exec( + rustc, + package_id, + &target, + mode, + &mut |line| on_stdout_line(state, line, package_id, &target), + &mut |line| on_stderr_line(state, line, package_id, &target, &mut output_options), + ) + .map_err(internal_if_simple_exit_code) + .chain_err(|| format!("Could not compile `{}`.", name))?; + } + + if do_rename && real_name != crate_name { + let dst = &outputs[0].path; + let src = dst.with_file_name( + dst.file_name() + .unwrap() + .to_str() + .unwrap() + .replace(&real_name, &crate_name), + ); + if src.exists() && src.file_name() != dst.file_name() { + fs::rename(&src, &dst) + .chain_err(|| internal(format!("could not rename crate {:?}", src)))?; + } + } + + if rustc_dep_info_loc.exists() { + fingerprint::translate_dep_info( + &rustc_dep_info_loc, + &dep_info_loc, + &cwd, + &pkg_root, + &target_dir, + // Do not track source files in the fingerprint for registry dependencies. + current_id.source_id().is_path(), + ) + .chain_err(|| { + internal(format!( + "could not parse/generate dep info at: {}", + rustc_dep_info_loc.display() + )) + })?; + filetime::set_file_times(dep_info_loc, timestamp, timestamp)?; + } + + Ok(()) + })); + + // Add all relevant `-L` and `-l` flags from dependencies (now calculated and + // present in `state`) to the command provided. + fn add_native_deps( + rustc: &mut ProcessBuilder, + build_state: &BuildMap, + build_scripts: &BuildScripts, + pass_l_flag: bool, + pass_cdylib_link_args: bool, + current_id: PackageId, + ) -> CargoResult<()> { + for key in build_scripts.to_link.iter() { + let output = build_state.get(key).ok_or_else(|| { + internal(format!( + "couldn't find build state for {}/{:?}", + key.0, key.1 + )) + })?; + for path in output.library_paths.iter() { + rustc.arg("-L").arg(path); + } + if key.0 == current_id { + for cfg in &output.cfgs { + rustc.arg("--cfg").arg(cfg); + } + if pass_l_flag { + for name in output.library_links.iter() { + rustc.arg("-l").arg(name); + } + } + if pass_cdylib_link_args { + for arg in output.linker_args.iter() { + let link_arg = format!("link-arg={}", arg); + rustc.arg("-C").arg(link_arg); + } + } + } + } + Ok(()) + } + + // Add all custom environment variables present in `state` (after they've + // been put there by one of the `build_scripts`) to the command provided. + fn add_custom_env( + rustc: &mut ProcessBuilder, + build_state: &BuildMap, + current_id: PackageId, + kind: Kind, + ) -> CargoResult<()> { + let key = (current_id, kind); + if let Some(output) = build_state.get(&key) { + for &(ref name, ref value) in output.env.iter() { + rustc.env(name, value); + } + } + Ok(()) + } +} + +/// Link the compiled target (often of form `foo-{metadata_hash}`) to the +/// final target. This must happen during both "Fresh" and "Compile". 
+fn link_targets<'a, 'cfg>( + cx: &mut Context<'a, 'cfg>, + unit: &Unit<'a>, + fresh: bool, +) -> CargoResult { + let bcx = cx.bcx; + let outputs = cx.outputs(unit)?; + let export_dir = cx.files().export_dir(); + let package_id = unit.pkg.package_id(); + let profile = unit.profile; + let unit_mode = unit.mode; + let features = bcx + .resolve + .features_sorted(package_id) + .into_iter() + .map(|s| s.to_owned()) + .collect(); + let json_messages = bcx.build_config.emit_json(); + let executable = cx.get_executable(unit)?; + let mut target = unit.target.clone(); + if let TargetSourcePath::Metabuild = target.src_path() { + // Give it something to serialize. + let path = unit.pkg.manifest().metabuild_path(cx.bcx.ws.target_dir()); + target.set_src_path(TargetSourcePath::Path(path)); + } + + Ok(Work::new(move |state| { + // If we're a "root crate", e.g., the target of this compilation, then we + // hard link our outputs out of the `deps` directory into the directory + // above. This means that `cargo build` will produce binaries in + // `target/debug` which one probably expects. + let mut destinations = vec![]; + for output in outputs.iter() { + let src = &output.path; + // This may have been a `cargo rustc` command which changes the + // output, so the source may not actually exist. + if !src.exists() { + continue; + } + let dst = match output.hardlink.as_ref() { + Some(dst) => dst, + None => { + destinations.push(src.clone()); + continue; + } + }; + destinations.push(dst.clone()); + hardlink_or_copy(src, dst)?; + if let Some(ref path) = output.export_path { + let export_dir = export_dir.as_ref().unwrap(); + if !export_dir.exists() { + fs::create_dir_all(export_dir)?; + } + + hardlink_or_copy(src, path)?; + } + } + + if json_messages { + let art_profile = machine_message::ArtifactProfile { + opt_level: profile.opt_level.as_str(), + debuginfo: profile.debuginfo, + debug_assertions: profile.debug_assertions, + overflow_checks: profile.overflow_checks, + test: unit_mode.is_any_test(), + }; + + let msg = machine_message::Artifact { + package_id, + target: &target, + profile: art_profile, + features, + filenames: destinations, + executable, + fresh, + } + .to_json_string(); + state.stdout(msg); + } + Ok(()) + })) +} + +fn hardlink_or_copy(src: &Path, dst: &Path) -> CargoResult<()> { + debug!("linking {} to {}", src.display(), dst.display()); + if is_same_file(src, dst).unwrap_or(false) { + return Ok(()); + } + if dst.exists() { + paths::remove_file(&dst)?; + } + + let link_result = if src.is_dir() { + #[cfg(target_os = "redox")] + use std::os::redox::fs::symlink; + #[cfg(unix)] + use std::os::unix::fs::symlink; + #[cfg(windows)] + use std::os::windows::fs::symlink_dir as symlink; + + let dst_dir = dst.parent().unwrap(); + let src = if src.starts_with(dst_dir) { + src.strip_prefix(dst_dir).unwrap() + } else { + src + }; + symlink(src, dst) + } else { + fs::hard_link(src, dst) + }; + link_result + .or_else(|err| { + debug!("link failed {}. falling back to fs::copy", err); + fs::copy(src, dst).map(|_| ()) + }) + .chain_err(|| { + format!( + "failed to link or copy `{}` to `{}`", + src.display(), + dst.display() + ) + })?; + Ok(()) +} + +fn load_build_deps(cx: &Context<'_, '_>, unit: &Unit<'_>) -> Option> { + cx.build_scripts.get(unit).cloned() +} + +// For all plugin dependencies, add their -L paths (now calculated and +// present in `state`) to the dynamic library load path for the command to +// execute. 
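+//
+// The environment variable consulted via `util::dylib_path_envvar()` differs by
+// platform (e.g. `LD_LIBRARY_PATH` on Linux, `PATH` on Windows); its existing
+// value is split, extended with the plugin paths, and re-joined rather than
+// overwritten.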
+fn add_plugin_deps( + rustc: &mut ProcessBuilder, + build_state: &BuildMap, + build_scripts: &BuildScripts, + root_output: &PathBuf, +) -> CargoResult<()> { + let var = util::dylib_path_envvar(); + let search_path = rustc.get_env(var).unwrap_or_default(); + let mut search_path = env::split_paths(&search_path).collect::>(); + for &id in build_scripts.plugins.iter() { + let output = build_state + .get(&(id, Kind::Host)) + .ok_or_else(|| internal(format!("couldn't find libs for plugin dep {}", id)))?; + search_path.append(&mut filter_dynamic_search_path( + output.library_paths.iter(), + root_output, + )); + } + let search_path = join_paths(&search_path, var)?; + rustc.env(var, &search_path); + Ok(()) +} + +// Determine paths to add to the dynamic search path from -L entries +// +// Strip off prefixes like "native=" or "framework=" and filter out directories +// **not** inside our output directory since they are likely spurious and can cause +// clashes with system shared libraries (issue #3366). +fn filter_dynamic_search_path<'a, I>(paths: I, root_output: &PathBuf) -> Vec +where + I: Iterator, +{ + let mut search_path = vec![]; + for dir in paths { + let dir = match dir.to_str() { + Some(s) => { + let mut parts = s.splitn(2, '='); + match (parts.next(), parts.next()) { + (Some("native"), Some(path)) + | (Some("crate"), Some(path)) + | (Some("dependency"), Some(path)) + | (Some("framework"), Some(path)) + | (Some("all"), Some(path)) => path.into(), + _ => dir.clone(), + } + } + None => dir.clone(), + }; + if dir.starts_with(&root_output) { + search_path.push(dir); + } else { + debug!( + "Not including path {} in runtime library search path because it is \ + outside target root {}", + dir.display(), + root_output.display() + ); + } + } + search_path +} + +fn prepare_rustc<'a, 'cfg>( + cx: &mut Context<'a, 'cfg>, + crate_types: &[&str], + unit: &Unit<'a>, +) -> CargoResult { + let is_primary = cx.is_primary_package(unit); + + let mut base = cx + .compilation + .rustc_process(unit.pkg, unit.target, is_primary)?; + base.inherit_jobserver(&cx.jobserver); + build_base_args(cx, &mut base, unit, crate_types)?; + build_deps_args(&mut base, cx, unit)?; + Ok(base) +} + +fn rustdoc<'a, 'cfg>(cx: &mut Context<'a, 'cfg>, unit: &Unit<'a>) -> CargoResult { + let bcx = cx.bcx; + let mut rustdoc = cx.compilation.rustdoc_process(unit.pkg, unit.target)?; + rustdoc.inherit_jobserver(&cx.jobserver); + rustdoc.arg("--crate-name").arg(&unit.target.crate_name()); + add_path_args(bcx, unit, &mut rustdoc); + add_cap_lints(bcx, unit, &mut rustdoc); + add_color(bcx, &mut rustdoc); + + if unit.kind != Kind::Host { + if let Some(ref target) = bcx.build_config.requested_target { + rustdoc.arg("--target").arg(target); + } + } + + let doc_dir = cx.files().out_dir(unit); + + // Create the documentation directory ahead of time as rustdoc currently has + // a bug where concurrent invocations will race to create this directory if + // it doesn't already exist. + fs::create_dir_all(&doc_dir)?; + + rustdoc.arg("-o").arg(doc_dir); + + // Need to keep a correct order on the features, so get the sorted name first, + // then resolve the specified platform. 
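+    // Each feature that is activated for this platform becomes a plain cfg
+    // flag, e.g. (hypothetical feature name) `--cfg feature="serde"`.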
+ for feat in bcx.resolve.features_sorted(unit.pkg.package_id()) { + if let Some(platform) = bcx.resolve.features(unit.pkg.package_id()).get(feat) { + if bcx.platform_activated(platform.as_ref(), unit.kind) { + rustdoc.arg("--cfg").arg(&format!("feature=\"{}\"", feat)); + } + } else { + bail!("Failed to get the target for the feature `{}`", feat); + } + } + + add_error_format(cx, &mut rustdoc, false, false)?; + + if let Some(args) = bcx.extra_args_for(unit) { + rustdoc.args(args); + } + + build_deps_args(&mut rustdoc, cx, unit)?; + + rustdoc.args(bcx.rustdocflags_args(unit)); + + let name = unit.pkg.name().to_string(); + let build_state = cx.build_state.clone(); + let key = (unit.pkg.package_id(), unit.kind); + let package_id = unit.pkg.package_id(); + let target = unit.target.clone(); + let mut output_options = OutputOptions::new(cx, unit); + + Ok(Work::new(move |state| { + if let Some(output) = build_state.outputs.lock().unwrap().get(&key) { + for cfg in output.cfgs.iter() { + rustdoc.arg("--cfg").arg(cfg); + } + for &(ref name, ref value) in output.env.iter() { + rustdoc.env(name, value); + } + } + state.running(&rustdoc); + + rustdoc + .exec_with_streaming( + &mut |line| on_stdout_line(state, line, package_id, &target), + &mut |line| on_stderr_line(state, line, package_id, &target, &mut output_options), + false, + ) + .chain_err(|| format!("Could not document `{}`.", name))?; + Ok(()) + })) +} + +// The path that we pass to rustc is actually fairly important because it will +// show up in error messages (important for readability), debug information +// (important for caching), etc. As a result we need to be pretty careful how we +// actually invoke rustc. +// +// In general users don't expect `cargo build` to cause rebuilds if you change +// directories. That could be if you just change directories in the package or +// if you literally move the whole package wholesale to a new directory. As a +// result we mostly don't factor in `cwd` to this calculation. Instead we try to +// track the workspace as much as possible and we update the current directory +// of rustc/rustdoc where appropriate. +// +// The first returned value here is the argument to pass to rustc, and the +// second is the cwd that rustc should operate in. +fn path_args(bcx: &BuildContext<'_, '_>, unit: &Unit<'_>) -> (PathBuf, PathBuf) { + let ws_root = bcx.ws.root(); + let src = match unit.target.src_path() { + TargetSourcePath::Path(path) => path.to_path_buf(), + TargetSourcePath::Metabuild => unit.pkg.manifest().metabuild_path(bcx.ws.target_dir()), + }; + assert!(src.is_absolute()); + if unit.pkg.package_id().source_id().is_path() { + if let Ok(path) = src.strip_prefix(ws_root) { + return (path.to_path_buf(), ws_root.to_path_buf()); + } + } + (src, unit.pkg.root().to_path_buf()) +} + +fn add_path_args(bcx: &BuildContext<'_, '_>, unit: &Unit<'_>, cmd: &mut ProcessBuilder) { + let (arg, cwd) = path_args(bcx, unit); + cmd.arg(arg); + cmd.cwd(cwd); +} + +fn add_cap_lints(bcx: &BuildContext<'_, '_>, unit: &Unit<'_>, cmd: &mut ProcessBuilder) { + // If this is an upstream dep we don't want warnings from, turn off all + // lints. + if !bcx.show_warnings(unit.pkg.package_id()) { + cmd.arg("--cap-lints").arg("allow"); + + // If this is an upstream dep but we *do* want warnings, make sure that they + // don't fail compilation. 
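+    // (`--cap-lints warn` caps every lint at the `warn` level, so `deny` or
+    // `forbid` lints in a registry or git dependency cannot fail the build.)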
+ } else if !unit.pkg.package_id().source_id().is_path() { + cmd.arg("--cap-lints").arg("warn"); + } +} + +fn add_color(bcx: &BuildContext<'_, '_>, cmd: &mut ProcessBuilder) { + let shell = bcx.config.shell(); + let color = if shell.supports_color() { + "always" + } else { + "never" + }; + cmd.args(&["--color", color]); +} + +/// Add error-format flags to the command. +/// +/// This is rather convoluted right now. The general overview is: +/// - If -Zcache-messages or `build.pipelining` is enabled, Cargo always uses +/// JSON output. This has several benefits, such as being easier to parse, +/// handles changing formats (for replaying cached messages), ensures +/// atomic output (so messages aren't interleaved), etc. +/// - `supports_termcolor` is a temporary flag. rustdoc does not yet support +/// the `--json-rendered` flag, but it is intended to fix that soon. +/// - `short` output is not yet supported for JSON output. We haven't yet +/// decided how this problem will be resolved. Probably either adding +/// "short" to the JSON output, or more ambitiously moving diagnostic +/// rendering to an external library that Cargo can share with rustc. +/// +/// It is intended in the future that Cargo *always* uses the JSON output, and +/// this function can be simplified. The above issues need to be resolved, the +/// flags need to be stabilized, and we need more testing to ensure there +/// aren't any regressions. +fn add_error_format( + cx: &Context<'_, '_>, + cmd: &mut ProcessBuilder, + pipelined: bool, + supports_termcolor: bool, +) -> CargoResult<()> { + // If this unit is producing a required rmeta file then we need to know + // when the rmeta file is ready so we can signal to the rest of Cargo that + // it can continue dependent compilations. To do this we are currently + // required to switch the compiler into JSON message mode, but we still + // want to present human readable errors as well. (this rabbit hole just + // goes and goes) + // + // All that means is that if we're not already in JSON mode we need to + // switch to JSON mode, ensure that rustc error messages can be rendered + // prettily, and then when parsing JSON messages from rustc we need to + // internally understand that we should extract the `rendered` field and + // present it if we can. + if cx.bcx.build_config.cache_messages() || pipelined { + cmd.arg("--error-format=json").arg("-Zunstable-options"); + if supports_termcolor { + cmd.arg("--json-rendered=termcolor"); + } + if cx.bcx.build_config.message_format == MessageFormat::Short { + // FIXME(rust-lang/rust#60419): right now we have no way of + // turning on JSON messages from the compiler and also asking + // the rendered field to be in the `short` format. 
+ bail!( + "currently `--message-format short` is incompatible with {}", + if pipelined { + "pipelined compilation" + } else { + "cached output" + } + ); + } + if pipelined { + cmd.arg("-Zemit-artifact-notifications"); + } + } else { + match cx.bcx.build_config.message_format { + MessageFormat::Human => (), + MessageFormat::Json => { + cmd.arg("--error-format").arg("json"); + } + MessageFormat::Short => { + cmd.arg("--error-format").arg("short"); + } + } + } + Ok(()) +} + +fn build_base_args<'a, 'cfg>( + cx: &mut Context<'a, 'cfg>, + cmd: &mut ProcessBuilder, + unit: &Unit<'a>, + crate_types: &[&str], +) -> CargoResult<()> { + assert!(!unit.mode.is_run_custom_build()); + + let bcx = cx.bcx; + let Profile { + ref opt_level, + ref lto, + codegen_units, + debuginfo, + debug_assertions, + overflow_checks, + rpath, + ref panic, + incremental, + .. + } = unit.profile; + let test = unit.mode.is_any_test(); + + cmd.arg("--crate-name").arg(&unit.target.crate_name()); + + add_path_args(bcx, unit, cmd); + add_color(bcx, cmd); + add_error_format(cx, cmd, cx.rmeta_required(unit), true)?; + + if !test { + for crate_type in crate_types.iter() { + cmd.arg("--crate-type").arg(crate_type); + } + } + + if unit.mode.is_check() { + cmd.arg("--emit=dep-info,metadata"); + } else if !unit.requires_upstream_objects() { + // Always produce metdata files for rlib outputs. Metadata may be used + // in this session for a pipelined compilation, or it may be used in a + // future Cargo session as part of a pipelined compile. + cmd.arg("--emit=dep-info,metadata,link"); + } else { + cmd.arg("--emit=dep-info,link"); + } + + let prefer_dynamic = (unit.target.for_host() && !unit.target.is_custom_build()) + || (crate_types.contains(&"dylib") && bcx.ws.members().any(|p| p != unit.pkg)); + if prefer_dynamic { + cmd.arg("-C").arg("prefer-dynamic"); + } + + if opt_level.as_str() != "0" { + cmd.arg("-C").arg(&format!("opt-level={}", opt_level)); + } + + if *panic != PanicStrategy::Unwind { + cmd.arg("-C").arg(format!("panic={}", panic)); + } + + // Disable LTO for host builds as prefer_dynamic and it are mutually + // exclusive. + if unit.target.can_lto() && !unit.target.for_host() { + match *lto { + Lto::Bool(false) => {} + Lto::Bool(true) => { + cmd.args(&["-C", "lto"]); + } + Lto::Named(ref s) => { + cmd.arg("-C").arg(format!("lto={}", s)); + } + } + } + + if let Some(n) = codegen_units { + // There are some restrictions with LTO and codegen-units, so we + // only add codegen units when LTO is not used. + cmd.arg("-C").arg(&format!("codegen-units={}", n)); + } + + if let Some(debuginfo) = debuginfo { + cmd.arg("-C").arg(format!("debuginfo={}", debuginfo)); + } + + if let Some(args) = bcx.extra_args_for(unit) { + cmd.args(args); + } + + // `-C overflow-checks` is implied by the setting of `-C debug-assertions`, + // so we only need to provide `-C overflow-checks` if it differs from + // the value of `-C debug-assertions` we would provide. 
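+    // The branches below assume rustc's defaults: `debug-assertions` is on only
+    // at `opt-level=0`, and `overflow-checks` defaults to whatever
+    // `debug-assertions` is, so a flag is emitted only when it differs from the
+    // implied value.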
+ if opt_level.as_str() != "0" { + if debug_assertions { + cmd.args(&["-C", "debug-assertions=on"]); + if !overflow_checks { + cmd.args(&["-C", "overflow-checks=off"]); + } + } else if overflow_checks { + cmd.args(&["-C", "overflow-checks=on"]); + } + } else if !debug_assertions { + cmd.args(&["-C", "debug-assertions=off"]); + if overflow_checks { + cmd.args(&["-C", "overflow-checks=on"]); + } + } else if !overflow_checks { + cmd.args(&["-C", "overflow-checks=off"]); + } + + if test && unit.target.harness() { + cmd.arg("--test"); + } else if test { + cmd.arg("--cfg").arg("test"); + } + + // We ideally want deterministic invocations of rustc to ensure that + // rustc-caching strategies like sccache are able to cache more, so sort the + // feature list here. + for feat in bcx.resolve.features_sorted(unit.pkg.package_id()) { + if let Some(platform) = bcx.resolve.features(unit.pkg.package_id()).get(feat) { + if bcx.platform_activated(platform.as_ref(), unit.kind) { + cmd.arg("--cfg").arg(&format!("feature=\"{}\"", feat)); + } + } else { + bail!("Failed to get the target for the feature `{}`", feat); + } + } + + match cx.files().metadata(unit) { + Some(m) => { + cmd.arg("-C").arg(&format!("metadata={}", m)); + cmd.arg("-C").arg(&format!("extra-filename=-{}", m)); + } + None => { + cmd.arg("-C") + .arg(&format!("metadata={}", cx.files().target_short_hash(unit))); + } + } + + if rpath { + cmd.arg("-C").arg("rpath"); + } + + cmd.arg("--out-dir").arg(&cx.files().out_dir(unit)); + + fn opt(cmd: &mut ProcessBuilder, key: &str, prefix: &str, val: Option<&OsStr>) { + if let Some(val) = val { + let mut joined = OsString::from(prefix); + joined.push(val); + cmd.arg(key).arg(joined); + } + } + + if unit.kind == Kind::Target { + opt( + cmd, + "--target", + "", + bcx.build_config + .requested_target + .as_ref() + .map(|s| s.as_ref()), + ); + } + + opt(cmd, "-C", "ar=", bcx.ar(unit.kind).map(|s| s.as_ref())); + opt( + cmd, + "-C", + "linker=", + bcx.linker(unit.kind).map(|s| s.as_ref()), + ); + if incremental { + let dir = cx.files().layout(unit.kind).incremental().as_os_str(); + opt(cmd, "-C", "incremental=", Some(dir)); + } + Ok(()) +} + +fn build_deps_args<'a, 'cfg>( + cmd: &mut ProcessBuilder, + cx: &mut Context<'a, 'cfg>, + unit: &Unit<'a>, +) -> CargoResult<()> { + let bcx = cx.bcx; + cmd.arg("-L").arg(&{ + let mut deps = OsString::from("dependency="); + deps.push(cx.files().deps_dir(unit)); + deps + }); + + // Be sure that the host path is also listed. This'll ensure that proc macro + // dependencies are correctly found (for reexported macros). + if let Kind::Target = unit.kind { + cmd.arg("-L").arg(&{ + let mut deps = OsString::from("dependency="); + deps.push(cx.files().host_deps()); + deps + }); + } + + let dep_targets = cx.dep_targets(unit); + + // If there is not one linkable target but should, rustc fails later + // on if there is an `extern crate` for it. This may turn into a hard + // error in the future (see PR #4797). + if !dep_targets + .iter() + .any(|u| !u.mode.is_doc() && u.target.linkable()) + { + if let Some(u) = dep_targets + .iter() + .find(|u| !u.mode.is_doc() && u.target.is_lib()) + { + bcx.config.shell().warn(format!( + "The package `{}` \ + provides no linkable target. The compiler might raise an error while compiling \ + `{}`. Consider adding 'dylib' or 'rlib' to key `crate-type` in `{}`'s \ + Cargo.toml. 
This warning might turn into a hard error in the future.", + u.target.crate_name(), + unit.target.crate_name(), + u.target.crate_name() + ))?; + } + } + + let mut unstable_opts = false; + + for dep in dep_targets { + if dep.mode.is_run_custom_build() { + cmd.env("OUT_DIR", &cx.files().build_script_out_dir(&dep)); + } + if dep.target.linkable() && !dep.mode.is_doc() { + link_to(cmd, cx, unit, &dep, &mut unstable_opts)?; + } + } + + // This will only be set if we're already using a feature + // requiring nightly rust + if unstable_opts { + cmd.arg("-Z").arg("unstable-options"); + } + + return Ok(()); + + fn link_to<'a, 'cfg>( + cmd: &mut ProcessBuilder, + cx: &mut Context<'a, 'cfg>, + current: &Unit<'a>, + dep: &Unit<'a>, + need_unstable_opts: &mut bool, + ) -> CargoResult<()> { + let bcx = cx.bcx; + + let mut value = OsString::new(); + value.push(bcx.extern_crate_name(current, dep)?); + value.push("="); + + let mut pass = |file| { + let mut value = value.clone(); + value.push(file); + + if current + .pkg + .manifest() + .features() + .require(Feature::public_dependency()) + .is_ok() + && !bcx.is_public_dependency(current, dep) + { + cmd.arg("--extern-private"); + *need_unstable_opts = true; + } else { + cmd.arg("--extern"); + } + + cmd.arg(&value); + }; + + let outputs = cx.outputs(dep)?; + let mut outputs = outputs.iter().filter_map(|output| match output.flavor { + FileFlavor::Linkable { rmeta } => Some((output, rmeta)), + _ => None, + }); + + if cx.only_requires_rmeta(current, dep) { + let (output, _rmeta) = outputs + .find(|(_output, rmeta)| *rmeta) + .expect("failed to find rlib dep for pipelined dep"); + pass(&output.path); + } else { + for (output, rmeta) in outputs { + if !rmeta { + pass(&output.path); + } + } + } + Ok(()) + } +} + +fn envify(s: &str) -> String { + s.chars() + .flat_map(|c| c.to_uppercase()) + .map(|c| if c == '-' { '_' } else { c }) + .collect() +} + +impl Kind { + fn for_target(self, target: &Target) -> Kind { + // Once we start compiling for the `Host` kind we continue doing so, but + // if we are a `Target` kind and then we start compiling for a target + // that needs to be on the host we lift ourselves up to `Host`. + match self { + Kind::Host => Kind::Host, + Kind::Target if target.for_host() => Kind::Host, + Kind::Target => Kind::Target, + } + } +} + +struct OutputOptions { + /// Get the `"rendered"` field from the JSON output and display it on + /// stderr instead of the JSON message. + extract_rendered_messages: bool, + /// Look for JSON message that indicates .rmeta file is available for + /// pipelined compilation. + look_for_metadata_directive: bool, + /// Whether or not to display messages in color. + color: bool, + /// Where to write the JSON messages to support playback later if the unit + /// is fresh. The file is created lazily so that in the normal case, lots + /// of empty files are not created. This is None if caching is disabled. + cache_cell: Option<(PathBuf, LazyCell)>, +} + +impl OutputOptions { + fn new<'a>(cx: &Context<'a, '_>, unit: &Unit<'a>) -> OutputOptions { + let extract_rendered_messages = cx.bcx.build_config.message_format != MessageFormat::Json; + let look_for_metadata_directive = cx.rmeta_required(unit); + let color = cx.bcx.config.shell().supports_color(); + let cache_cell = if cx.bcx.build_config.cache_messages() { + let path = cx.files().message_cache_path(unit); + // Remove old cache, ignore ENOENT, which is the common case. 
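+            // (`drop` discards the `io::Result`, so a missing file is ignored
+            // rather than treated as an error.)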
+ drop(fs::remove_file(&path)); + Some((path, LazyCell::new())) + } else { + None + }; + OutputOptions { + extract_rendered_messages, + look_for_metadata_directive, + color, + cache_cell, + } + } +} + +fn on_stdout_line( + state: &JobState<'_>, + line: &str, + _package_id: PackageId, + _target: &Target, +) -> CargoResult<()> { + state.stdout(line.to_string()); + Ok(()) +} + +fn on_stderr_line( + state: &JobState<'_>, + line: &str, + package_id: PackageId, + target: &Target, + options: &mut OutputOptions, +) -> CargoResult<()> { + // Check if caching is enabled. + if let Some((path, cell)) = &mut options.cache_cell { + // Cache the output, which will be replayed later when Fresh. + let f = cell.try_borrow_mut_with(|| File::create(path))?; + debug_assert!(!line.contains('\n')); + f.write_all(line.as_bytes())?; + f.write_all(&[b'\n'])?; + } + + // We primarily want to use this function to process JSON messages from + // rustc. The compiler should always print one JSON message per line, and + // otherwise it may have other output intermingled (think RUST_LOG or + // something like that), so skip over everything that doesn't look like a + // JSON message. + if !line.starts_with('{') { + state.stderr(line.to_string()); + return Ok(()); + } + + let mut compiler_message: Box = match serde_json::from_str(line) { + Ok(msg) => msg, + + // If the compiler produced a line that started with `{` but it wasn't + // valid JSON, maybe it wasn't JSON in the first place! Forward it along + // to stderr. + Err(e) => { + debug!("failed to parse json: {:?}", e); + state.stderr(line.to_string()); + return Ok(()); + } + }; + + // In some modes of compilation Cargo switches the compiler to JSON mode + // but the user didn't request that so we still want to print pretty rustc + // colorized diagnostics. In those cases (`extract_rendered_messages`) we + // take a look at the JSON blob we go, see if it's a relevant diagnostics, + // and if so forward just that diagnostic for us to print. + if options.extract_rendered_messages { + #[derive(serde::Deserialize)] + struct CompilerMessage { + rendered: String, + } + if let Ok(mut error) = serde_json::from_str::(compiler_message.get()) { + // state.stderr will add a newline + if error.rendered.ends_with('\n') { + error.rendered.pop(); + } + let rendered = if options.color { + error.rendered + } else { + // Strip only fails if the the Writer fails, which is Cursor + // on a Vec, which should never fail. + strip_ansi_escapes::strip(&error.rendered) + .map(|v| String::from_utf8(v).expect("utf8")) + .expect("strip should never fail") + }; + state.stderr(rendered); + return Ok(()); + } + } else { + // Remove color information from the rendered string. rustc has not + // included color in the past, so to avoid breaking anything, strip it + // out when --json-rendered=termcolor is used. This runs + // unconditionally under the assumption that Cargo will eventually + // move to this as the default mode. Perhaps in the future, cargo + // could allow the user to enable/disable color (such as with a + // `--json-rendered` or `--color` or `--message-format` flag). 
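+        //
+        // Only the `rendered` field is rewritten; all other fields of the JSON
+        // message are carried through untouched by the `#[serde(flatten)]` map
+        // on the struct below.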
+ #[derive(serde::Deserialize, serde::Serialize)] + struct CompilerMessage { + rendered: String, + #[serde(flatten)] + other: std::collections::BTreeMap, + } + if let Ok(mut error) = serde_json::from_str::(compiler_message.get()) { + error.rendered = strip_ansi_escapes::strip(&error.rendered) + .map(|v| String::from_utf8(v).expect("utf8")) + .unwrap_or(error.rendered); + let new_line = serde_json::to_string(&error)?; + let new_msg: Box = serde_json::from_str(&new_line)?; + compiler_message = new_msg; + } + } + + // In some modes of execution we will execute rustc with `-Z + // emit-artifact-notifications` to look for metadata files being produced. When this + // happens we may be able to start subsequent compilations more quickly than + // waiting for an entire compile to finish, possibly using more parallelism + // available to complete a compilation session more quickly. + // + // In these cases look for a matching directive and inform Cargo internally + // that a metadata file has been produced. + if options.look_for_metadata_directive { + #[derive(serde::Deserialize)] + struct ArtifactNotification { + artifact: String, + } + if let Ok(artifact) = serde_json::from_str::(compiler_message.get()) { + log::trace!("found directive from rustc: `{}`", artifact.artifact); + if artifact.artifact.ends_with(".rmeta") { + log::debug!("looks like metadata finished early!"); + state.rmeta_produced(); + } + return Ok(()); + } + } + + // And failing all that above we should have a legitimate JSON diagnostic + // from the compiler, so wrap it in an external Cargo JSON message + // indicating which package it came from and then emit it. + let msg = machine_message::FromCompiler { + package_id, + target, + message: compiler_message, + } + .to_json_string(); + + // Switch json lines from rustc/rustdoc that appear on stderr to stdout + // instead. We want the stdout of Cargo to always be machine parseable as + // stderr has our colorized human-readable messages. + state.stdout(msg); + Ok(()) +} + +fn replay_output_cache( + package_id: PackageId, + target: &Target, + path: PathBuf, + format: MessageFormat, + color: bool, +) -> Work { + let target = target.clone(); + let extract_rendered_messages = match format { + MessageFormat::Human => true, + MessageFormat::Json => false, + // FIXME: short not supported. + MessageFormat::Short => false, + }; + let mut options = OutputOptions { + extract_rendered_messages, + look_for_metadata_directive: false, + color, + cache_cell: None, + }; + Work::new(move |state| { + if !path.exists() { + // No cached output, probably didn't emit anything. + return Ok(()); + } + let contents = fs::read_to_string(&path)?; + for line in contents.lines() { + on_stderr_line(state, line, package_id, &target, &mut options)?; + } + Ok(()) + }) +} diff --git a/src/cargo/core/dependency.rs b/src/cargo/core/dependency.rs index 1da54590ef4..9ceceab1d8a 100644 --- a/src/cargo/core/dependency.rs +++ b/src/cargo/core/dependency.rs @@ -1,502 +1,451 @@ -use std::fmt; -use std::rc::Rc; -use std::str::FromStr; - -use log::trace; -use semver::ReqParseError; -use semver::VersionReq; -use serde::ser; -use serde::Serialize; -use url::Url; - -use crate::core::interning::InternedString; -use crate::core::{PackageId, SourceId, Summary}; -use crate::util::errors::{CargoResult, CargoResultExt}; -use crate::util::{Cfg, CfgExpr, Config}; - -/// Information about a dependency requested by a Cargo manifest. -/// Cheap to copy. 
-#[derive(PartialEq, Eq, Hash, Ord, PartialOrd, Clone, Debug)] -pub struct Dependency { - inner: Rc, -} - -/// The data underlying a `Dependency`. -#[derive(PartialEq, Eq, Hash, Ord, PartialOrd, Clone, Debug)] -struct Inner { - name: InternedString, - source_id: SourceId, - /// Source ID for the registry as specified in the manifest. - /// - /// This will be None if it is not specified (crates.io dependency). - /// This is different from `source_id` for example when both a `path` and - /// `registry` is specified. Or in the case of a crates.io dependency, - /// `source_id` will be crates.io and this will be None. - registry_id: Option, - req: VersionReq, - specified_req: bool, - kind: Kind, - only_match_name: bool, - explicit_name_in_toml: Option, - - optional: bool, - public: bool, - default_features: bool, - features: Vec, - - // This dependency should be used only for this platform. - // `None` means *all platforms*. - platform: Option, -} - -#[derive(Eq, PartialEq, Hash, Ord, PartialOrd, Clone, Debug)] -pub enum Platform { - Name(String), - Cfg(CfgExpr), -} - -#[derive(Serialize)] -struct SerializedDependency<'a> { - name: &'a str, - source: SourceId, - req: String, - kind: Kind, - rename: Option<&'a str>, - - optional: bool, - uses_default_features: bool, - features: &'a [InternedString], - target: Option<&'a Platform>, - /// The registry URL this dependency is from. - /// If None, then it comes from the default registry (crates.io). - registry: Option, -} - -impl ser::Serialize for Dependency { - fn serialize(&self, s: S) -> Result - where - S: ser::Serializer, - { - SerializedDependency { - name: &*self.package_name(), - source: self.source_id(), - req: self.version_req().to_string(), - kind: self.kind(), - optional: self.is_optional(), - uses_default_features: self.uses_default_features(), - features: self.features(), - target: self.platform(), - rename: self.explicit_name_in_toml().map(|s| s.as_str()), - registry: self.registry_id().map(|sid| sid.url().clone()), - } - .serialize(s) - } -} - -#[derive(PartialEq, Eq, Hash, Ord, PartialOrd, Clone, Debug, Copy)] -pub enum Kind { - Normal, - Development, - Build, -} - -fn parse_req_with_deprecated( - name: &str, - req: &str, - extra: Option<(PackageId, &Config)>, -) -> CargoResult { - match VersionReq::parse(req) { - Err(ReqParseError::DeprecatedVersionRequirement(requirement)) => { - let (inside, config) = match extra { - Some(pair) => pair, - None => return Err(ReqParseError::DeprecatedVersionRequirement(requirement).into()), - }; - let msg = format!( - "\ -parsed version requirement `{}` is no longer valid - -Previous versions of Cargo accepted this malformed requirement, -but it is being deprecated. This was found when parsing the manifest -of {} {}, and the correct version requirement is `{}`. - -This will soon become a hard error, so it's either recommended to -update to a fixed version or contact the upstream maintainer about -this warning. 
-", - req, - inside.name(), - inside.version(), - requirement - ); - config.shell().warn(&msg)?; - - Ok(requirement) - } - Err(e) => { - let err: CargoResult = Err(e.into()); - let v: VersionReq = err.chain_err(|| { - format!( - "failed to parse the version requirement `{}` for dependency `{}`", - req, name - ) - })?; - Ok(v) - } - Ok(v) => Ok(v), - } -} - -impl ser::Serialize for Kind { - fn serialize(&self, s: S) -> Result - where - S: ser::Serializer, - { - match *self { - Kind::Normal => None, - Kind::Development => Some("dev"), - Kind::Build => Some("build"), - } - .serialize(s) - } -} - -impl Dependency { - /// Attempt to create a `Dependency` from an entry in the manifest. - pub fn parse( - name: &str, - version: Option<&str>, - source_id: SourceId, - inside: PackageId, - config: &Config, - ) -> CargoResult { - let arg = Some((inside, config)); - let (specified_req, version_req) = match version { - Some(v) => (true, parse_req_with_deprecated(name, v, arg)?), - None => (false, VersionReq::any()), - }; - - let mut ret = Dependency::new_override(name, source_id); - { - let ptr = Rc::make_mut(&mut ret.inner); - ptr.only_match_name = false; - ptr.req = version_req; - ptr.specified_req = specified_req; - } - Ok(ret) - } - - /// Attempt to create a `Dependency` from an entry in the manifest. - pub fn parse_no_deprecated( - name: &str, - version: Option<&str>, - source_id: SourceId, - ) -> CargoResult { - let (specified_req, version_req) = match version { - Some(v) => (true, parse_req_with_deprecated(name, v, None)?), - None => (false, VersionReq::any()), - }; - - let mut ret = Dependency::new_override(name, source_id); - { - let ptr = Rc::make_mut(&mut ret.inner); - ptr.only_match_name = false; - ptr.req = version_req; - ptr.specified_req = specified_req; - } - Ok(ret) - } - - pub fn new_override(name: &str, source_id: SourceId) -> Dependency { - assert!(!name.is_empty()); - Dependency { - inner: Rc::new(Inner { - name: InternedString::new(name), - source_id, - registry_id: None, - req: VersionReq::any(), - kind: Kind::Normal, - only_match_name: true, - optional: false, - public: false, - features: Vec::new(), - default_features: true, - specified_req: false, - platform: None, - explicit_name_in_toml: None, - }), - } - } - - pub fn version_req(&self) -> &VersionReq { - &self.inner.req - } - - /// This is the name of this `Dependency` as listed in `Cargo.toml`. - /// - /// Or in other words, this is what shows up in the `[dependencies]` section - /// on the left hand side. This is *not* the name of the package that's - /// being depended on as the dependency can be renamed. For that use - /// `package_name` below. - /// - /// Both of the dependencies below return `foo` for `name_in_toml`: - /// - /// ```toml - /// [dependencies] - /// foo = "0.1" - /// ``` - /// - /// and ... - /// - /// ```toml - /// [dependencies] - /// foo = { version = "0.1", package = 'bar' } - /// ``` - pub fn name_in_toml(&self) -> InternedString { - self.explicit_name_in_toml().unwrap_or(self.inner.name) - } - - /// The name of the package that this `Dependency` depends on. - /// - /// Usually this is what's written on the left hand side of a dependencies - /// section, but it can also be renamed via the `package` key. - /// - /// Both of the dependencies below return `foo` for `package_name`: - /// - /// ```toml - /// [dependencies] - /// foo = "0.1" - /// ``` - /// - /// and ... 
- /// - /// ```toml - /// [dependencies] - /// bar = { version = "0.1", package = 'foo' } - /// ``` - pub fn package_name(&self) -> InternedString { - self.inner.name - } - - pub fn source_id(&self) -> SourceId { - self.inner.source_id - } - - pub fn registry_id(&self) -> Option { - self.inner.registry_id - } - - pub fn set_registry_id(&mut self, registry_id: SourceId) -> &mut Dependency { - Rc::make_mut(&mut self.inner).registry_id = Some(registry_id); - self - } - - pub fn kind(&self) -> Kind { - self.inner.kind - } - - pub fn is_public(&self) -> bool { - self.inner.public - } - - /// Sets whether the dependency is public. - pub fn set_public(&mut self, public: bool) -> &mut Dependency { - if public { - // Setting 'public' only makes sense for normal dependencies - assert_eq!(self.kind(), Kind::Normal); - } - Rc::make_mut(&mut self.inner).public = public; - self - } - - pub fn specified_req(&self) -> bool { - self.inner.specified_req - } - - /// If none, this dependencies must be built for all platforms. - /// If some, it must only be built for the specified platform. - pub fn platform(&self) -> Option<&Platform> { - self.inner.platform.as_ref() - } - - /// The renamed name of this dependency, if any. - /// - /// If the `package` key is used in `Cargo.toml` then this returns the same - /// value as `name_in_toml`. - pub fn explicit_name_in_toml(&self) -> Option { - self.inner.explicit_name_in_toml - } - - pub fn set_kind(&mut self, kind: Kind) -> &mut Dependency { - if self.is_public() { - // Setting 'public' only makes sense for normal dependencies - assert_eq!(kind, Kind::Normal); - } - Rc::make_mut(&mut self.inner).kind = kind; - self - } - - /// Sets the list of features requested for the package. - pub fn set_features( - &mut self, - features: impl IntoIterator>, - ) -> &mut Dependency { - Rc::make_mut(&mut self.inner).features = features - .into_iter() - .map(|s| InternedString::new(s.as_ref())) - .collect(); - self - } - - /// Sets whether the dependency requests default features of the package. - pub fn set_default_features(&mut self, default_features: bool) -> &mut Dependency { - Rc::make_mut(&mut self.inner).default_features = default_features; - self - } - - /// Sets whether the dependency is optional. - pub fn set_optional(&mut self, optional: bool) -> &mut Dependency { - Rc::make_mut(&mut self.inner).optional = optional; - self - } - - /// Sets the source ID for this dependency. - pub fn set_source_id(&mut self, id: SourceId) -> &mut Dependency { - Rc::make_mut(&mut self.inner).source_id = id; - self - } - - /// Sets the version requirement for this dependency. - pub fn set_version_req(&mut self, req: VersionReq) -> &mut Dependency { - Rc::make_mut(&mut self.inner).req = req; - self - } - - pub fn set_platform(&mut self, platform: Option) -> &mut Dependency { - Rc::make_mut(&mut self.inner).platform = platform; - self - } - - pub fn set_explicit_name_in_toml(&mut self, name: &str) -> &mut Dependency { - Rc::make_mut(&mut self.inner).explicit_name_in_toml = Some(InternedString::new(name)); - self - } - - /// Locks this dependency to depending on the specified package ID. 
- pub fn lock_to(&mut self, id: PackageId) -> &mut Dependency { - assert_eq!(self.inner.source_id, id.source_id()); - assert!(self.inner.req.matches(id.version())); - trace!( - "locking dep from `{}` with `{}` at {} to {}", - self.package_name(), - self.version_req(), - self.source_id(), - id - ); - self.set_version_req(VersionReq::exact(id.version())) - .set_source_id(id.source_id()) - } - - /// Returns `true` if this is a "locked" dependency, basically whether it has - /// an exact version req. - pub fn is_locked(&self) -> bool { - // Kind of a hack to figure this out, but it works! - self.inner.req.to_string().starts_with('=') - } - - /// Returns `false` if the dependency is only used to build the local package. - pub fn is_transitive(&self) -> bool { - match self.inner.kind { - Kind::Normal | Kind::Build => true, - Kind::Development => false, - } - } - - pub fn is_build(&self) -> bool { - match self.inner.kind { - Kind::Build => true, - _ => false, - } - } - - pub fn is_optional(&self) -> bool { - self.inner.optional - } - - /// Returns `true` if the default features of the dependency are requested. - pub fn uses_default_features(&self) -> bool { - self.inner.default_features - } - /// Returns the list of features that are requested by the dependency. - pub fn features(&self) -> &[InternedString] { - &self.inner.features - } - - /// Returns `true` if the package (`sum`) can fulfill this dependency request. - pub fn matches(&self, sum: &Summary) -> bool { - self.matches_id(sum.package_id()) - } - - /// Returns `true` if the package (`id`) can fulfill this dependency request. - pub fn matches_ignoring_source(&self, id: PackageId) -> bool { - self.package_name() == id.name() && self.version_req().matches(id.version()) - } - - /// Returns `true` if the package (`id`) can fulfill this dependency request. 
- pub fn matches_id(&self, id: PackageId) -> bool { - self.inner.name == id.name() - && (self.inner.only_match_name - || (self.inner.req.matches(id.version()) && self.inner.source_id == id.source_id())) - } - - pub fn map_source(mut self, to_replace: SourceId, replace_with: SourceId) -> Dependency { - if self.source_id() != to_replace { - self - } else { - self.set_source_id(replace_with); - self - } - } -} - -impl Platform { - pub fn matches(&self, name: &str, cfg: &[Cfg]) -> bool { - match *self { - Platform::Name(ref p) => p == name, - Platform::Cfg(ref p) => p.matches(cfg), - } - } -} - -impl ser::Serialize for Platform { - fn serialize(&self, s: S) -> Result - where - S: ser::Serializer, - { - self.to_string().serialize(s) - } -} - -impl FromStr for Platform { - type Err = failure::Error; - - fn from_str(s: &str) -> CargoResult { - if s.starts_with("cfg(") && s.ends_with(')') { - let s = &s[4..s.len() - 1]; - let p = s.parse().map(Platform::Cfg).chain_err(|| { - failure::format_err!("failed to parse `{}` as a cfg expression", s) - })?; - Ok(p) - } else { - Ok(Platform::Name(s.to_string())) - } - } -} - -impl fmt::Display for Platform { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - Platform::Name(ref n) => n.fmt(f), - Platform::Cfg(ref e) => write!(f, "cfg({})", e), - } - } -} +use std::rc::Rc; + +use log::trace; +use semver::ReqParseError; +use semver::VersionReq; +use serde::ser; +use serde::Serialize; +use url::Url; + +use crate::core::interning::InternedString; +use crate::core::{PackageId, SourceId, Summary}; +use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::{Config, Platform}; + +/// Information about a dependency requested by a Cargo manifest. +/// Cheap to copy. +#[derive(PartialEq, Eq, Hash, Ord, PartialOrd, Clone, Debug)] +pub struct Dependency { + inner: Rc, +} + +/// The data underlying a `Dependency`. +#[derive(PartialEq, Eq, Hash, Ord, PartialOrd, Clone, Debug)] +struct Inner { + name: InternedString, + source_id: SourceId, + /// Source ID for the registry as specified in the manifest. + /// + /// This will be None if it is not specified (crates.io dependency). + /// This is different from `source_id` for example when both a `path` and + /// `registry` is specified. Or in the case of a crates.io dependency, + /// `source_id` will be crates.io and this will be None. + registry_id: Option, + req: VersionReq, + specified_req: bool, + kind: Kind, + only_match_name: bool, + explicit_name_in_toml: Option, + + optional: bool, + public: bool, + default_features: bool, + features: Vec, + + // This dependency should be used only for this platform. + // `None` means *all platforms*. + platform: Option, +} + +#[derive(Serialize)] +struct SerializedDependency<'a> { + name: &'a str, + source: SourceId, + req: String, + kind: Kind, + rename: Option<&'a str>, + + optional: bool, + uses_default_features: bool, + features: &'a [InternedString], + target: Option<&'a Platform>, + /// The registry URL this dependency is from. + /// If None, then it comes from the default registry (crates.io). 
+ registry: Option, +} + +impl ser::Serialize for Dependency { + fn serialize(&self, s: S) -> Result + where + S: ser::Serializer, + { + SerializedDependency { + name: &*self.package_name(), + source: self.source_id(), + req: self.version_req().to_string(), + kind: self.kind(), + optional: self.is_optional(), + uses_default_features: self.uses_default_features(), + features: self.features(), + target: self.platform(), + rename: self.explicit_name_in_toml().map(|s| s.as_str()), + registry: self.registry_id().map(|sid| sid.url().clone()), + } + .serialize(s) + } +} + +#[derive(PartialEq, Eq, Hash, Ord, PartialOrd, Clone, Debug, Copy)] +pub enum Kind { + Normal, + Development, + Build, +} + +fn parse_req_with_deprecated( + name: &str, + req: &str, + extra: Option<(PackageId, &Config)>, +) -> CargoResult { + match VersionReq::parse(req) { + Err(ReqParseError::DeprecatedVersionRequirement(requirement)) => { + let (inside, config) = match extra { + Some(pair) => pair, + None => return Err(ReqParseError::DeprecatedVersionRequirement(requirement).into()), + }; + let msg = format!( + "\ +parsed version requirement `{}` is no longer valid + +Previous versions of Cargo accepted this malformed requirement, +but it is being deprecated. This was found when parsing the manifest +of {} {}, and the correct version requirement is `{}`. + +This will soon become a hard error, so it's either recommended to +update to a fixed version or contact the upstream maintainer about +this warning. +", + req, + inside.name(), + inside.version(), + requirement + ); + config.shell().warn(&msg)?; + + Ok(requirement) + } + Err(e) => { + let err: CargoResult = Err(e.into()); + let v: VersionReq = err.chain_err(|| { + format!( + "failed to parse the version requirement `{}` for dependency `{}`", + req, name + ) + })?; + Ok(v) + } + Ok(v) => Ok(v), + } +} + +impl ser::Serialize for Kind { + fn serialize(&self, s: S) -> Result + where + S: ser::Serializer, + { + match *self { + Kind::Normal => None, + Kind::Development => Some("dev"), + Kind::Build => Some("build"), + } + .serialize(s) + } +} + +impl Dependency { + /// Attempt to create a `Dependency` from an entry in the manifest. + pub fn parse( + name: &str, + version: Option<&str>, + source_id: SourceId, + inside: PackageId, + config: &Config, + ) -> CargoResult { + let arg = Some((inside, config)); + let (specified_req, version_req) = match version { + Some(v) => (true, parse_req_with_deprecated(name, v, arg)?), + None => (false, VersionReq::any()), + }; + + let mut ret = Dependency::new_override(name, source_id); + { + let ptr = Rc::make_mut(&mut ret.inner); + ptr.only_match_name = false; + ptr.req = version_req; + ptr.specified_req = specified_req; + } + Ok(ret) + } + + /// Attempt to create a `Dependency` from an entry in the manifest. 
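+    ///
+    /// Unlike `parse`, no `Config` is available here, so a deprecated
+    /// (malformed) version requirement is returned as an error rather than
+    /// being rewritten with a warning.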
+ pub fn parse_no_deprecated( + name: &str, + version: Option<&str>, + source_id: SourceId, + ) -> CargoResult { + let (specified_req, version_req) = match version { + Some(v) => (true, parse_req_with_deprecated(name, v, None)?), + None => (false, VersionReq::any()), + }; + + let mut ret = Dependency::new_override(name, source_id); + { + let ptr = Rc::make_mut(&mut ret.inner); + ptr.only_match_name = false; + ptr.req = version_req; + ptr.specified_req = specified_req; + } + Ok(ret) + } + + pub fn new_override(name: &str, source_id: SourceId) -> Dependency { + assert!(!name.is_empty()); + Dependency { + inner: Rc::new(Inner { + name: InternedString::new(name), + source_id, + registry_id: None, + req: VersionReq::any(), + kind: Kind::Normal, + only_match_name: true, + optional: false, + public: false, + features: Vec::new(), + default_features: true, + specified_req: false, + platform: None, + explicit_name_in_toml: None, + }), + } + } + + pub fn version_req(&self) -> &VersionReq { + &self.inner.req + } + + /// This is the name of this `Dependency` as listed in `Cargo.toml`. + /// + /// Or in other words, this is what shows up in the `[dependencies]` section + /// on the left hand side. This is *not* the name of the package that's + /// being depended on as the dependency can be renamed. For that use + /// `package_name` below. + /// + /// Both of the dependencies below return `foo` for `name_in_toml`: + /// + /// ```toml + /// [dependencies] + /// foo = "0.1" + /// ``` + /// + /// and ... + /// + /// ```toml + /// [dependencies] + /// foo = { version = "0.1", package = 'bar' } + /// ``` + pub fn name_in_toml(&self) -> InternedString { + self.explicit_name_in_toml().unwrap_or(self.inner.name) + } + + /// The name of the package that this `Dependency` depends on. + /// + /// Usually this is what's written on the left hand side of a dependencies + /// section, but it can also be renamed via the `package` key. + /// + /// Both of the dependencies below return `foo` for `package_name`: + /// + /// ```toml + /// [dependencies] + /// foo = "0.1" + /// ``` + /// + /// and ... + /// + /// ```toml + /// [dependencies] + /// bar = { version = "0.1", package = 'foo' } + /// ``` + pub fn package_name(&self) -> InternedString { + self.inner.name + } + + pub fn source_id(&self) -> SourceId { + self.inner.source_id + } + + pub fn registry_id(&self) -> Option { + self.inner.registry_id + } + + pub fn set_registry_id(&mut self, registry_id: SourceId) -> &mut Dependency { + Rc::make_mut(&mut self.inner).registry_id = Some(registry_id); + self + } + + pub fn kind(&self) -> Kind { + self.inner.kind + } + + pub fn is_public(&self) -> bool { + self.inner.public + } + + /// Sets whether the dependency is public. + pub fn set_public(&mut self, public: bool) -> &mut Dependency { + if public { + // Setting 'public' only makes sense for normal dependencies + assert_eq!(self.kind(), Kind::Normal); + } + Rc::make_mut(&mut self.inner).public = public; + self + } + + pub fn specified_req(&self) -> bool { + self.inner.specified_req + } + + /// If none, this dependencies must be built for all platforms. + /// If some, it must only be built for the specified platform. + pub fn platform(&self) -> Option<&Platform> { + self.inner.platform.as_ref() + } + + /// The renamed name of this dependency, if any. + /// + /// If the `package` key is used in `Cargo.toml` then this returns the same + /// value as `name_in_toml`. 
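+    ///
+    /// For example, this manifest entry:
+    ///
+    /// ```toml
+    /// [dependencies]
+    /// bar = { version = "0.1", package = 'foo' }
+    /// ```
+    ///
+    /// yields `Some("bar")`, whereas a plain `foo = "0.1"` entry yields `None`.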
+ pub fn explicit_name_in_toml(&self) -> Option { + self.inner.explicit_name_in_toml + } + + pub fn set_kind(&mut self, kind: Kind) -> &mut Dependency { + if self.is_public() { + // Setting 'public' only makes sense for normal dependencies + assert_eq!(kind, Kind::Normal); + } + Rc::make_mut(&mut self.inner).kind = kind; + self + } + + /// Sets the list of features requested for the package. + pub fn set_features( + &mut self, + features: impl IntoIterator>, + ) -> &mut Dependency { + Rc::make_mut(&mut self.inner).features = features + .into_iter() + .map(|s| InternedString::new(s.as_ref())) + .collect(); + self + } + + /// Sets whether the dependency requests default features of the package. + pub fn set_default_features(&mut self, default_features: bool) -> &mut Dependency { + Rc::make_mut(&mut self.inner).default_features = default_features; + self + } + + /// Sets whether the dependency is optional. + pub fn set_optional(&mut self, optional: bool) -> &mut Dependency { + Rc::make_mut(&mut self.inner).optional = optional; + self + } + + /// Sets the source ID for this dependency. + pub fn set_source_id(&mut self, id: SourceId) -> &mut Dependency { + Rc::make_mut(&mut self.inner).source_id = id; + self + } + + /// Sets the version requirement for this dependency. + pub fn set_version_req(&mut self, req: VersionReq) -> &mut Dependency { + Rc::make_mut(&mut self.inner).req = req; + self + } + + pub fn set_platform(&mut self, platform: Option) -> &mut Dependency { + Rc::make_mut(&mut self.inner).platform = platform; + self + } + + pub fn set_explicit_name_in_toml(&mut self, name: &str) -> &mut Dependency { + Rc::make_mut(&mut self.inner).explicit_name_in_toml = Some(InternedString::new(name)); + self + } + + /// Locks this dependency to depending on the specified package ID. + pub fn lock_to(&mut self, id: PackageId) -> &mut Dependency { + assert_eq!(self.inner.source_id, id.source_id()); + assert!(self.inner.req.matches(id.version())); + trace!( + "locking dep from `{}` with `{}` at {} to {}", + self.package_name(), + self.version_req(), + self.source_id(), + id + ); + self.set_version_req(VersionReq::exact(id.version())) + .set_source_id(id.source_id()) + } + + /// Returns `true` if this is a "locked" dependency, basically whether it has + /// an exact version req. + pub fn is_locked(&self) -> bool { + // Kind of a hack to figure this out, but it works! + self.inner.req.to_string().starts_with('=') + } + + /// Returns `false` if the dependency is only used to build the local package. + pub fn is_transitive(&self) -> bool { + match self.inner.kind { + Kind::Normal | Kind::Build => true, + Kind::Development => false, + } + } + + pub fn is_build(&self) -> bool { + match self.inner.kind { + Kind::Build => true, + _ => false, + } + } + + pub fn is_optional(&self) -> bool { + self.inner.optional + } + + /// Returns `true` if the default features of the dependency are requested. + pub fn uses_default_features(&self) -> bool { + self.inner.default_features + } + /// Returns the list of features that are requested by the dependency. + pub fn features(&self) -> &[InternedString] { + &self.inner.features + } + + /// Returns `true` if the package (`sum`) can fulfill this dependency request. + pub fn matches(&self, sum: &Summary) -> bool { + self.matches_id(sum.package_id()) + } + + /// Returns `true` if the package (`id`) can fulfill this dependency request. 
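+    ///
+    /// Unlike `matches_id`, the package's source is not compared; only the name
+    /// and the version requirement are checked.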
+ pub fn matches_ignoring_source(&self, id: PackageId) -> bool { + self.package_name() == id.name() && self.version_req().matches(id.version()) + } + + /// Returns `true` if the package (`id`) can fulfill this dependency request. + pub fn matches_id(&self, id: PackageId) -> bool { + self.inner.name == id.name() + && (self.inner.only_match_name + || (self.inner.req.matches(id.version()) && self.inner.source_id == id.source_id())) + } + + pub fn map_source(mut self, to_replace: SourceId, replace_with: SourceId) -> Dependency { + if self.source_id() != to_replace { + self + } else { + self.set_source_id(replace_with); + self + } + } +} diff --git a/src/cargo/core/resolver/dep_cache.rs b/src/cargo/core/resolver/dep_cache.rs index e20a78a66ae..5df021aa9ce 100644 --- a/src/cargo/core/resolver/dep_cache.rs +++ b/src/cargo/core/resolver/dep_cache.rs @@ -1,463 +1,463 @@ -//! There are 2 sources of facts for the resolver: -//! -//! - The `Registry` tells us for a `Dependency` what versions are available to fulfil it. -//! - The `Summary` tells us for a version (and features) what dependencies need to be fulfilled for it to be activated. -//! -//! These constitute immutable facts, the soled ground truth that all other inference depends on. -//! Theoretically this could all be enumerated ahead of time, but we want to be lazy and only -//! look up things we need to. The compromise is to cache the results as they are computed. -//! -//! This module impl that cache in all the gory details - -use std::cmp::Ordering; -use std::collections::{BTreeSet, HashMap, HashSet}; -use std::rc::Rc; - -use log::debug; - -use crate::core::interning::InternedString; -use crate::core::{Dependency, FeatureValue, PackageId, PackageIdSpec, Registry, Summary}; -use crate::util::errors::CargoResult; - -use crate::core::resolver::types::{ConflictReason, DepInfo, FeaturesSet}; -use crate::core::resolver::{ActivateResult, ResolveOpts}; - -pub struct RegistryQueryer<'a> { - pub registry: &'a mut (dyn Registry + 'a), - replacements: &'a [(PackageIdSpec, Dependency)], - try_to_use: &'a HashSet, - /// If set the list of dependency candidates will be sorted by minimal - /// versions first. That allows `cargo update -Z minimal-versions` which will - /// specify minimum dependency versions to be used. - minimal_versions: bool, - /// a cache of `Candidate`s that fulfil a `Dependency` - registry_cache: HashMap>>, - /// a cache of `Dependency`s that are required for a `Summary` - summary_cache: HashMap< - (Option, Summary, ResolveOpts), - Rc<(HashSet, Rc>)>, - >, - /// all the cases we ended up using a supplied replacement - used_replacements: HashMap, -} - -impl<'a> RegistryQueryer<'a> { - pub fn new( - registry: &'a mut dyn Registry, - replacements: &'a [(PackageIdSpec, Dependency)], - try_to_use: &'a HashSet, - minimal_versions: bool, - ) -> Self { - RegistryQueryer { - registry, - replacements, - try_to_use, - minimal_versions, - registry_cache: HashMap::new(), - summary_cache: HashMap::new(), - used_replacements: HashMap::new(), - } - } - - pub fn used_replacement_for(&self, p: PackageId) -> Option<(PackageId, PackageId)> { - self.used_replacements.get(&p).map(|r| (p, r.package_id())) - } - - pub fn replacement_summary(&self, p: PackageId) -> Option<&Summary> { - self.used_replacements.get(&p) - } - - /// Queries the `registry` to return a list of candidates for `dep`. - /// - /// This method is the location where overrides are taken into account. 
If - /// any candidates are returned which match an override then the override is - /// applied by performing a second query for what the override should - /// return. - pub fn query(&mut self, dep: &Dependency) -> CargoResult>> { - if let Some(out) = self.registry_cache.get(dep).cloned() { - return Ok(out); - } - - let mut ret = Vec::new(); - self.registry.query( - dep, - &mut |s| { - ret.push(s); - }, - false, - )?; - for summary in ret.iter_mut() { - let mut potential_matches = self - .replacements - .iter() - .filter(|&&(ref spec, _)| spec.matches(summary.package_id())); - - let &(ref spec, ref dep) = match potential_matches.next() { - None => continue, - Some(replacement) => replacement, - }; - debug!( - "found an override for {} {}", - dep.package_name(), - dep.version_req() - ); - - let mut summaries = self.registry.query_vec(dep, false)?.into_iter(); - let s = summaries.next().ok_or_else(|| { - failure::format_err!( - "no matching package for override `{}` found\n\ - location searched: {}\n\ - version required: {}", - spec, - dep.source_id(), - dep.version_req() - ) - })?; - let summaries = summaries.collect::>(); - if !summaries.is_empty() { - let bullets = summaries - .iter() - .map(|s| format!(" * {}", s.package_id())) - .collect::>(); - failure::bail!( - "the replacement specification `{}` matched \ - multiple packages:\n * {}\n{}", - spec, - s.package_id(), - bullets.join("\n") - ); - } - - // The dependency should be hard-coded to have the same name and an - // exact version requirement, so both of these assertions should - // never fail. - assert_eq!(s.version(), summary.version()); - assert_eq!(s.name(), summary.name()); - - let replace = if s.source_id() == summary.source_id() { - debug!("Preventing\n{:?}\nfrom replacing\n{:?}", summary, s); - None - } else { - Some(s) - }; - let matched_spec = spec.clone(); - - // Make sure no duplicates - if let Some(&(ref spec, _)) = potential_matches.next() { - failure::bail!( - "overlapping replacement specifications found:\n\n \ - * {}\n * {}\n\nboth specifications match: {}", - matched_spec, - spec, - summary.package_id() - ); - } - - for dep in summary.dependencies() { - debug!("\t{} => {}", dep.package_name(), dep.version_req()); - } - if let Some(r) = replace { - self.used_replacements.insert(summary.package_id(), r); - } - } - - // When we attempt versions for a package we'll want to do so in a - // sorted fashion to pick the "best candidates" first. Currently we try - // prioritized summaries (those in `try_to_use`) and failing that we - // list everything from the maximum version to the lowest version. - ret.sort_unstable_by(|a, b| { - let a_in_previous = self.try_to_use.contains(&a.package_id()); - let b_in_previous = self.try_to_use.contains(&b.package_id()); - let previous_cmp = a_in_previous.cmp(&b_in_previous).reverse(); - match previous_cmp { - Ordering::Equal => { - let cmp = a.version().cmp(b.version()); - if self.minimal_versions { - // Lower version ordered first. - cmp - } else { - // Higher version ordered first. - cmp.reverse() - } - } - _ => previous_cmp, - } - }); - - let out = Rc::new(ret); - - self.registry_cache.insert(dep.clone(), out.clone()); - - Ok(out) - } - - /// Find out what dependencies will be added by activating `candidate`, - /// with features described in `opts`. Then look up in the `registry` - /// the candidates that will fulfil each of these dependencies, as it is the - /// next obvious question. 
- pub fn build_deps( - &mut self, - parent: Option, - candidate: &Summary, - opts: &ResolveOpts, - ) -> ActivateResult, Rc>)>> { - // if we have calculated a result before, then we can just return it, - // as it is a "pure" query of its arguments. - if let Some(out) = self - .summary_cache - .get(&(parent, candidate.clone(), opts.clone())) - .cloned() - { - return Ok(out); - } - // First, figure out our set of dependencies based on the requested set - // of features. This also calculates what features we're going to enable - // for our own dependencies. - let (used_features, deps) = resolve_features(parent, candidate, opts)?; - - // Next, transform all dependencies into a list of possible candidates - // which can satisfy that dependency. - let mut deps = deps - .into_iter() - .map(|(dep, features)| { - let candidates = self.query(&dep)?; - Ok((dep, candidates, features)) - }) - .collect::>>()?; - - // Attempt to resolve dependencies with fewer candidates before trying - // dependencies with more candidates. This way if the dependency with - // only one candidate can't be resolved we don't have to do a bunch of - // work before we figure that out. - deps.sort_by_key(|&(_, ref a, _)| a.len()); - - let out = Rc::new((used_features, Rc::new(deps))); - - // If we succeed we add the result to the cache so we can use it again next time. - // We dont cache the failure cases as they dont impl Clone. - self.summary_cache - .insert((parent, candidate.clone(), opts.clone()), out.clone()); - - Ok(out) - } -} - -/// Returns the features we ended up using and -/// all dependencies and the features we want from each of them. -pub fn resolve_features<'b>( - parent: Option, - s: &'b Summary, - opts: &'b ResolveOpts, -) -> ActivateResult<(HashSet, Vec<(Dependency, FeaturesSet)>)> { - // First, filter by dev-dependencies. - let deps = s.dependencies(); - let deps = deps.iter().filter(|d| d.is_transitive() || opts.dev_deps); - - let reqs = build_requirements(s, opts)?; - let mut ret = Vec::new(); - let mut used_features = HashSet::new(); - let default_dep = (false, BTreeSet::new()); - - // Next, collect all actually enabled dependencies and their features. - for dep in deps { - // Skip optional dependencies, but not those enabled through a - // feature - if dep.is_optional() && !reqs.deps.contains_key(&dep.name_in_toml()) { - continue; - } - // So we want this dependency. Move the features we want from - // `feature_deps` to `ret` and register ourselves as using this - // name. - let base = reqs.deps.get(&dep.name_in_toml()).unwrap_or(&default_dep); - used_features.insert(dep.name_in_toml()); - let always_required = !dep.is_optional() - && !s - .dependencies() - .iter() - .any(|d| d.is_optional() && d.name_in_toml() == dep.name_in_toml()); - if always_required && base.0 { - return Err(match parent { - None => failure::format_err!( - "Package `{}` does not have feature `{}`. 
It has a required dependency \ - with that name, but only optional dependencies can be used as features.", - s.package_id(), - dep.name_in_toml() - ) - .into(), - Some(p) => ( - p, - ConflictReason::RequiredDependencyAsFeatures(dep.name_in_toml()), - ) - .into(), - }); - } - let mut base = base.1.clone(); - base.extend(dep.features().iter()); - for feature in base.iter() { - if feature.contains('/') { - return Err(failure::format_err!( - "feature names may not contain slashes: `{}`", - feature - ) - .into()); - } - } - ret.push((dep.clone(), Rc::new(base))); - } - - // Any entries in `reqs.dep` which weren't used are bugs in that the - // package does not actually have those dependencies. We classified - // them as dependencies in the first place because there is no such - // feature, either. - let remaining = reqs - .deps - .keys() - .cloned() - .filter(|s| !used_features.contains(s)) - .collect::>(); - if !remaining.is_empty() { - let features = remaining.join(", "); - return Err(match parent { - None => failure::format_err!( - "Package `{}` does not have these features: `{}`", - s.package_id(), - features - ) - .into(), - Some(p) => (p, ConflictReason::MissingFeatures(features)).into(), - }); - } - - Ok((reqs.into_used(), ret)) -} - -/// Takes requested features for a single package from the input `ResolveOpts` and -/// recurses to find all requested features, dependencies and requested -/// dependency features in a `Requirements` object, returning it to the resolver. -fn build_requirements<'a, 'b: 'a>( - s: &'a Summary, - opts: &'b ResolveOpts, -) -> CargoResult> { - let mut reqs = Requirements::new(s); - - if opts.all_features { - for key in s.features().keys() { - reqs.require_feature(*key)?; - } - for dep in s.dependencies().iter().filter(|d| d.is_optional()) { - reqs.require_dependency(dep.name_in_toml()); - } - } else { - for &f in opts.features.iter() { - reqs.require_value(&FeatureValue::new(f, s))?; - } - } - - if opts.uses_default_features { - if s.features().contains_key("default") { - reqs.require_feature(InternedString::new("default"))?; - } - } - - Ok(reqs) -} - -struct Requirements<'a> { - summary: &'a Summary, - // The deps map is a mapping of package name to list of features enabled. - // Each package should be enabled, and each package should have the - // specified set of features enabled. The boolean indicates whether this - // package was specifically requested (rather than just requesting features - // *within* this package). - deps: HashMap)>, - // The used features set is the set of features which this local package had - // enabled, which is later used when compiling to instruct the code what - // features were enabled. - used: HashSet, - visited: HashSet, -} - -impl Requirements<'_> { - fn new(summary: &Summary) -> Requirements<'_> { - Requirements { - summary, - deps: HashMap::new(), - used: HashSet::new(), - visited: HashSet::new(), - } - } - - fn into_used(self) -> HashSet { - self.used - } - - fn require_crate_feature(&mut self, package: InternedString, feat: InternedString) { - // If `package` is indeed an optional dependency then we activate the - // feature named `package`, but otherwise if `package` is a required - // dependency then there's no feature associated with it. 
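The recursive walk that `build_requirements` and `Requirements` perform over the feature table can be sketched with plain maps. This is a simplification, not the real `FeatureValue` handling: feature values here are bare strings, with "dep/feat" standing in for a dependency feature, the `visited` set prevents infinite recursion, and a feature that names itself directly is reported as a cycle.

use std::collections::{HashMap, HashSet};

// feature name -> values it enables; a value is another feature, an optional
// dependency, or "dep/feat" (simplified stand-in for Cargo's FeatureValue).
fn require_feature(
    features: &HashMap<String, Vec<String>>,
    feat: &str,
    visited: &mut HashSet<String>,
    enabled_deps: &mut HashMap<String, HashSet<String>>,
) -> Result<(), String> {
    if !visited.insert(feat.to_string()) {
        return Ok(()); // already handled
    }
    for value in features.get(feat).map(|v| v.as_slice()).unwrap_or(&[]) {
        if value == feat {
            return Err(format!("cyclic feature dependency: `{}` depends on itself", feat));
        }
        if let Some((dep, dep_feat)) = value.split_once('/') {
            enabled_deps.entry(dep.to_string()).or_default().insert(dep_feat.to_string());
        } else if features.contains_key(value) {
            require_feature(features, value, visited, enabled_deps)?;
        } else {
            // Not a feature of this package: treat it as enabling an optional dependency.
            enabled_deps.entry(value.to_string()).or_default();
        }
    }
    Ok(())
}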
- if let Some(dep) = self - .summary - .dependencies() - .iter() - .find(|p| p.name_in_toml() == package) - { - if dep.is_optional() { - self.used.insert(package); - } - } - self.deps - .entry(package) - .or_insert((false, BTreeSet::new())) - .1 - .insert(feat); - } - - fn seen(&mut self, feat: InternedString) -> bool { - if self.visited.insert(feat) { - self.used.insert(feat); - false - } else { - true - } - } - - fn require_dependency(&mut self, pkg: InternedString) { - if self.seen(pkg) { - return; - } - self.deps.entry(pkg).or_insert((false, BTreeSet::new())).0 = true; - } - - fn require_feature(&mut self, feat: InternedString) -> CargoResult<()> { - if feat.is_empty() || self.seen(feat) { - return Ok(()); - } - for fv in self - .summary - .features() - .get(feat.as_str()) - .expect("must be a valid feature") - { - match *fv { - FeatureValue::Feature(ref dep_feat) if **dep_feat == *feat => failure::bail!( - "cyclic feature dependency: feature `{}` depends on itself", - feat - ), - _ => {} - } - self.require_value(fv)?; - } - Ok(()) - } - - fn require_value(&mut self, fv: &FeatureValue) -> CargoResult<()> { - match fv { - FeatureValue::Feature(feat) => self.require_feature(*feat)?, - FeatureValue::Crate(dep) => self.require_dependency(*dep), - FeatureValue::CrateFeature(dep, dep_feat) => { - self.require_crate_feature(*dep, *dep_feat) - } - }; - Ok(()) - } -} +//! There are 2 sources of facts for the resolver: +//! +//! - The `Registry` tells us for a `Dependency` what versions are available to fulfil it. +//! - The `Summary` tells us for a version (and features) what dependencies need to be fulfilled for it to be activated. +//! +//! These constitute immutable facts, the soled ground truth that all other inference depends on. +//! Theoretically this could all be enumerated ahead of time, but we want to be lazy and only +//! look up things we need to. The compromise is to cache the results as they are computed. +//! +//! This module impl that cache in all the gory details + +use std::cmp::Ordering; +use std::collections::{BTreeSet, HashMap, HashSet}; +use std::rc::Rc; + +use log::debug; + +use crate::core::interning::InternedString; +use crate::core::{Dependency, FeatureValue, PackageId, PackageIdSpec, Registry, Summary}; +use crate::util::errors::CargoResult; + +use crate::core::resolver::types::{ConflictReason, DepInfo, FeaturesSet}; +use crate::core::resolver::{ActivateResult, ResolveOpts}; + +pub struct RegistryQueryer<'a> { + pub registry: &'a mut (dyn Registry + 'a), + replacements: &'a [(PackageIdSpec, Dependency)], + try_to_use: &'a HashSet, + /// If set the list of dependency candidates will be sorted by minimal + /// versions first. That allows `cargo update -Z minimal-versions` which will + /// specify minimum dependency versions to be used. 
+ minimal_versions: bool, + /// a cache of `Candidate`s that fulfil a `Dependency` + registry_cache: HashMap>>, + /// a cache of `Dependency`s that are required for a `Summary` + summary_cache: HashMap< + (Option, Summary, ResolveOpts), + Rc<(HashSet, Rc>)>, + >, + /// all the cases we ended up using a supplied replacement + used_replacements: HashMap, +} + +impl<'a> RegistryQueryer<'a> { + pub fn new( + registry: &'a mut dyn Registry, + replacements: &'a [(PackageIdSpec, Dependency)], + try_to_use: &'a HashSet, + minimal_versions: bool, + ) -> Self { + RegistryQueryer { + registry, + replacements, + try_to_use, + minimal_versions, + registry_cache: HashMap::new(), + summary_cache: HashMap::new(), + used_replacements: HashMap::new(), + } + } + + pub fn used_replacement_for(&self, p: PackageId) -> Option<(PackageId, PackageId)> { + self.used_replacements.get(&p).map(|r| (p, r.package_id())) + } + + pub fn replacement_summary(&self, p: PackageId) -> Option<&Summary> { + self.used_replacements.get(&p) + } + + /// Queries the `registry` to return a list of candidates for `dep`. + /// + /// This method is the location where overrides are taken into account. If + /// any candidates are returned which match an override then the override is + /// applied by performing a second query for what the override should + /// return. + pub fn query(&mut self, dep: &Dependency) -> CargoResult>> { + if let Some(out) = self.registry_cache.get(dep).cloned() { + return Ok(out); + } + + let mut ret = Vec::new(); + self.registry.query( + dep, + &mut |s| { + ret.push(s); + }, + false, + )?; + for summary in ret.iter_mut() { + let mut potential_matches = self + .replacements + .iter() + .filter(|&&(ref spec, _)| spec.matches(summary.package_id())); + + let &(ref spec, ref dep) = match potential_matches.next() { + None => continue, + Some(replacement) => replacement, + }; + debug!( + "found an override for {} {}", + dep.package_name(), + dep.version_req() + ); + + let mut summaries = self.registry.query_vec(dep, false)?.into_iter(); + let s = summaries.next().ok_or_else(|| { + failure::format_err!( + "no matching package for override `{}` found\n\ + location searched: {}\n\ + version required: {}", + spec, + dep.source_id(), + dep.version_req() + ) + })?; + let summaries = summaries.collect::>(); + if !summaries.is_empty() { + let bullets = summaries + .iter() + .map(|s| format!(" * {}", s.package_id())) + .collect::>(); + failure::bail!( + "the replacement specification `{}` matched \ + multiple packages:\n * {}\n{}", + spec, + s.package_id(), + bullets.join("\n") + ); + } + + // The dependency should be hard-coded to have the same name and an + // exact version requirement, so both of these assertions should + // never fail. 
+ assert_eq!(s.version(), summary.version()); + assert_eq!(s.name(), summary.name()); + + let replace = if s.source_id() == summary.source_id() { + debug!("Preventing\n{:?}\nfrom replacing\n{:?}", summary, s); + None + } else { + Some(s) + }; + let matched_spec = spec.clone(); + + // Make sure no duplicates + if let Some(&(ref spec, _)) = potential_matches.next() { + failure::bail!( + "overlapping replacement specifications found:\n\n \ + * {}\n * {}\n\nboth specifications match: {}", + matched_spec, + spec, + summary.package_id() + ); + } + + for dep in summary.dependencies() { + debug!("\t{} => {}", dep.package_name(), dep.version_req()); + } + if let Some(r) = replace { + self.used_replacements.insert(summary.package_id(), r); + } + } + + // When we attempt versions for a package we'll want to do so in a + // sorted fashion to pick the "best candidates" first. Currently we try + // prioritized summaries (those in `try_to_use`) and failing that we + // list everything from the maximum version to the lowest version. + ret.sort_unstable_by(|a, b| { + let a_in_previous = self.try_to_use.contains(&a.package_id()); + let b_in_previous = self.try_to_use.contains(&b.package_id()); + let previous_cmp = a_in_previous.cmp(&b_in_previous).reverse(); + match previous_cmp { + Ordering::Equal => { + let cmp = a.version().cmp(b.version()); + if self.minimal_versions { + // Lower version ordered first. + cmp + } else { + // Higher version ordered first. + cmp.reverse() + } + } + _ => previous_cmp, + } + }); + + let out = Rc::new(ret); + + self.registry_cache.insert(dep.clone(), out.clone()); + + Ok(out) + } + + /// Find out what dependencies will be added by activating `candidate`, + /// with features described in `opts`. Then look up in the `registry` + /// the candidates that will fulfil each of these dependencies, as it is the + /// next obvious question. + pub fn build_deps( + &mut self, + parent: Option, + candidate: &Summary, + opts: &ResolveOpts, + ) -> ActivateResult, Rc>)>> { + // if we have calculated a result before, then we can just return it, + // as it is a "pure" query of its arguments. + if let Some(out) = self + .summary_cache + .get(&(parent, candidate.clone(), opts.clone())) + .cloned() + { + return Ok(out); + } + // First, figure out our set of dependencies based on the requested set + // of features. This also calculates what features we're going to enable + // for our own dependencies. + let (used_features, deps) = resolve_features(parent, candidate, opts)?; + + // Next, transform all dependencies into a list of possible candidates + // which can satisfy that dependency. + let mut deps = deps + .into_iter() + .map(|(dep, features)| { + let candidates = self.query(&dep)?; + Ok((dep, candidates, features)) + }) + .collect::>>()?; + + // Attempt to resolve dependencies with fewer candidates before trying + // dependencies with more candidates. This way if the dependency with + // only one candidate can't be resolved we don't have to do a bunch of + // work before we figure that out. + deps.sort_by_key(|&(_, ref a, _)| a.len()); + + let out = Rc::new((used_features, Rc::new(deps))); + + // If we succeed we add the result to the cache so we can use it again next time. + // We dont cache the failure cases as they dont impl Clone. + self.summary_cache + .insert((parent, candidate.clone(), opts.clone()), out.clone()); + + Ok(out) + } +} + +/// Returns the features we ended up using and +/// all dependencies and the features we want from each of them. 
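The fail-fast ordering used just above in `build_deps` can be isolated: once candidates are looked up for every dependency, the most constrained dependency (fewest candidates) is attempted first so a dead end is discovered with the least wasted work. The example data below is made up.

// Mirrors `deps.sort_by_key(|&(_, ref a, _)| a.len())`: fewest candidates first.
fn order_most_constrained_first<D, C>(deps: &mut Vec<(D, Vec<C>)>) {
    deps.sort_by_key(|(_, candidates)| candidates.len());
}

fn main() {
    let mut deps = vec![
        ("log", vec!["0.4.8", "0.4.11"]),
        ("serde", vec!["1.0.104"]),
        ("rand", vec!["0.7.0", "0.7.3", "0.8.0"]),
    ];
    order_most_constrained_first(&mut deps);
    assert_eq!(deps[0].0, "serde"); // one candidate: resolved (or rejected) first
}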
+pub fn resolve_features<'b>( + parent: Option, + s: &'b Summary, + opts: &'b ResolveOpts, +) -> ActivateResult<(HashSet, Vec<(Dependency, FeaturesSet)>)> { + // First, filter by dev-dependencies. + let deps = s.dependencies(); + let deps = deps.iter().filter(|d| d.is_transitive() || opts.dev_deps); + + let reqs = build_requirements(s, opts)?; + let mut ret = Vec::new(); + let mut used_features = HashSet::new(); + let default_dep = (false, BTreeSet::new()); + + // Next, collect all actually enabled dependencies and their features. + for dep in deps { + // Skip optional dependencies, but not those enabled through a + // feature + if dep.is_optional() && !reqs.deps.contains_key(&dep.name_in_toml()) { + continue; + } + // So we want this dependency. Move the features we want from + // `feature_deps` to `ret` and register ourselves as using this + // name. + let base = reqs.deps.get(&dep.name_in_toml()).unwrap_or(&default_dep); + used_features.insert(dep.name_in_toml()); + let always_required = !dep.is_optional() + && !s + .dependencies() + .iter() + .any(|d| d.is_optional() && d.name_in_toml() == dep.name_in_toml()); + if always_required && base.0 { + return Err(match parent { + None => failure::format_err!( + "Package `{}` does not have feature `{}`. It has a required dependency \ + with that name, but only optional dependencies can be used as features.", + s.package_id(), + dep.name_in_toml() + ) + .into(), + Some(p) => ( + p, + ConflictReason::RequiredDependencyAsFeatures(dep.name_in_toml()), + ) + .into(), + }); + } + let mut base = base.1.clone(); + base.extend(dep.features().iter()); + for feature in base.iter() { + if feature.contains('/') { + return Err(failure::format_err!( + "feature names may not contain slashes: `{}`", + feature + ) + .into()); + } + } + ret.push((dep.clone(), Rc::new(base))); + } + + // Any entries in `reqs.dep` which weren't used are bugs in that the + // package does not actually have those dependencies. We classified + // them as dependencies in the first place because there is no such + // feature, either. + let remaining = reqs + .deps + .keys() + .cloned() + .filter(|s| !used_features.contains(s)) + .collect::>(); + if !remaining.is_empty() { + let features = remaining.join(", "); + return Err(match parent { + None => failure::format_err!( + "Package `{}` does not have these features: `{}`", + s.package_id(), + features + ) + .into(), + Some(p) => (p, ConflictReason::MissingFeatures(features)).into(), + }); + } + + Ok((reqs.into_used(), ret)) +} + +/// Takes requested features for a single package from the input `ResolveOpts` and +/// recurses to find all requested features, dependencies and requested +/// dependency features in a `Requirements` object, returning it to the resolver. +fn build_requirements<'a, 'b: 'a>( + s: &'a Summary, + opts: &'b ResolveOpts, +) -> CargoResult> { + let mut reqs = Requirements::new(s); + + if opts.all_features { + for key in s.features().keys() { + reqs.require_feature(*key)?; + } + for dep in s.dependencies().iter().filter(|d| d.is_optional()) { + reqs.require_dependency(dep.name_in_toml()); + } + } else { + for &f in opts.features.iter() { + reqs.require_value(&FeatureValue::new(f, s))?; + } + } + + if opts.uses_default_features { + if s.features().contains_key("default") { + reqs.require_feature(InternedString::new("default"))?; + } + } + + Ok(reqs) +} + +struct Requirements<'a> { + summary: &'a Summary, + // The deps map is a mapping of package name to list of features enabled. 
+ // Each package should be enabled, and each package should have the + // specified set of features enabled. The boolean indicates whether this + // package was specifically requested (rather than just requesting features + // *within* this package). + deps: HashMap)>, + // The used features set is the set of features which this local package had + // enabled, which is later used when compiling to instruct the code what + // features were enabled. + used: HashSet, + visited: HashSet, +} + +impl Requirements<'_> { + fn new(summary: &Summary) -> Requirements<'_> { + Requirements { + summary, + deps: HashMap::new(), + used: HashSet::new(), + visited: HashSet::new(), + } + } + + fn into_used(self) -> HashSet { + self.used + } + + fn require_crate_feature(&mut self, package: InternedString, feat: InternedString) { + // If `package` is indeed an optional dependency then we activate the + // feature named `package`, but otherwise if `package` is a required + // dependency then there's no feature associated with it. + if let Some(dep) = self + .summary + .dependencies() + .iter() + .find(|p| p.name_in_toml() == package) + { + if dep.is_optional() { + self.used.insert(package); + } + } + self.deps + .entry(package) + .or_insert((false, BTreeSet::new())) + .1 + .insert(feat); + } + + fn seen(&mut self, feat: InternedString) -> bool { + if self.visited.insert(feat) { + self.used.insert(feat); + false + } else { + true + } + } + + fn require_dependency(&mut self, pkg: InternedString) { + if self.seen(pkg) { + return; + } + self.deps.entry(pkg).or_insert((false, BTreeSet::new())).0 = true; + } + + fn require_feature(&mut self, feat: InternedString) -> CargoResult<()> { + if feat.is_empty() || self.seen(feat) { + return Ok(()); + } + let feature = self + .summary + .features() + .get(feat.as_str()) + .expect("must be a valid feature"); + for fv in feature.1.as_slice() { + match *fv { + FeatureValue::Feature(ref dep_feat) if **dep_feat == *feat => failure::bail!( + "cyclic feature dependency: feature `{}` depends on itself", + feat + ), + _ => {} + } + self.require_value(fv)?; + } + Ok(()) + } + + fn require_value(&mut self, fv: &FeatureValue) -> CargoResult<()> { + match fv { + FeatureValue::Feature(feat) => self.require_feature(*feat)?, + FeatureValue::Crate(dep) => self.require_dependency(*dep), + FeatureValue::CrateFeature(dep, dep_feat) => { + self.require_crate_feature(*dep, *dep_feat) + } + }; + Ok(()) + } +} diff --git a/src/cargo/core/resolver/mod.rs b/src/cargo/core/resolver/mod.rs index 4aaa7eeafed..d5cbc45f5d5 100644 --- a/src/cargo/core/resolver/mod.rs +++ b/src/cargo/core/resolver/mod.rs @@ -1,1128 +1,1146 @@ -//! Resolution of the entire dependency graph for a crate. -//! -//! This module implements the core logic in taking the world of crates and -//! constraints and creating a resolved graph with locked versions for all -//! crates and their dependencies. This is separate from the registry module -//! which is more worried about discovering crates from various sources, this -//! module just uses the Registry trait as a source to learn about crates from. -//! -//! Actually solving a constraint graph is an NP-hard problem. This algorithm -//! is basically a nice heuristic to make sure we get roughly the best answer -//! most of the time. The constraints that we're working with are: -//! -//! 1. Each crate can have any number of dependencies. Each dependency can -//! declare a version range that it is compatible with. -//! 2. 
Crates can be activated with multiple version (e.g., show up in the -//! dependency graph twice) so long as each pairwise instance have -//! semver-incompatible versions. -//! -//! The algorithm employed here is fairly simple, we simply do a DFS, activating -//! the "newest crate" (highest version) first and then going to the next -//! option. The heuristics we employ are: -//! -//! * Never try to activate a crate version which is incompatible. This means we -//! only try crates which will actually satisfy a dependency and we won't ever -//! try to activate a crate that's semver compatible with something else -//! activated (as we're only allowed to have one) nor try to activate a crate -//! that has the same links attribute as something else -//! activated. -//! * Always try to activate the highest version crate first. The default -//! dependency in Cargo (e.g., when you write `foo = "0.1.2"`) is -//! semver-compatible, so selecting the highest version possible will allow us -//! to hopefully satisfy as many dependencies at once. -//! -//! Beyond that, what's implemented below is just a naive backtracking version -//! which should in theory try all possible combinations of dependencies and -//! versions to see if one works. The first resolution that works causes -//! everything to bail out immediately and return success, and only if *nothing* -//! works do we actually return an error up the stack. -//! -//! ## Performance -//! -//! Note that this is a relatively performance-critical portion of Cargo. The -//! data that we're processing is proportional to the size of the dependency -//! graph, which can often be quite large (e.g., take a look at Servo). To make -//! matters worse the DFS algorithm we're implemented is inherently quite -//! inefficient. When we add the requirement of backtracking on top it means -//! that we're implementing something that probably shouldn't be allocating all -//! over the place. - -use std::collections::{BTreeMap, HashMap, HashSet}; -use std::mem; -use std::rc::Rc; -use std::time::{Duration, Instant}; - -use log::{debug, trace}; - -use crate::core::PackageIdSpec; -use crate::core::{Dependency, PackageId, Registry, Summary}; -use crate::util::config::Config; -use crate::util::errors::CargoResult; -use crate::util::profile; - -use self::context::Context; -use self::dep_cache::RegistryQueryer; -use self::types::{ConflictMap, ConflictReason, DepsFrame}; -use self::types::{FeaturesSet, RcVecIter, RemainingDeps, ResolverProgress}; - -pub use self::encode::Metadata; -pub use self::encode::{EncodableDependency, EncodablePackageId, EncodableResolve}; -pub use self::errors::{ActivateError, ActivateResult, ResolveError}; -pub use self::resolve::{Resolve, ResolveVersion}; -pub use self::types::ResolveOpts; - -mod conflict_cache; -mod context; -mod dep_cache; -mod encode; -mod errors; -mod resolve; -mod types; - -/// Builds the list of all packages required to build the first argument. -/// -/// * `summaries` - the list of package summaries along with how to resolve -/// their features. This is a list of all top-level packages that are intended -/// to be part of the lock file (resolve output). These typically are a list -/// of all workspace members. -/// -/// * `replacements` - this is a list of `[replace]` directives found in the -/// root of the workspace. The list here is a `PackageIdSpec` of what to -/// replace and a `Dependency` to replace that with. 
In general it's not -/// recommended to use `[replace]` any more and use `[patch]` instead, which -/// is supported elsewhere. -/// -/// * `registry` - this is the source from which all package summaries are -/// loaded. It's expected that this is extensively configured ahead of time -/// and is idempotent with our requests to it (aka returns the same results -/// for the same query every time). Typically this is an instance of a -/// `PackageRegistry`. -/// -/// * `try_to_use` - this is a list of package IDs which were previously found -/// in the lock file. We heuristically prefer the ids listed in `try_to_use` -/// when sorting candidates to activate, but otherwise this isn't used -/// anywhere else. -/// -/// * `config` - a location to print warnings and such, or `None` if no warnings -/// should be printed -/// -/// * `print_warnings` - whether or not to print backwards-compatibility -/// warnings and such -/// -/// * `check_public_visible_dependencies` - a flag for whether to enforce the restrictions -/// introduced in the "public & private dependencies" RFC (1977). The current implementation -/// makes sure that there is only one version of each name visible to each package. -/// -/// But there are 2 stable ways to directly depend on different versions of the same name. -/// 1. Use the renamed dependencies functionality -/// 2. Use 'cfg({})' dependencies functionality -/// -/// When we have a decision for how to implement is without breaking existing functionality -/// this flag can be removed. -pub fn resolve( - summaries: &[(Summary, ResolveOpts)], - replacements: &[(PackageIdSpec, Dependency)], - registry: &mut dyn Registry, - try_to_use: &HashSet, - config: Option<&Config>, - check_public_visible_dependencies: bool, -) -> CargoResult { - let cx = Context::new(check_public_visible_dependencies); - let _p = profile::start("resolving"); - let minimal_versions = match config { - Some(config) => config.cli_unstable().minimal_versions, - None => false, - }; - let mut registry = RegistryQueryer::new(registry, replacements, try_to_use, minimal_versions); - let cx = activate_deps_loop(cx, &mut registry, summaries, config)?; - - let mut cksums = HashMap::new(); - for (summary, _) in cx.activations.values() { - let cksum = summary.checksum().map(|s| s.to_string()); - cksums.insert(summary.package_id(), cksum); - } - let resolve = Resolve::new( - cx.graph(), - cx.resolve_replacements(®istry), - cx.resolve_features - .iter() - .map(|(k, v)| (*k, v.iter().map(|x| x.to_string()).collect())) - .collect(), - cksums, - BTreeMap::new(), - Vec::new(), - ResolveVersion::default(), - ); - - check_cycles(&resolve)?; - check_duplicate_pkgs_in_lockfile(&resolve)?; - trace!("resolved: {:?}", resolve); - - Ok(resolve) -} - -/// Recursively activates the dependencies for `top`, in depth-first order, -/// backtracking across possible candidates for each dependency as necessary. -/// -/// If all dependencies can be activated and resolved to a version in the -/// dependency graph, cx.resolve is returned. -fn activate_deps_loop( - mut cx: Context, - registry: &mut RegistryQueryer<'_>, - summaries: &[(Summary, ResolveOpts)], - config: Option<&Config>, -) -> CargoResult { - let mut backtrack_stack = Vec::new(); - let mut remaining_deps = RemainingDeps::new(); - - // `past_conflicting_activations` is a cache of the reasons for each time we - // backtrack. - let mut past_conflicting_activations = conflict_cache::ConflictCache::new(); - - // Activate all the initial summaries to kick off some work. 
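Stripped of backtracking and error handling, `activate_deps_loop` has the shape of a plain worklist: seed it with the top-level summaries, then keep draining frames, where activating a frame may enqueue more work. A minimal sketch with a made-up `Frame` type (the real loop additionally maintains the `backtrack_stack` declared above):

use std::collections::VecDeque;

// Made-up stand-in for a dependency frame.
struct Frame {
    name: &'static str,
    children: Vec<Frame>,
}

fn drive(initial: Vec<Frame>) -> Vec<&'static str> {
    let mut remaining: VecDeque<Frame> = initial.into_iter().collect();
    let mut activated = Vec::new();
    while let Some(frame) = remaining.pop_front() {
        // "Activating" a frame may add more work (its dependencies).
        activated.push(frame.name);
        for child in frame.children {
            remaining.push_back(child);
        }
    }
    activated
}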
- for &(ref summary, ref opts) in summaries { - debug!("initial activation: {}", summary.package_id()); - let res = activate(&mut cx, registry, None, summary.clone(), opts.clone()); - match res { - Ok(Some((frame, _))) => remaining_deps.push(frame), - Ok(None) => (), - Err(ActivateError::Fatal(e)) => return Err(e), - Err(ActivateError::Conflict(_, _)) => panic!("bad error from activate"), - } - } - - let mut printed = ResolverProgress::new(); - - // Main resolution loop, this is the workhorse of the resolution algorithm. - // - // You'll note that a few stacks are maintained on the side, which might - // seem odd when this algorithm looks like it could be implemented - // recursively. While correct, this is implemented iteratively to avoid - // blowing the stack (the recursion depth is proportional to the size of the - // input). - // - // The general sketch of this loop is to run until there are no dependencies - // left to activate, and for each dependency to attempt to activate all of - // its own dependencies in turn. The `backtrack_stack` is a side table of - // backtracking states where if we hit an error we can return to in order to - // attempt to continue resolving. - while let Some((just_here_for_the_error_messages, frame)) = - remaining_deps.pop_most_constrained() - { - let (mut parent, (mut dep, candidates, mut features)) = frame; - - // If we spend a lot of time here (we shouldn't in most cases) then give - // a bit of a visual indicator as to what we're doing. - printed.shell_status(config)?; - - trace!( - "{}[{}]>{} {} candidates", - parent.name(), - cx.age(), - dep.package_name(), - candidates.len() - ); - - let just_here_for_the_error_messages = just_here_for_the_error_messages - && past_conflicting_activations - .conflicting(&cx, &dep) - .is_some(); - - let mut remaining_candidates = RemainingCandidates::new(&candidates); - - // `conflicting_activations` stores all the reasons we were unable to - // activate candidates. One of these reasons will have to go away for - // backtracking to find a place to restart. It is also the list of - // things to explain in the error message if we fail to resolve. - // - // This is a map of package ID to a reason why that packaged caused a - // conflict for us. - let mut conflicting_activations = ConflictMap::new(); - - // When backtracking we don't fully update `conflicting_activations` - // especially for the cases that we didn't make a backtrack frame in the - // first place. This `backtracked` var stores whether we are continuing - // from a restored backtrack frame so that we can skip caching - // `conflicting_activations` in `past_conflicting_activations` - let mut backtracked = false; - - loop { - let next = remaining_candidates.next( - &mut conflicting_activations, - &cx, - &dep, - parent.package_id(), - ); - - let (candidate, has_another) = next.ok_or(()).or_else(|_| { - // If we get here then our `remaining_candidates` was just - // exhausted, so `dep` failed to activate. - // - // It's our job here to backtrack, if possible, and find a - // different candidate to activate. If we can't find any - // candidates whatsoever then it's time to bail entirely. - trace!( - "{}[{}]>{} -- no candidates", - parent.name(), - cx.age(), - dep.package_name() - ); - - // Use our list of `conflicting_activations` to add to our - // global list of past conflicting activations, effectively - // globally poisoning `dep` if `conflicting_activations` ever - // shows up again. 
We'll use the `past_conflicting_activations` - // below to determine if a dependency is poisoned and skip as - // much work as possible. - // - // If we're only here for the error messages then there's no - // need to try this as this dependency is already known to be - // bad. - // - // As we mentioned above with the `backtracked` variable if this - // local is set to `true` then our `conflicting_activations` may - // not be right, so we can't push into our global cache. - let mut generalize_conflicting_activations = None; - if !just_here_for_the_error_messages && !backtracked { - past_conflicting_activations.insert(&dep, &conflicting_activations); - if let Some(c) = generalize_conflicting( - &cx, - registry, - &mut past_conflicting_activations, - &parent, - &dep, - &conflicting_activations, - ) { - generalize_conflicting_activations = Some(c); - } - } - - match find_candidate( - &cx, - &mut backtrack_stack, - &parent, - backtracked, - generalize_conflicting_activations - .as_ref() - .unwrap_or(&conflicting_activations), - ) { - Some((candidate, has_another, frame)) => { - // Reset all of our local variables used with the - // contents of `frame` to complete our backtrack. - cx = frame.context; - remaining_deps = frame.remaining_deps; - remaining_candidates = frame.remaining_candidates; - parent = frame.parent; - dep = frame.dep; - features = frame.features; - conflicting_activations = frame.conflicting_activations; - backtracked = true; - Ok((candidate, has_another)) - } - None => { - debug!("no candidates found"); - Err(errors::activation_error( - &cx, - registry.registry, - &parent, - &dep, - &conflicting_activations, - &candidates, - config, - )) - } - } - })?; - - // If we're only here for the error messages then we know that this - // activation will fail one way or another. To that end if we've got - // more candidates we want to fast-forward to the last one as - // otherwise we'll just backtrack here anyway (helping us to skip - // some work). - if just_here_for_the_error_messages && !backtracked && has_another { - continue; - } - - // We have a `candidate`. Create a `BacktrackFrame` so we can add it - // to the `backtrack_stack` later if activation succeeds. - // - // Note that if we don't actually have another candidate then there - // will be nothing to backtrack to so we skip construction of the - // frame. This is a relatively important optimization as a number of - // the `clone` calls below can be quite expensive, so we avoid them - // if we can. - let backtrack = if has_another { - Some(BacktrackFrame { - context: Context::clone(&cx), - remaining_deps: remaining_deps.clone(), - remaining_candidates: remaining_candidates.clone(), - parent: Summary::clone(&parent), - dep: Dependency::clone(&dep), - features: Rc::clone(&features), - conflicting_activations: conflicting_activations.clone(), - }) - } else { - None - }; - - let pid = candidate.package_id(); - let opts = ResolveOpts { - dev_deps: false, - features: Rc::clone(&features), - all_features: false, - uses_default_features: dep.uses_default_features(), - }; - trace!( - "{}[{}]>{} trying {}", - parent.name(), - cx.age(), - dep.package_name(), - candidate.version() - ); - let res = activate(&mut cx, registry, Some((&parent, &dep)), candidate, opts); - - let successfully_activated = match res { - // Success! We've now activated our `candidate` in our context - // and we're almost ready to move on. We may want to scrap this - // frame in the end if it looks like it's not going to end well, - // so figure that out here. 
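The guard around building a `BacktrackFrame` (only clone the expensive state when there is another candidate to fall back to) is a general backtracking pattern. A hedged sketch with a made-up `State` type:

// Snapshot the state only if a failure could still be retried with a later candidate.
#[derive(Clone)]
struct State {
    chosen: Vec<u32>,
}

fn try_candidates(
    state: &mut State,
    candidates: &[u32],
    accept: impl Fn(&State, u32) -> bool,
) -> bool {
    for (i, &c) in candidates.iter().enumerate() {
        let has_another = i + 1 < candidates.len();
        // Only pay for the clone when there is something to come back to.
        let snapshot = if has_another { Some(state.clone()) } else { None };
        state.chosen.push(c);
        if accept(state, c) {
            return true;
        }
        if let Some(snap) = snapshot {
            *state = snap; // restore and try the next candidate
        }
    }
    false
}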
- Ok(Some((mut frame, dur))) => { - printed.elapsed(dur); - - // Our `frame` here is a new package with its own list of - // dependencies. Do a sanity check here of all those - // dependencies by cross-referencing our global - // `past_conflicting_activations`. Recall that map is a - // global cache which lists sets of packages where, when - // activated, the dependency is unresolvable. - // - // If any our our frame's dependencies fit in that bucket, - // aka known unresolvable, then we extend our own set of - // conflicting activations with theirs. We can do this - // because the set of conflicts we found implies the - // dependency can't be activated which implies that we - // ourselves can't be activated, so we know that they - // conflict with us. - let mut has_past_conflicting_dep = just_here_for_the_error_messages; - if !has_past_conflicting_dep { - if let Some(conflicting) = frame - .remaining_siblings - .clone() - .filter_map(|(ref new_dep, _, _)| { - past_conflicting_activations.conflicting(&cx, new_dep) - }) - .next() - { - // If one of our deps is known unresolvable - // then we will not succeed. - // How ever if we are part of the reason that - // one of our deps conflicts then - // we can make a stronger statement - // because we will definitely be activated when - // we try our dep. - conflicting_activations.extend( - conflicting - .iter() - .filter(|&(p, _)| p != &pid) - .map(|(&p, r)| (p, r.clone())), - ); - - has_past_conflicting_dep = true; - } - } - // If any of `remaining_deps` are known unresolvable with - // us activated, then we extend our own set of - // conflicting activations with theirs and its parent. We can do this - // because the set of conflicts we found implies the - // dependency can't be activated which implies that we - // ourselves are incompatible with that dep, so we know that deps - // parent conflict with us. - if !has_past_conflicting_dep { - if let Some(known_related_bad_deps) = - past_conflicting_activations.dependencies_conflicting_with(pid) - { - if let Some((other_parent, conflict)) = remaining_deps - .iter() - // for deps related to us - .filter(|&(_, ref other_dep)| { - known_related_bad_deps.contains(other_dep) - }) - .filter_map(|(other_parent, other_dep)| { - past_conflicting_activations - .find_conflicting(&cx, &other_dep, Some(pid)) - .map(|con| (other_parent, con)) - }) - .next() - { - let rel = conflict.get(&pid).unwrap().clone(); - - // The conflict we found is - // "other dep will not succeed if we are activated." - // We want to add - // "our dep will not succeed if other dep is in remaining_deps" - // but that is not how the cache is set up. - // So we add the less general but much faster, - // "our dep will not succeed if other dep's parent is activated". - conflicting_activations.extend( - conflict - .iter() - .filter(|&(p, _)| p != &pid) - .map(|(&p, r)| (p, r.clone())), - ); - conflicting_activations.insert(other_parent, rel); - has_past_conflicting_dep = true; - } - } - } - - // Ok if we're in a "known failure" state for this frame we - // may want to skip it altogether though. We don't want to - // skip it though in the case that we're displaying error - // messages to the user! - // - // Here we need to figure out if the user will see if we - // skipped this candidate (if it's known to fail, aka has a - // conflicting dep and we're the last candidate). If we're - // here for the error messages, we can't skip it (but we can - // prune extra work). 
If we don't have any candidates in our - // backtrack stack then we're the last line of defense, so - // we'll want to present an error message for sure. - let activate_for_error_message = has_past_conflicting_dep && !has_another && { - just_here_for_the_error_messages || { - find_candidate( - &cx, - &mut backtrack_stack.clone(), - &parent, - backtracked, - &conflicting_activations, - ) - .is_none() - } - }; - - // If we're only here for the error messages then we know - // one of our candidate deps will fail, meaning we will - // fail and that none of the backtrack frames will find a - // candidate that will help. Consequently let's clean up the - // no longer needed backtrack frames. - if activate_for_error_message { - backtrack_stack.clear(); - } - - // If we don't know for a fact that we'll fail or if we're - // just here for the error message then we push this frame - // onto our list of to-be-resolve, which will generate more - // work for us later on. - // - // Otherwise we're guaranteed to fail and were not here for - // error messages, so we skip work and don't push anything - // onto our stack. - frame.just_for_error_messages = has_past_conflicting_dep; - if !has_past_conflicting_dep || activate_for_error_message { - remaining_deps.push(frame); - true - } else { - trace!( - "{}[{}]>{} skipping {} ", - parent.name(), - cx.age(), - dep.package_name(), - pid.version() - ); - false - } - } - - // This candidate's already activated, so there's no extra work - // for us to do. Let's keep going. - Ok(None) => true, - - // We failed with a super fatal error (like a network error), so - // bail out as quickly as possible as we can't reliably - // backtrack from errors like these - Err(ActivateError::Fatal(e)) => return Err(e), - - // We failed due to a bland conflict, bah! Record this in our - // frame's list of conflicting activations as to why this - // candidate failed, and then move on. - Err(ActivateError::Conflict(id, reason)) => { - conflicting_activations.insert(id, reason); - false - } - }; - - // If we've successfully activated then save off the backtrack frame - // if one was created, and otherwise break out of the inner - // activation loop as we're ready to move to the next dependency - if successfully_activated { - backtrack_stack.extend(backtrack); - break; - } - - // We've failed to activate this dependency, oh dear! Our call to - // `activate` above may have altered our `cx` local variable, so - // restore it back if we've got a backtrack frame. - // - // If we don't have a backtrack frame then we're just using the `cx` - // for error messages anyway so we can live with a little - // imprecision. - if let Some(b) = backtrack { - cx = b.context; - } - } - - // Ok phew, that loop was a big one! If we've broken out then we've - // successfully activated a candidate. Our stacks are all in place that - // we're ready to move on to the next dependency that needs activation, - // so loop back to the top of the function here. - } - - Ok(cx) -} - -/// Attempts to activate the summary `candidate` in the context `cx`. -/// -/// This function will pull dependency summaries from the registry provided, and -/// the dependencies of the package will be determined by the `opts` provided. -/// If `candidate` was activated, this function returns the dependency frame to -/// iterate through next. 
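The `past_conflicting_activations` lookups used throughout this loop amount to: for each dependency, remember sets of already-active packages under which it is known to fail, and treat the dependency as poisoned whenever one of those sets is still fully active. A simplified sketch (string package ids, plain sets, none of the age bookkeeping):

use std::collections::{HashMap, HashSet};

type PkgId = &'static str;

// Minimal stand-in for the conflict cache.
#[derive(Default)]
struct ConflictCache {
    known_bad: HashMap<&'static str, Vec<HashSet<PkgId>>>,
}

impl ConflictCache {
    fn insert(&mut self, dep: &'static str, conflict: HashSet<PkgId>) {
        self.known_bad.entry(dep).or_default().push(conflict);
    }

    // If every package in some remembered conflict is still active, the dependency is
    // "poisoned": trying it again cannot succeed until we backtrack past one of them.
    fn conflicting<'a>(&'a self, dep: &str, active: &HashSet<PkgId>) -> Option<&'a HashSet<PkgId>> {
        self.known_bad
            .get(dep)?
            .iter()
            .find(|conflict| conflict.is_subset(active))
    }
}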
-fn activate( - cx: &mut Context, - registry: &mut RegistryQueryer<'_>, - parent: Option<(&Summary, &Dependency)>, - candidate: Summary, - opts: ResolveOpts, -) -> ActivateResult> { - let candidate_pid = candidate.package_id(); - if let Some((parent, dep)) = parent { - let parent_pid = parent.package_id(); - Rc::make_mut( - // add a edge from candidate to parent in the parents graph - cx.parents.link(candidate_pid, parent_pid), - ) - // and associate dep with that edge - .push(dep.clone()); - if let Some(public_dependency) = cx.public_dependency.as_mut() { - // one tricky part is that `candidate_pid` may already be active and - // have public dependencies of its own. So we not only need to mark - // `candidate_pid` as visible to its parents but also all of its existing - // public dependencies. - let existing_public_deps: Vec = public_dependency - .get(&candidate_pid) - .iter() - .flat_map(|x| x.values()) - .filter_map(|x| if x.1 { Some(&x.0) } else { None }) - .chain(&Some(candidate_pid)) - .cloned() - .collect(); - for c in existing_public_deps { - // for each (transitive) parent that can newly see `t` - let mut stack = vec![(parent_pid, dep.is_public())]; - while let Some((p, public)) = stack.pop() { - match public_dependency.entry(p).or_default().entry(c.name()) { - im_rc::hashmap::Entry::Occupied(mut o) => { - // the (transitive) parent can already see something by `c`s name, it had better be `c`. - assert_eq!(o.get().0, c); - if o.get().1 { - // The previous time the parent saw `c`, it was a public dependency. - // So all of its parents already know about `c` - // and we can save some time by stopping now. - continue; - } - if public { - // Mark that `c` has now bean seen publicly - o.insert((c, public)); - } - } - im_rc::hashmap::Entry::Vacant(v) => { - // The (transitive) parent does not have anything by `c`s name, - // so we add `c`. - v.insert((c, public)); - } - } - // if `candidate_pid` was a private dependency of `p` then `p` parents can't see `c` thru `p` - if public { - // if it was public, then we add all of `p`s parents to be checked - for &(grand, ref d) in cx.parents.edges(&p) { - stack.push((grand, d.iter().any(|x| x.is_public()))); - } - } - } - } - } - } - - let activated = cx.flag_activated(&candidate, &opts, parent)?; - - let candidate = match registry.replacement_summary(candidate_pid) { - Some(replace) => { - // Note the `None` for parent here since `[replace]` is a bit wonky - // and doesn't activate the same things that `[patch]` typically - // does. TBH it basically cause panics in the test suite if - // `parent` is passed through here and `[replace]` is otherwise - // on life support so it's not critical to fix bugs anyway per se. - if cx.flag_activated(replace, &opts, None)? && activated { - return Ok(None); - } - trace!( - "activating {} (replacing {})", - replace.package_id(), - candidate_pid - ); - replace.clone() - } - None => { - if activated { - return Ok(None); - } - trace!("activating {}", candidate_pid); - candidate - } - }; - - let now = Instant::now(); - let (used_features, deps) = - &*registry.build_deps(parent.map(|p| p.0.package_id()), &candidate, &opts)?; - - // Record what list of features is active for this package. 
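The public-dependency bookkeeping in `activate` walks the parents graph with an explicit stack, stopping at private edges. The sketch below keeps only that walk; it uses a plain `seen_by` map, skips the real code's "already seen publicly" short-circuit, and all names are stand-ins.

use std::collections::{HashMap, HashSet};

type Pkg = &'static str;

// Stand-in parents graph: child -> list of (parent, edge-is-public).
// When `child` becomes visible through a parent, everything that can see that parent
// via public edges must also learn about `child`; a private edge stops the walk.
fn propagate_visibility(
    parents: &HashMap<Pkg, Vec<(Pkg, bool)>>,
    seen_by: &mut HashMap<Pkg, HashSet<Pkg>>,
    child: Pkg,
    first_parent: Pkg,
    first_edge_public: bool,
) {
    let mut stack = vec![(first_parent, first_edge_public)];
    while let Some((pkg, public)) = stack.pop() {
        if !seen_by.entry(pkg).or_default().insert(child) {
            continue; // this package already knows about `child`
        }
        if public {
            // A public edge makes `child` visible to the grandparents as well.
            for &(grand, edge_public) in parents.get(pkg).map(|v| v.as_slice()).unwrap_or(&[]) {
                stack.push((grand, edge_public));
            }
        }
    }
}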
- if !used_features.is_empty() { - Rc::make_mut( - cx.resolve_features - .entry(candidate.package_id()) - .or_insert_with(Rc::default), - ) - .extend(used_features); - } - - let frame = DepsFrame { - parent: candidate, - just_for_error_messages: false, - remaining_siblings: RcVecIter::new(Rc::clone(deps)), - }; - Ok(Some((frame, now.elapsed()))) -} - -#[derive(Clone)] -struct BacktrackFrame { - context: Context, - remaining_deps: RemainingDeps, - remaining_candidates: RemainingCandidates, - parent: Summary, - dep: Dependency, - features: FeaturesSet, - conflicting_activations: ConflictMap, -} - -/// A helper "iterator" used to extract candidates within a current `Context` of -/// a dependency graph. -/// -/// This struct doesn't literally implement the `Iterator` trait (requires a few -/// more inputs) but in general acts like one. Each `RemainingCandidates` is -/// created with a list of candidates to choose from. When attempting to iterate -/// over the list of candidates only *valid* candidates are returned. Validity -/// is defined within a `Context`. -/// -/// Candidates passed to `new` may not be returned from `next` as they could be -/// filtered out, and as they are filtered the causes will be added to `conflicting_prev_active`. -#[derive(Clone)] -struct RemainingCandidates { - remaining: RcVecIter, - // This is a inlined peekable generator - has_another: Option, -} - -impl RemainingCandidates { - fn new(candidates: &Rc>) -> RemainingCandidates { - RemainingCandidates { - remaining: RcVecIter::new(Rc::clone(candidates)), - has_another: None, - } - } - - /// Attempts to find another candidate to check from this list. - /// - /// This method will attempt to move this iterator forward, returning a - /// candidate that's possible to activate. The `cx` argument is the current - /// context which determines validity for candidates returned, and the `dep` - /// is the dependency listing that we're activating for. - /// - /// If successful a `(Candidate, bool)` pair will be returned. The - /// `Candidate` is the candidate to attempt to activate, and the `bool` is - /// an indicator of whether there are remaining candidates to try of if - /// we've reached the end of iteration. - /// - /// If we've reached the end of the iterator here then `Err` will be - /// returned. The error will contain a map of package ID to conflict reason, - /// where each package ID caused a candidate to be filtered out from the - /// original list for the reason listed. - fn next( - &mut self, - conflicting_prev_active: &mut ConflictMap, - cx: &Context, - dep: &Dependency, - parent: PackageId, - ) -> Option<(Summary, bool)> { - 'main: for b in self.remaining.by_ref() { - let b_id = b.package_id(); - // The `links` key in the manifest dictates that there's only one - // package in a dependency graph, globally, with that particular - // `links` key. If this candidate links to something that's already - // linked to by a different package then we've gotta skip this. - if let Some(link) = b.links() { - if let Some(&a) = cx.links.get(&link) { - if a != b_id { - conflicting_prev_active - .entry(a) - .or_insert_with(|| ConflictReason::Links(link)); - continue; - } - } - } - - // Otherwise the condition for being a valid candidate relies on - // semver. Cargo dictates that you can't duplicate multiple - // semver-compatible versions of a crate. For example we can't - // simultaneously activate `foo 1.0.2` and `foo 1.2.0`. We can, - // however, activate `1.0.2` and `2.0.0`. 
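The "can't duplicate semver-compatible versions" rule boils down to bucketing versions by their leftmost non-zero component and allowing at most one activated version per (name, bucket). A small sketch, using plain tuples for versions:

use std::collections::HashMap;

// Versions are semver-compatible when the leftmost non-zero component matches.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum CompatKey {
    Major(u64),
    Minor(u64),
    Patch(u64),
}

fn compat_key(v: (u64, u64, u64)) -> CompatKey {
    match v {
        (0, 0, p) => CompatKey::Patch(p),
        (0, m, _) => CompatKey::Minor(m),
        (maj, _, _) => CompatKey::Major(maj),
    }
}

fn main() {
    let mut activated: HashMap<(&str, CompatKey), (u64, u64, u64)> = HashMap::new();
    activated.insert(("foo", compat_key((1, 0, 2))), (1, 0, 2));

    // `foo 1.2.0` lands in the same bucket as the active `foo 1.0.2`: skip the candidate.
    assert_eq!(compat_key((1, 2, 0)), compat_key((1, 0, 2)));
    // `foo 2.0.0` is a different bucket and can be activated alongside `1.0.2`.
    assert_ne!(compat_key((2, 0, 0)), compat_key((1, 0, 2)));
    assert!(activated.get(&("foo", compat_key((2, 0, 0)))).is_none());
}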
- // - // Here we throw out our candidate if it's *compatible*, yet not - // equal, to all previously activated versions. - if let Some((a, _)) = cx.activations.get(&b_id.as_activations_key()) { - if *a != b { - conflicting_prev_active - .entry(a.package_id()) - .or_insert(ConflictReason::Semver); - continue; - } - } - // We may still have to reject do to a public dependency conflict. If one of any of our - // ancestors that can see us already knows about a different crate with this name then - // we have to reject this candidate. Additionally this candidate may already have been - // activated and have public dependants of its own, - // all of witch also need to be checked the same way. - if let Some(public_dependency) = cx.public_dependency.as_ref() { - let existing_public_deps: Vec = public_dependency - .get(&b_id) - .iter() - .flat_map(|x| x.values()) - .filter_map(|x| if x.1 { Some(&x.0) } else { None }) - .chain(&Some(b_id)) - .cloned() - .collect(); - for t in existing_public_deps { - // for each (transitive) parent that can newly see `t` - let mut stack = vec![(parent, dep.is_public())]; - while let Some((p, public)) = stack.pop() { - // TODO: dont look at the same thing more then once - if let Some(o) = public_dependency.get(&p).and_then(|x| x.get(&t.name())) { - if o.0 != t { - // the (transitive) parent can already see a different version by `t`s name. - // So, adding `b` will cause `p` to have a public dependency conflict on `t`. - conflicting_prev_active.insert(p, ConflictReason::PublicDependency); - continue 'main; - } - } - // if `b` was a private dependency of `p` then `p` parents can't see `t` thru `p` - if public { - // if it was public, then we add all of `p`s parents to be checked - for &(grand, ref d) in cx.parents.edges(&p) { - stack.push((grand, d.iter().any(|x| x.is_public()))); - } - } - } - } - } - - // Well if we made it this far then we've got a valid dependency. We - // want this iterator to be inherently "peekable" so we don't - // necessarily return the item just yet. Instead we stash it away to - // get returned later, and if we replaced something then that was - // actually the candidate to try first so we return that. - if let Some(r) = mem::replace(&mut self.has_another, Some(b)) { - return Some((r, true)); - } - } - - // Alright we've entirely exhausted our list of candidates. If we've got - // something stashed away return that here (also indicating that there's - // nothing else). - self.has_another.take().map(|r| (r, false)) - } -} - -/// Attempts to find a new conflict that allows a `find_candidate` feather then the input one. -/// It will add the new conflict to the cache if one is found. -/// -/// Panics if the input conflict is not all active in `cx`. -fn generalize_conflicting( - cx: &Context, - registry: &mut RegistryQueryer<'_>, - past_conflicting_activations: &mut conflict_cache::ConflictCache, - parent: &Summary, - dep: &Dependency, - conflicting_activations: &ConflictMap, -) -> Option { - if conflicting_activations.is_empty() { - return None; - } - // We need to determine the `ContextAge` that this `conflicting_activations` will jump to, and why. 
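The "inherently peekable" trick used by `RemainingCandidates::next` (stash the last valid item, and when another one turns up, return the stashed one together with a has-another flag) can be written generically:

use std::mem;

// Keep one item stashed; when the next item is found, hand back the stashed one plus
// `true` ("there is at least one more to try"). The last item comes back with `false`.
struct Peeked<I: Iterator> {
    iter: I,
    stash: Option<I::Item>,
}

impl<I: Iterator> Peeked<I> {
    fn next_with_has_another(&mut self) -> Option<(I::Item, bool)> {
        while let Some(item) = self.iter.next() {
            if let Some(prev) = mem::replace(&mut self.stash, Some(item)) {
                return Some((prev, true));
            }
        }
        // Iterator exhausted: whatever is stashed is the final candidate.
        self.stash.take().map(|last| (last, false))
    }
}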
- let (backtrack_critical_age, backtrack_critical_id) = conflicting_activations - .keys() - .map(|&c| (cx.is_active(c).expect("not currently active!?"), c)) - .max() - .unwrap(); - let backtrack_critical_reason: ConflictReason = - conflicting_activations[&backtrack_critical_id].clone(); - - if cx - .parents - .is_path_from_to(&parent.package_id(), &backtrack_critical_id) - { - // We are a descendant of the trigger of the problem. - // The best generalization of this is to let things bubble up - // and let `backtrack_critical_id` figure this out. - return None; - } - // What parents does that critical activation have - for (critical_parent, critical_parents_deps) in - cx.parents.edges(&backtrack_critical_id).filter(|(p, _)| { - // it will only help backjump further if it is older then the critical_age - cx.is_active(*p).expect("parent not currently active!?") < backtrack_critical_age - }) - { - for critical_parents_dep in critical_parents_deps.iter() { - // A dep is equivalent to one of the things it can resolve to. - // Thus, if all the things it can resolve to have already ben determined - // to be conflicting, then we can just say that we conflict with the parent. - if let Some(others) = registry - .query(critical_parents_dep) - .expect("an already used dep now error!?") - .iter() - .rev() // the last one to be tried is the least likely to be in the cache, so start with that. - .map(|other| { - past_conflicting_activations - .find( - dep, - &|id| { - if id == other.package_id() { - // we are imagining that we used other instead - Some(backtrack_critical_age) - } else { - cx.is_active(id) - } - }, - Some(other.package_id()), - // we only care about things that are newer then critical_age - backtrack_critical_age, - ) - .map(|con| (other.package_id(), con)) - }) - .collect::>>() - { - let mut con = conflicting_activations.clone(); - // It is always valid to combine previously inserted conflicts. - // A, B are both known bad states each that can never be activated. - // A + B is redundant but cant be activated, as if - // A + B is active then A is active and we know that is not ok. - for (_, other) in &others { - con.extend(other.iter().map(|(&id, re)| (id, re.clone()))); - } - // Now that we have this combined conflict, we can do a substitution: - // A dep is equivalent to one of the things it can resolve to. - // So we can remove all the things that it resolves to and replace with the parent. - for (other_id, _) in &others { - con.remove(other_id); - } - con.insert(*critical_parent, backtrack_critical_reason); - - if cfg!(debug_assertions) { - // the entire point is to find an older conflict, so let's make sure we did - let new_age = con - .keys() - .map(|&c| cx.is_active(c).expect("not currently active!?")) - .max() - .unwrap(); - assert!( - new_age < backtrack_critical_age, - "new_age {} < backtrack_critical_age {}", - new_age, - backtrack_critical_age - ); - } - past_conflicting_activations.insert(dep, &con); - return Some(con); - } - } - } - None -} - -/// Looks through the states in `backtrack_stack` for dependencies with -/// remaining candidates. For each one, also checks if rolling back -/// could change the outcome of the failed resolution that caused backtracking -/// in the first place. Namely, if we've backtracked past the parent of the -/// failed dep, or any of the packages flagged as giving us trouble in -/// `conflicting_activations`. -/// -/// Read -/// For several more detailed explanations of the logic here. 
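The substitution at the heart of `generalize_conflicting` can be reduced to set algebra: when every candidate that could satisfy a parent's dependency already has a known conflict, the union of those conflicts, minus the candidates themselves, plus the parent, is itself a valid (and hopefully older) conflict. A sketch with string package ids, leaving out the age checks:

use std::collections::{HashMap, HashSet};

type Pkg = &'static str;

// Combine the base conflict with the known conflict of every candidate, then replace
// the candidates with the parent that depends on them.
fn generalize(
    base_conflict: &HashSet<Pkg>,
    per_candidate_conflicts: &HashMap<Pkg, HashSet<Pkg>>, // candidate -> known conflict
    critical_parent: Pkg,
) -> HashSet<Pkg> {
    let mut combined = base_conflict.clone();
    for conflict in per_candidate_conflicts.values() {
        combined.extend(conflict.iter().copied());
    }
    for candidate in per_candidate_conflicts.keys() {
        combined.remove(candidate);
    }
    combined.insert(critical_parent);
    combined
}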
-fn find_candidate( - cx: &Context, - backtrack_stack: &mut Vec, - parent: &Summary, - backtracked: bool, - conflicting_activations: &ConflictMap, -) -> Option<(Summary, bool, BacktrackFrame)> { - // When we're calling this method we know that `parent` failed to - // activate. That means that some dependency failed to get resolved for - // whatever reason. Normally, that means that all of those reasons - // (plus maybe some extras) are listed in `conflicting_activations`. - // - // The abnormal situations are things that do not put all of the reasons in `conflicting_activations`: - // If we backtracked we do not know how our `conflicting_activations` related to - // the cause of that backtrack, so we do not update it. - // If we had a PublicDependency conflict, then we do not yet have a compact way to - // represent all the parts of the problem, so `conflicting_activations` is incomplete. - let age = if !backtracked - && !conflicting_activations - .values() - .any(|c| *c == ConflictReason::PublicDependency) - { - // we dont have abnormal situations. So we can ask `cx` for how far back we need to go. - let a = cx.is_conflicting(Some(parent.package_id()), conflicting_activations); - // If the `conflicting_activations` does not apply to `cx`, then something went very wrong - // in building it. But we will just fall back to laboriously trying all possibilities witch - // will give us the correct answer so only `assert` if there is a developer to debug it. - debug_assert!(a.is_some()); - a - } else { - None - }; - - while let Some(mut frame) = backtrack_stack.pop() { - let next = frame.remaining_candidates.next( - &mut frame.conflicting_activations, - &frame.context, - &frame.dep, - frame.parent.package_id(), - ); - let (candidate, has_another) = match next { - Some(pair) => pair, - None => continue, - }; - - // If all members of `conflicting_activations` are still - // active in this back up we know that we're guaranteed to not actually - // make any progress. As a result if we hit this condition we can - // completely skip this backtrack frame and move on to the next. - if let Some(age) = age { - if frame.context.age() > age { - trace!( - "{} = \"{}\" skip as not solving {}: {:?}", - frame.dep.package_name(), - frame.dep.version_req(), - parent.package_id(), - conflicting_activations - ); - // above we use `cx` to determine that this is still going to be conflicting. - // but lets just double check. - debug_assert!( - frame - .context - .is_conflicting(Some(parent.package_id()), conflicting_activations) - == Some(age) - ); - continue; - } else { - // above we use `cx` to determine that this is not going to be conflicting. - // but lets just double check. - debug_assert!(frame - .context - .is_conflicting(Some(parent.package_id()), conflicting_activations) - .is_none()); - } - } - - return Some((candidate, has_another, frame)); - } - None -} - -fn check_cycles(resolve: &Resolve) -> CargoResult<()> { - // Sort packages to produce user friendly deterministic errors. - let mut all_packages: Vec<_> = resolve.iter().collect(); - all_packages.sort_unstable(); - let mut checked = HashSet::new(); - for pkg in all_packages { - if !checked.contains(&pkg) { - visit(resolve, pkg, &mut HashSet::new(), &mut checked)? - } - } - return Ok(()); - - fn visit( - resolve: &Resolve, - id: PackageId, - visited: &mut HashSet, - checked: &mut HashSet, - ) -> CargoResult<()> { - // See if we visited ourselves - if !visited.insert(id) { - failure::bail!( - "cyclic package dependency: package `{}` depends on itself. 
Cycle:\n{}", - id, - errors::describe_path(&resolve.path_to_top(&id)) - ); - } - - // If we've already checked this node no need to recurse again as we'll - // just conclude the same thing as last time, so we only execute the - // recursive step if we successfully insert into `checked`. - // - // Note that if we hit an intransitive dependency then we clear out the - // visitation list as we can't induce a cycle through transitive - // dependencies. - if checked.insert(id) { - for (dep, listings) in resolve.deps_not_replaced(id) { - let is_transitive = listings.iter().any(|d| d.is_transitive()); - let mut empty = HashSet::new(); - let visited = if is_transitive { - &mut *visited - } else { - &mut empty - }; - visit(resolve, dep, visited, checked)?; - - if let Some(id) = resolve.replacement(dep) { - visit(resolve, id, visited, checked)?; - } - } - } - - // Ok, we're done, no longer visiting our node any more - visited.remove(&id); - Ok(()) - } -} - -/// Checks that packages are unique when written to lock file. -/// -/// When writing package ID's to lock file, we apply lossy encoding. In -/// particular, we don't store paths of path dependencies. That means that -/// *different* packages may collide in the lock file, hence this check. -fn check_duplicate_pkgs_in_lockfile(resolve: &Resolve) -> CargoResult<()> { - let mut unique_pkg_ids = HashMap::new(); - let state = encode::EncodeState::new(resolve); - for pkg_id in resolve.iter() { - let encodable_pkd_id = encode::encodable_package_id(pkg_id, &state); - if let Some(prev_pkg_id) = unique_pkg_ids.insert(encodable_pkd_id, pkg_id) { - failure::bail!( - "package collision in the lockfile: packages {} and {} are different, \ - but only one can be written to lockfile unambiguously", - prev_pkg_id, - pkg_id - ) - } - } - Ok(()) -} +//! Resolution of the entire dependency graph for a crate. +//! +//! This module implements the core logic in taking the world of crates and +//! constraints and creating a resolved graph with locked versions for all +//! crates and their dependencies. This is separate from the registry module +//! which is more worried about discovering crates from various sources, this +//! module just uses the Registry trait as a source to learn about crates from. +//! +//! Actually solving a constraint graph is an NP-hard problem. This algorithm +//! is basically a nice heuristic to make sure we get roughly the best answer +//! most of the time. The constraints that we're working with are: +//! +//! 1. Each crate can have any number of dependencies. Each dependency can +//! declare a version range that it is compatible with. +//! 2. Crates can be activated with multiple version (e.g., show up in the +//! dependency graph twice) so long as each pairwise instance have +//! semver-incompatible versions. +//! +//! The algorithm employed here is fairly simple, we simply do a DFS, activating +//! the "newest crate" (highest version) first and then going to the next +//! option. The heuristics we employ are: +//! +//! * Never try to activate a crate version which is incompatible. This means we +//! only try crates which will actually satisfy a dependency and we won't ever +//! try to activate a crate that's semver compatible with something else +//! activated (as we're only allowed to have one) nor try to activate a crate +//! that has the same links attribute as something else +//! activated. +//! * Always try to activate the highest version crate first. The default +//! dependency in Cargo (e.g., when you write `foo = "0.1.2"`) is +//! 
semver-compatible, so selecting the highest version possible will allow us +//! to hopefully satisfy as many dependencies at once. +//! +//! Beyond that, what's implemented below is just a naive backtracking version +//! which should in theory try all possible combinations of dependencies and +//! versions to see if one works. The first resolution that works causes +//! everything to bail out immediately and return success, and only if *nothing* +//! works do we actually return an error up the stack. +//! +//! ## Performance +//! +//! Note that this is a relatively performance-critical portion of Cargo. The +//! data that we're processing is proportional to the size of the dependency +//! graph, which can often be quite large (e.g., take a look at Servo). To make +//! matters worse the DFS algorithm we're implemented is inherently quite +//! inefficient. When we add the requirement of backtracking on top it means +//! that we're implementing something that probably shouldn't be allocating all +//! over the place. + +use std::collections::{BTreeMap, HashMap, HashSet}; +use std::mem; +use std::rc::Rc; +use std::time::{Duration, Instant}; + +use log::{debug, trace}; + +use crate::core::PackageIdSpec; +use crate::core::{Dependency, PackageId, Registry, Summary}; +use crate::util::config::Config; +use crate::util::errors::CargoResult; +use crate::util::profile; + +use self::context::Context; +use self::dep_cache::RegistryQueryer; +use self::types::{ConflictMap, ConflictReason, DepsFrame}; +use self::types::{FeaturesSet, RcVecIter, RemainingDeps, ResolverProgress}; + +pub use self::encode::Metadata; +pub use self::encode::{EncodableDependency, EncodablePackageId, EncodableResolve}; +pub use self::errors::{ActivateError, ActivateResult, ResolveError}; +pub use self::resolve::{Resolve, ResolveVersion}; +pub use self::types::ResolveOpts; + +mod conflict_cache; +mod context; +mod dep_cache; +mod encode; +mod errors; +mod resolve; +mod types; + +/// Builds the list of all packages required to build the first argument. +/// +/// * `summaries` - the list of package summaries along with how to resolve +/// their features. This is a list of all top-level packages that are intended +/// to be part of the lock file (resolve output). These typically are a list +/// of all workspace members. +/// +/// * `replacements` - this is a list of `[replace]` directives found in the +/// root of the workspace. The list here is a `PackageIdSpec` of what to +/// replace and a `Dependency` to replace that with. In general it's not +/// recommended to use `[replace]` any more and use `[patch]` instead, which +/// is supported elsewhere. +/// +/// * `registry` - this is the source from which all package summaries are +/// loaded. It's expected that this is extensively configured ahead of time +/// and is idempotent with our requests to it (aka returns the same results +/// for the same query every time). Typically this is an instance of a +/// `PackageRegistry`. +/// +/// * `try_to_use` - this is a list of package IDs which were previously found +/// in the lock file. We heuristically prefer the ids listed in `try_to_use` +/// when sorting candidates to activate, but otherwise this isn't used +/// anywhere else. 
+/// +/// * `config` - a location to print warnings and such, or `None` if no warnings +/// should be printed +/// +/// * `print_warnings` - whether or not to print backwards-compatibility +/// warnings and such +/// +/// * `check_public_visible_dependencies` - a flag for whether to enforce the restrictions +/// introduced in the "public & private dependencies" RFC (1977). The current implementation +/// makes sure that there is only one version of each name visible to each package. +/// +/// But there are 2 stable ways to directly depend on different versions of the same name. +/// 1. Use the renamed dependencies functionality +/// 2. Use 'cfg({})' dependencies functionality +/// +/// When we have a decision for how to implement is without breaking existing functionality +/// this flag can be removed. +pub fn resolve( + summaries: &[(Summary, ResolveOpts)], + replacements: &[(PackageIdSpec, Dependency)], + registry: &mut dyn Registry, + try_to_use: &HashSet, + config: Option<&Config>, + check_public_visible_dependencies: bool, +) -> CargoResult { + let cx = Context::new(check_public_visible_dependencies); + let _p = profile::start("resolving"); + let minimal_versions = match config { + Some(config) => config.cli_unstable().minimal_versions, + None => false, + }; + let mut registry = RegistryQueryer::new(registry, replacements, try_to_use, minimal_versions); + let cx = activate_deps_loop(cx, &mut registry, summaries, config)?; + + let mut cksums = HashMap::new(); + for (summary, _) in cx.activations.values() { + let cksum = summary.checksum().map(|s| s.to_string()); + cksums.insert(summary.package_id(), cksum); + } + let resolve = Resolve::new( + cx.graph(), + cx.resolve_replacements(®istry), + cx.resolve_features + .iter() + .map(|(k, v)| { + ( + *k, + v.iter() + .map(|x| { + let platform = summaries + .iter() + .find(|(summary, _)| summary.features().get(x).is_some()) + .map(|(summary, _)| summary.features().get(x).unwrap().0.clone()); + let platform = if let Some(platform) = platform { + platform + } else { + None + }; + (x.to_string(), platform) + }) + .collect(), + ) + }) + .collect(), + cksums, + BTreeMap::new(), + Vec::new(), + ResolveVersion::default(), + ); + + check_cycles(&resolve)?; + check_duplicate_pkgs_in_lockfile(&resolve)?; + trace!("resolved: {:?}", resolve); + + Ok(resolve) +} + +/// Recursively activates the dependencies for `top`, in depth-first order, +/// backtracking across possible candidates for each dependency as necessary. +/// +/// If all dependencies can be activated and resolved to a version in the +/// dependency graph, cx.resolve is returned. +fn activate_deps_loop( + mut cx: Context, + registry: &mut RegistryQueryer<'_>, + summaries: &[(Summary, ResolveOpts)], + config: Option<&Config>, +) -> CargoResult { + let mut backtrack_stack = Vec::new(); + let mut remaining_deps = RemainingDeps::new(); + + // `past_conflicting_activations` is a cache of the reasons for each time we + // backtrack. + let mut past_conflicting_activations = conflict_cache::ConflictCache::new(); + + // Activate all the initial summaries to kick off some work. 
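The loop below drains a work list of pending dependencies, always taking the most constrained one (`pop_most_constrained`) so hopeless branches are discovered early. A rough standalone sketch of that ordering, with toy types standing in for `RemainingDeps` (the real method also reports whether a frame is only kept for error messages):

#[derive(Debug)]
struct PendingDep {
    name: String,
    candidates: Vec<String>,
}

// Toy stand-in for the "most constrained first" ordering: yield the dependency
// with the fewest candidates left so dead ends surface as early as possible.
fn pop_most_constrained(deps: &mut Vec<PendingDep>) -> Option<PendingDep> {
    let idx = deps
        .iter()
        .enumerate()
        .min_by_key(|(_, d)| d.candidates.len())
        .map(|(i, _)| i)?;
    Some(deps.swap_remove(idx))
}

fn main() {
    let mut deps = vec![
        PendingDep { name: "serde".into(), candidates: vec!["1.0.104".into(), "1.0.103".into()] },
        PendingDep { name: "log".into(), candidates: vec!["0.4.8".into()] },
    ];
    // `log` is handled first: only one candidate can still satisfy it.
    assert_eq!(pop_most_constrained(&mut deps).unwrap().name, "log");
}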
+ for &(ref summary, ref opts) in summaries { + debug!("initial activation: {}", summary.package_id()); + let res = activate(&mut cx, registry, None, summary.clone(), opts.clone()); + match res { + Ok(Some((frame, _))) => remaining_deps.push(frame), + Ok(None) => (), + Err(ActivateError::Fatal(e)) => return Err(e), + Err(ActivateError::Conflict(_, _)) => panic!("bad error from activate"), + } + } + + let mut printed = ResolverProgress::new(); + + // Main resolution loop, this is the workhorse of the resolution algorithm. + // + // You'll note that a few stacks are maintained on the side, which might + // seem odd when this algorithm looks like it could be implemented + // recursively. While correct, this is implemented iteratively to avoid + // blowing the stack (the recursion depth is proportional to the size of the + // input). + // + // The general sketch of this loop is to run until there are no dependencies + // left to activate, and for each dependency to attempt to activate all of + // its own dependencies in turn. The `backtrack_stack` is a side table of + // backtracking states where if we hit an error we can return to in order to + // attempt to continue resolving. + while let Some((just_here_for_the_error_messages, frame)) = + remaining_deps.pop_most_constrained() + { + let (mut parent, (mut dep, candidates, mut features)) = frame; + + // If we spend a lot of time here (we shouldn't in most cases) then give + // a bit of a visual indicator as to what we're doing. + printed.shell_status(config)?; + + trace!( + "{}[{}]>{} {} candidates", + parent.name(), + cx.age(), + dep.package_name(), + candidates.len() + ); + + let just_here_for_the_error_messages = just_here_for_the_error_messages + && past_conflicting_activations + .conflicting(&cx, &dep) + .is_some(); + + let mut remaining_candidates = RemainingCandidates::new(&candidates); + + // `conflicting_activations` stores all the reasons we were unable to + // activate candidates. One of these reasons will have to go away for + // backtracking to find a place to restart. It is also the list of + // things to explain in the error message if we fail to resolve. + // + // This is a map of package ID to a reason why that packaged caused a + // conflict for us. + let mut conflicting_activations = ConflictMap::new(); + + // When backtracking we don't fully update `conflicting_activations` + // especially for the cases that we didn't make a backtrack frame in the + // first place. This `backtracked` var stores whether we are continuing + // from a restored backtrack frame so that we can skip caching + // `conflicting_activations` in `past_conflicting_activations` + let mut backtracked = false; + + loop { + let next = remaining_candidates.next( + &mut conflicting_activations, + &cx, + &dep, + parent.package_id(), + ); + + let (candidate, has_another) = next.ok_or(()).or_else(|_| { + // If we get here then our `remaining_candidates` was just + // exhausted, so `dep` failed to activate. + // + // It's our job here to backtrack, if possible, and find a + // different candidate to activate. If we can't find any + // candidates whatsoever then it's time to bail entirely. + trace!( + "{}[{}]>{} -- no candidates", + parent.name(), + cx.age(), + dep.package_name() + ); + + // Use our list of `conflicting_activations` to add to our + // global list of past conflicting activations, effectively + // globally poisoning `dep` if `conflicting_activations` ever + // shows up again. 
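The cache consulted here can be pictured as a map from a dependency to the sets of already-activated packages under which that dependency is known to fail. A condensed, self-contained sketch, with toy string ids in place of Cargo's `ConflictCache` and `PackageId`:

use std::collections::{HashMap, HashSet};

type PkgId = &'static str;
type Reason = &'static str;

#[derive(Default)]
struct ToyConflictCache {
    // dependency name -> known "poison" sets; if every package in one of the
    // sets is currently active, the dependency cannot be resolved.
    by_dep: HashMap<&'static str, Vec<HashMap<PkgId, Reason>>>,
}

impl ToyConflictCache {
    fn insert(&mut self, dep: &'static str, conflict: HashMap<PkgId, Reason>) {
        self.by_dep.entry(dep).or_default().push(conflict);
    }

    // Returns a cached conflict that still applies under `active`, if any.
    fn conflicting(&self, dep: &str, active: &HashSet<PkgId>) -> Option<&HashMap<PkgId, Reason>> {
        self.by_dep
            .get(dep)?
            .iter()
            .find(|set| set.keys().all(|p| active.contains(p)))
    }
}

fn main() {
    let mut cache = ToyConflictCache::default();
    cache.insert("openssl-sys", [("foo-sys 0.1.0", "links")].into_iter().collect());

    let active: HashSet<PkgId> = ["foo-sys 0.1.0"].into_iter().collect();
    // The cached conflict still applies, so the dependency is "poisoned" and
    // can be skipped without re-exploring its candidates.
    assert!(cache.conflicting("openssl-sys", &active).is_some());
}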
We'll use the `past_conflicting_activations` + // below to determine if a dependency is poisoned and skip as + // much work as possible. + // + // If we're only here for the error messages then there's no + // need to try this as this dependency is already known to be + // bad. + // + // As we mentioned above with the `backtracked` variable if this + // local is set to `true` then our `conflicting_activations` may + // not be right, so we can't push into our global cache. + let mut generalize_conflicting_activations = None; + if !just_here_for_the_error_messages && !backtracked { + past_conflicting_activations.insert(&dep, &conflicting_activations); + if let Some(c) = generalize_conflicting( + &cx, + registry, + &mut past_conflicting_activations, + &parent, + &dep, + &conflicting_activations, + ) { + generalize_conflicting_activations = Some(c); + } + } + + match find_candidate( + &cx, + &mut backtrack_stack, + &parent, + backtracked, + generalize_conflicting_activations + .as_ref() + .unwrap_or(&conflicting_activations), + ) { + Some((candidate, has_another, frame)) => { + // Reset all of our local variables used with the + // contents of `frame` to complete our backtrack. + cx = frame.context; + remaining_deps = frame.remaining_deps; + remaining_candidates = frame.remaining_candidates; + parent = frame.parent; + dep = frame.dep; + features = frame.features; + conflicting_activations = frame.conflicting_activations; + backtracked = true; + Ok((candidate, has_another)) + } + None => { + debug!("no candidates found"); + Err(errors::activation_error( + &cx, + registry.registry, + &parent, + &dep, + &conflicting_activations, + &candidates, + config, + )) + } + } + })?; + + // If we're only here for the error messages then we know that this + // activation will fail one way or another. To that end if we've got + // more candidates we want to fast-forward to the last one as + // otherwise we'll just backtrack here anyway (helping us to skip + // some work). + if just_here_for_the_error_messages && !backtracked && has_another { + continue; + } + + // We have a `candidate`. Create a `BacktrackFrame` so we can add it + // to the `backtrack_stack` later if activation succeeds. + // + // Note that if we don't actually have another candidate then there + // will be nothing to backtrack to so we skip construction of the + // frame. This is a relatively important optimization as a number of + // the `clone` calls below can be quite expensive, so we avoid them + // if we can. + let backtrack = if has_another { + Some(BacktrackFrame { + context: Context::clone(&cx), + remaining_deps: remaining_deps.clone(), + remaining_candidates: remaining_candidates.clone(), + parent: Summary::clone(&parent), + dep: Dependency::clone(&dep), + features: Rc::clone(&features), + conflicting_activations: conflicting_activations.clone(), + }) + } else { + None + }; + + let pid = candidate.package_id(); + let opts = ResolveOpts { + dev_deps: false, + features: Rc::clone(&features), + all_features: false, + uses_default_features: dep.uses_default_features(), + }; + trace!( + "{}[{}]>{} trying {}", + parent.name(), + cx.age(), + dep.package_name(), + candidate.version() + ); + let res = activate(&mut cx, registry, Some((&parent, &dep)), candidate, opts); + + let successfully_activated = match res { + // Success! We've now activated our `candidate` in our context + // and we're almost ready to move on. We may want to scrap this + // frame in the end if it looks like it's not going to end well, + // so figure that out here. 
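The frame construction above is essentially a snapshot-and-restore pattern: clone the mutable state only when there is still another candidate to come back to, and swap the snapshot back in when the current choice turns out to be a dead end. A minimal illustration with made-up types:

#[derive(Clone, Debug, PartialEq)]
struct ToyState {
    activated: Vec<String>,
}

fn try_candidates(mut state: ToyState, candidates: &[&str]) -> Option<ToyState> {
    let mut saved: Vec<ToyState> = Vec::new();
    let mut iter = candidates.iter().peekable();

    while let Some(&candidate) = iter.next() {
        // Only pay for the clone if there is something left to fall back to.
        if iter.peek().is_some() {
            saved.push(state.clone());
        }
        state.activated.push(candidate.to_string());

        // Stand-in for a real conflict discovered during activation.
        let conflict = candidate.ends_with("-bad");
        if !conflict {
            return Some(state);
        }
        // Dead end: restore the snapshot if one exists, otherwise give up.
        state = saved.pop()?;
    }
    None
}

fn main() {
    let done = try_candidates(ToyState { activated: vec![] }, &["foo-1.0.1-bad", "foo-1.0.0"]);
    // The failed activation of `foo-1.0.1-bad` is rolled back entirely.
    assert_eq!(done.unwrap().activated, vec!["foo-1.0.0".to_string()]);
}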
+ Ok(Some((mut frame, dur))) => { + printed.elapsed(dur); + + // Our `frame` here is a new package with its own list of + // dependencies. Do a sanity check here of all those + // dependencies by cross-referencing our global + // `past_conflicting_activations`. Recall that map is a + // global cache which lists sets of packages where, when + // activated, the dependency is unresolvable. + // + // If any our our frame's dependencies fit in that bucket, + // aka known unresolvable, then we extend our own set of + // conflicting activations with theirs. We can do this + // because the set of conflicts we found implies the + // dependency can't be activated which implies that we + // ourselves can't be activated, so we know that they + // conflict with us. + let mut has_past_conflicting_dep = just_here_for_the_error_messages; + if !has_past_conflicting_dep { + if let Some(conflicting) = frame + .remaining_siblings + .clone() + .filter_map(|(ref new_dep, _, _)| { + past_conflicting_activations.conflicting(&cx, new_dep) + }) + .next() + { + // If one of our deps is known unresolvable + // then we will not succeed. + // How ever if we are part of the reason that + // one of our deps conflicts then + // we can make a stronger statement + // because we will definitely be activated when + // we try our dep. + conflicting_activations.extend( + conflicting + .iter() + .filter(|&(p, _)| p != &pid) + .map(|(&p, r)| (p, r.clone())), + ); + + has_past_conflicting_dep = true; + } + } + // If any of `remaining_deps` are known unresolvable with + // us activated, then we extend our own set of + // conflicting activations with theirs and its parent. We can do this + // because the set of conflicts we found implies the + // dependency can't be activated which implies that we + // ourselves are incompatible with that dep, so we know that deps + // parent conflict with us. + if !has_past_conflicting_dep { + if let Some(known_related_bad_deps) = + past_conflicting_activations.dependencies_conflicting_with(pid) + { + if let Some((other_parent, conflict)) = remaining_deps + .iter() + // for deps related to us + .filter(|&(_, ref other_dep)| { + known_related_bad_deps.contains(other_dep) + }) + .filter_map(|(other_parent, other_dep)| { + past_conflicting_activations + .find_conflicting(&cx, &other_dep, Some(pid)) + .map(|con| (other_parent, con)) + }) + .next() + { + let rel = conflict.get(&pid).unwrap().clone(); + + // The conflict we found is + // "other dep will not succeed if we are activated." + // We want to add + // "our dep will not succeed if other dep is in remaining_deps" + // but that is not how the cache is set up. + // So we add the less general but much faster, + // "our dep will not succeed if other dep's parent is activated". + conflicting_activations.extend( + conflict + .iter() + .filter(|&(p, _)| p != &pid) + .map(|(&p, r)| (p, r.clone())), + ); + conflicting_activations.insert(other_parent, rel); + has_past_conflicting_dep = true; + } + } + } + + // Ok if we're in a "known failure" state for this frame we + // may want to skip it altogether though. We don't want to + // skip it though in the case that we're displaying error + // messages to the user! + // + // Here we need to figure out if the user will see if we + // skipped this candidate (if it's known to fail, aka has a + // conflicting dep and we're the last candidate). If we're + // here for the error messages, we can't skip it (but we can + // prune extra work). 
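The `extend` above merges the child's cached conflict into the candidate's own conflict map while leaving the candidate itself out. Roughly, with illustrative string ids:

use std::collections::BTreeMap;

type PkgId = &'static str;
type Reason = &'static str;

// If a dependency of `our_id` is already known to be unresolvable, `our_id`
// inherits that conflict set (minus itself): activating `our_id` is enough to
// trigger the child's failure.
fn inherit_conflict(
    ours: &mut BTreeMap<PkgId, Reason>,
    our_id: PkgId,
    childs: &BTreeMap<PkgId, Reason>,
) {
    ours.extend(
        childs
            .iter()
            .filter(|&(p, _)| *p != our_id)
            .map(|(&p, &r)| (p, r)),
    );
}

fn main() {
    let mut ours = BTreeMap::new();
    let childs: BTreeMap<PkgId, Reason> =
        [("foo 1.0.0", "links collision"), ("me 0.2.0", "semver")].into_iter().collect();
    inherit_conflict(&mut ours, "me 0.2.0", &childs);
    // Our own id carries no extra information, so only `foo` is recorded.
    assert_eq!(ours.keys().collect::<Vec<_>>(), vec![&"foo 1.0.0"]);
}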
If we don't have any candidates in our + // backtrack stack then we're the last line of defense, so + // we'll want to present an error message for sure. + let activate_for_error_message = has_past_conflicting_dep && !has_another && { + just_here_for_the_error_messages || { + find_candidate( + &cx, + &mut backtrack_stack.clone(), + &parent, + backtracked, + &conflicting_activations, + ) + .is_none() + } + }; + + // If we're only here for the error messages then we know + // one of our candidate deps will fail, meaning we will + // fail and that none of the backtrack frames will find a + // candidate that will help. Consequently let's clean up the + // no longer needed backtrack frames. + if activate_for_error_message { + backtrack_stack.clear(); + } + + // If we don't know for a fact that we'll fail or if we're + // just here for the error message then we push this frame + // onto our list of to-be-resolve, which will generate more + // work for us later on. + // + // Otherwise we're guaranteed to fail and were not here for + // error messages, so we skip work and don't push anything + // onto our stack. + frame.just_for_error_messages = has_past_conflicting_dep; + if !has_past_conflicting_dep || activate_for_error_message { + remaining_deps.push(frame); + true + } else { + trace!( + "{}[{}]>{} skipping {} ", + parent.name(), + cx.age(), + dep.package_name(), + pid.version() + ); + false + } + } + + // This candidate's already activated, so there's no extra work + // for us to do. Let's keep going. + Ok(None) => true, + + // We failed with a super fatal error (like a network error), so + // bail out as quickly as possible as we can't reliably + // backtrack from errors like these + Err(ActivateError::Fatal(e)) => return Err(e), + + // We failed due to a bland conflict, bah! Record this in our + // frame's list of conflicting activations as to why this + // candidate failed, and then move on. + Err(ActivateError::Conflict(id, reason)) => { + conflicting_activations.insert(id, reason); + false + } + }; + + // If we've successfully activated then save off the backtrack frame + // if one was created, and otherwise break out of the inner + // activation loop as we're ready to move to the next dependency + if successfully_activated { + backtrack_stack.extend(backtrack); + break; + } + + // We've failed to activate this dependency, oh dear! Our call to + // `activate` above may have altered our `cx` local variable, so + // restore it back if we've got a backtrack frame. + // + // If we don't have a backtrack frame then we're just using the `cx` + // for error messages anyway so we can live with a little + // imprecision. + if let Some(b) = backtrack { + cx = b.context; + } + } + + // Ok phew, that loop was a big one! If we've broken out then we've + // successfully activated a candidate. Our stacks are all in place that + // we're ready to move on to the next dependency that needs activation, + // so loop back to the top of the function here. + } + + Ok(cx) +} + +/// Attempts to activate the summary `candidate` in the context `cx`. +/// +/// This function will pull dependency summaries from the registry provided, and +/// the dependencies of the package will be determined by the `opts` provided. +/// If `candidate` was activated, this function returns the dependency frame to +/// iterate through next. 
+fn activate( + cx: &mut Context, + registry: &mut RegistryQueryer<'_>, + parent: Option<(&Summary, &Dependency)>, + candidate: Summary, + opts: ResolveOpts, +) -> ActivateResult> { + let candidate_pid = candidate.package_id(); + if let Some((parent, dep)) = parent { + let parent_pid = parent.package_id(); + Rc::make_mut( + // add a edge from candidate to parent in the parents graph + cx.parents.link(candidate_pid, parent_pid), + ) + // and associate dep with that edge + .push(dep.clone()); + if let Some(public_dependency) = cx.public_dependency.as_mut() { + // one tricky part is that `candidate_pid` may already be active and + // have public dependencies of its own. So we not only need to mark + // `candidate_pid` as visible to its parents but also all of its existing + // public dependencies. + let existing_public_deps: Vec = public_dependency + .get(&candidate_pid) + .iter() + .flat_map(|x| x.values()) + .filter_map(|x| if x.1 { Some(&x.0) } else { None }) + .chain(&Some(candidate_pid)) + .cloned() + .collect(); + for c in existing_public_deps { + // for each (transitive) parent that can newly see `t` + let mut stack = vec![(parent_pid, dep.is_public())]; + while let Some((p, public)) = stack.pop() { + match public_dependency.entry(p).or_default().entry(c.name()) { + im_rc::hashmap::Entry::Occupied(mut o) => { + // the (transitive) parent can already see something by `c`s name, it had better be `c`. + assert_eq!(o.get().0, c); + if o.get().1 { + // The previous time the parent saw `c`, it was a public dependency. + // So all of its parents already know about `c` + // and we can save some time by stopping now. + continue; + } + if public { + // Mark that `c` has now bean seen publicly + o.insert((c, public)); + } + } + im_rc::hashmap::Entry::Vacant(v) => { + // The (transitive) parent does not have anything by `c`s name, + // so we add `c`. + v.insert((c, public)); + } + } + // if `candidate_pid` was a private dependency of `p` then `p` parents can't see `c` thru `p` + if public { + // if it was public, then we add all of `p`s parents to be checked + for &(grand, ref d) in cx.parents.edges(&p) { + stack.push((grand, d.iter().any(|x| x.is_public()))); + } + } + } + } + } + } + + let activated = cx.flag_activated(&candidate, &opts, parent)?; + + let candidate = match registry.replacement_summary(candidate_pid) { + Some(replace) => { + // Note the `None` for parent here since `[replace]` is a bit wonky + // and doesn't activate the same things that `[patch]` typically + // does. TBH it basically cause panics in the test suite if + // `parent` is passed through here and `[replace]` is otherwise + // on life support so it's not critical to fix bugs anyway per se. + if cx.flag_activated(replace, &opts, None)? && activated { + return Ok(None); + } + trace!( + "activating {} (replacing {})", + replace.package_id(), + candidate_pid + ); + replace.clone() + } + None => { + if activated { + return Ok(None); + } + trace!("activating {}", candidate_pid); + candidate + } + }; + + let now = Instant::now(); + let (used_features, deps) = + &*registry.build_deps(parent.map(|p| p.0.package_id()), &candidate, &opts)?; + + // Record what list of features is active for this package. 
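The publicness bookkeeping in `activate` can be pictured as a walk up the parents graph: the newly activated crate, and everything it already re-exports publicly, becomes visible to the direct parent, and keeps climbing only across edges that are themselves public. A self-contained sketch with toy types in place of `Context` and `im_rc`'s map:

use std::collections::hash_map::Entry;
use std::collections::HashMap;

type Pkg = &'static str;

fn propagate_visibility(
    // package -> parents of that package, and whether the package is a public
    // dependency of each parent
    parents: &HashMap<Pkg, Vec<(Pkg, bool)>>,
    // package -> (crate name -> (which package provides it, seen publicly?))
    visible: &mut HashMap<Pkg, HashMap<&'static str, (Pkg, bool)>>,
    new_pkg: Pkg,
    new_name: &'static str,
    first_parent: Pkg,
    first_edge_public: bool,
) {
    let mut stack = vec![(first_parent, first_edge_public)];
    while let Some((p, public)) = stack.pop() {
        match visible.entry(p).or_default().entry(new_name) {
            Entry::Occupied(mut o) => {
                // The real resolver rejects a *different* package under the same
                // name here. If it was already seen publicly, every ancestor
                // already knows about it and the walk can stop.
                if o.get().1 {
                    continue;
                }
                if public {
                    o.insert((new_pkg, true));
                }
            }
            Entry::Vacant(v) => {
                v.insert((new_pkg, public));
            }
        }
        if public {
            // Only a public edge makes `new_pkg` visible further up the tree.
            for &(grand, edge_public) in parents.get(&p).into_iter().flatten() {
                stack.push((grand, edge_public));
            }
        }
    }
}

fn main() {
    // `lib` is a public dependency of `app`; activating `dep` as a public
    // dependency of `lib` makes it visible to `app` as well.
    let parents: HashMap<Pkg, Vec<(Pkg, bool)>> =
        [("lib 0.1.0", vec![("app 0.1.0", true)])].into_iter().collect();
    let mut visible = HashMap::new();
    propagate_visibility(&parents, &mut visible, "dep 1.0.0", "dep", "lib 0.1.0", true);
    assert!(visible["app 0.1.0"].contains_key("dep"));
}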
+ if !used_features.is_empty() { + Rc::make_mut( + cx.resolve_features + .entry(candidate.package_id()) + .or_insert_with(Rc::default), + ) + .extend(used_features); + } + + let frame = DepsFrame { + parent: candidate, + just_for_error_messages: false, + remaining_siblings: RcVecIter::new(Rc::clone(deps)), + }; + Ok(Some((frame, now.elapsed()))) +} + +#[derive(Clone)] +struct BacktrackFrame { + context: Context, + remaining_deps: RemainingDeps, + remaining_candidates: RemainingCandidates, + parent: Summary, + dep: Dependency, + features: FeaturesSet, + conflicting_activations: ConflictMap, +} + +/// A helper "iterator" used to extract candidates within a current `Context` of +/// a dependency graph. +/// +/// This struct doesn't literally implement the `Iterator` trait (requires a few +/// more inputs) but in general acts like one. Each `RemainingCandidates` is +/// created with a list of candidates to choose from. When attempting to iterate +/// over the list of candidates only *valid* candidates are returned. Validity +/// is defined within a `Context`. +/// +/// Candidates passed to `new` may not be returned from `next` as they could be +/// filtered out, and as they are filtered the causes will be added to `conflicting_prev_active`. +#[derive(Clone)] +struct RemainingCandidates { + remaining: RcVecIter, + // This is a inlined peekable generator + has_another: Option, +} + +impl RemainingCandidates { + fn new(candidates: &Rc>) -> RemainingCandidates { + RemainingCandidates { + remaining: RcVecIter::new(Rc::clone(candidates)), + has_another: None, + } + } + + /// Attempts to find another candidate to check from this list. + /// + /// This method will attempt to move this iterator forward, returning a + /// candidate that's possible to activate. The `cx` argument is the current + /// context which determines validity for candidates returned, and the `dep` + /// is the dependency listing that we're activating for. + /// + /// If successful a `(Candidate, bool)` pair will be returned. The + /// `Candidate` is the candidate to attempt to activate, and the `bool` is + /// an indicator of whether there are remaining candidates to try of if + /// we've reached the end of iteration. + /// + /// If we've reached the end of the iterator here then `Err` will be + /// returned. The error will contain a map of package ID to conflict reason, + /// where each package ID caused a candidate to be filtered out from the + /// original list for the reason listed. + fn next( + &mut self, + conflicting_prev_active: &mut ConflictMap, + cx: &Context, + dep: &Dependency, + parent: PackageId, + ) -> Option<(Summary, bool)> { + 'main: for b in self.remaining.by_ref() { + let b_id = b.package_id(); + // The `links` key in the manifest dictates that there's only one + // package in a dependency graph, globally, with that particular + // `links` key. If this candidate links to something that's already + // linked to by a different package then we've gotta skip this. + if let Some(link) = b.links() { + if let Some(&a) = cx.links.get(&link) { + if a != b_id { + conflicting_prev_active + .entry(a) + .or_insert_with(|| ConflictReason::Links(link)); + continue; + } + } + } + + // Otherwise the condition for being a valid candidate relies on + // semver. Cargo dictates that you can't duplicate multiple + // semver-compatible versions of a crate. For example we can't + // simultaneously activate `foo 1.0.2` and `foo 1.2.0`. We can, + // however, activate `1.0.2` and `2.0.0`. 
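The compatibility rule described above reduces to a semver "bucket": two versions of a crate may both be active only if their buckets differ (Cargo's actual activation key, `as_activations_key`, also includes the name and source id). A simplified sketch of the bucketing:

#[derive(Debug, PartialEq)]
enum SemverBucket {
    Major(u64), // `1.x.y`, `2.x.y`, ...
    Minor(u64), // `0.1.x`, `0.2.x`, ...
    Patch(u64), // `0.0.1`, `0.0.2`, ...
}

fn bucket(major: u64, minor: u64, patch: u64) -> SemverBucket {
    match (major, minor) {
        (0, 0) => SemverBucket::Patch(patch),
        (0, m) => SemverBucket::Minor(m),
        (m, _) => SemverBucket::Major(m),
    }
}

fn main() {
    // `foo 1.0.2` and `foo 1.2.0` collide: same bucket, so only one may be active.
    assert_eq!(bucket(1, 0, 2), bucket(1, 2, 0));
    // `foo 1.0.2` and `foo 2.0.0` can both be activated.
    assert_ne!(bucket(1, 0, 2), bucket(2, 0, 0));
    // In the `0.x` range, differing minors are incompatible and may coexist.
    assert_ne!(bucket(0, 1, 3), bucket(0, 2, 0));
}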
+ // + // Here we throw out our candidate if it's *compatible*, yet not + // equal, to all previously activated versions. + if let Some((a, _)) = cx.activations.get(&b_id.as_activations_key()) { + if *a != b { + conflicting_prev_active + .entry(a.package_id()) + .or_insert(ConflictReason::Semver); + continue; + } + } + // We may still have to reject do to a public dependency conflict. If one of any of our + // ancestors that can see us already knows about a different crate with this name then + // we have to reject this candidate. Additionally this candidate may already have been + // activated and have public dependants of its own, + // all of witch also need to be checked the same way. + if let Some(public_dependency) = cx.public_dependency.as_ref() { + let existing_public_deps: Vec = public_dependency + .get(&b_id) + .iter() + .flat_map(|x| x.values()) + .filter_map(|x| if x.1 { Some(&x.0) } else { None }) + .chain(&Some(b_id)) + .cloned() + .collect(); + for t in existing_public_deps { + // for each (transitive) parent that can newly see `t` + let mut stack = vec![(parent, dep.is_public())]; + while let Some((p, public)) = stack.pop() { + // TODO: dont look at the same thing more then once + if let Some(o) = public_dependency.get(&p).and_then(|x| x.get(&t.name())) { + if o.0 != t { + // the (transitive) parent can already see a different version by `t`s name. + // So, adding `b` will cause `p` to have a public dependency conflict on `t`. + conflicting_prev_active.insert(p, ConflictReason::PublicDependency); + continue 'main; + } + } + // if `b` was a private dependency of `p` then `p` parents can't see `t` thru `p` + if public { + // if it was public, then we add all of `p`s parents to be checked + for &(grand, ref d) in cx.parents.edges(&p) { + stack.push((grand, d.iter().any(|x| x.is_public()))); + } + } + } + } + } + + // Well if we made it this far then we've got a valid dependency. We + // want this iterator to be inherently "peekable" so we don't + // necessarily return the item just yet. Instead we stash it away to + // get returned later, and if we replaced something then that was + // actually the candidate to try first so we return that. + if let Some(r) = mem::replace(&mut self.has_another, Some(b)) { + return Some((r, true)); + } + } + + // Alright we've entirely exhausted our list of candidates. If we've got + // something stashed away return that here (also indicating that there's + // nothing else). + self.has_another.take().map(|r| (r, false)) + } +} + +/// Attempts to find a new conflict that allows a `find_candidate` feather then the input one. +/// It will add the new conflict to the cache if one is found. +/// +/// Panics if the input conflict is not all active in `cx`. +fn generalize_conflicting( + cx: &Context, + registry: &mut RegistryQueryer<'_>, + past_conflicting_activations: &mut conflict_cache::ConflictCache, + parent: &Summary, + dep: &Dependency, + conflicting_activations: &ConflictMap, +) -> Option { + if conflicting_activations.is_empty() { + return None; + } + // We need to determine the `ContextAge` that this `conflicting_activations` will jump to, and why. 
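The generalization performed below rests on the substitution spelled out in the comments: if every candidate that could satisfy one of the critical package's parents is already known to conflict, merge those cached conflicts, drop the candidates themselves, and blame the parent instead. A toy rendering of just that merge-and-substitute step:

use std::collections::BTreeMap;

type PkgId = &'static str;
type Reason = &'static str;

fn blame_parent(
    base: &BTreeMap<PkgId, Reason>,
    parent: PkgId,
    parent_reason: Reason,
    // (candidate, cached conflict for that candidate)
    candidates: &[(PkgId, BTreeMap<PkgId, Reason>)],
) -> BTreeMap<PkgId, Reason> {
    let mut combined = base.clone();
    // Combining known-bad states is always sound: if the union were active,
    // each individual bad state would be active too.
    for (_, conflict) in candidates {
        combined.extend(conflict.iter().map(|(&p, &r)| (p, r)));
    }
    // Substitute: drop the candidates and point at the parent that forces one
    // of them to be chosen.
    for (candidate, _) in candidates {
        combined.remove(candidate);
    }
    combined.insert(parent, parent_reason);
    combined
}

fn main() {
    let cached: BTreeMap<PkgId, Reason> = [("bar 1.0.0", "links")].into_iter().collect();
    let con = blame_parent(&BTreeMap::new(), "parent 0.1.0", "semver", &[("bar 1.0.0", cached)]);
    assert!(con.contains_key("parent 0.1.0") && !con.contains_key("bar 1.0.0"));
}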
+ let (backtrack_critical_age, backtrack_critical_id) = conflicting_activations + .keys() + .map(|&c| (cx.is_active(c).expect("not currently active!?"), c)) + .max() + .unwrap(); + let backtrack_critical_reason: ConflictReason = + conflicting_activations[&backtrack_critical_id].clone(); + + if cx + .parents + .is_path_from_to(&parent.package_id(), &backtrack_critical_id) + { + // We are a descendant of the trigger of the problem. + // The best generalization of this is to let things bubble up + // and let `backtrack_critical_id` figure this out. + return None; + } + // What parents does that critical activation have + for (critical_parent, critical_parents_deps) in + cx.parents.edges(&backtrack_critical_id).filter(|(p, _)| { + // it will only help backjump further if it is older then the critical_age + cx.is_active(*p).expect("parent not currently active!?") < backtrack_critical_age + }) + { + for critical_parents_dep in critical_parents_deps.iter() { + // A dep is equivalent to one of the things it can resolve to. + // Thus, if all the things it can resolve to have already ben determined + // to be conflicting, then we can just say that we conflict with the parent. + if let Some(others) = registry + .query(critical_parents_dep) + .expect("an already used dep now error!?") + .iter() + .rev() // the last one to be tried is the least likely to be in the cache, so start with that. + .map(|other| { + past_conflicting_activations + .find( + dep, + &|id| { + if id == other.package_id() { + // we are imagining that we used other instead + Some(backtrack_critical_age) + } else { + cx.is_active(id) + } + }, + Some(other.package_id()), + // we only care about things that are newer then critical_age + backtrack_critical_age, + ) + .map(|con| (other.package_id(), con)) + }) + .collect::>>() + { + let mut con = conflicting_activations.clone(); + // It is always valid to combine previously inserted conflicts. + // A, B are both known bad states each that can never be activated. + // A + B is redundant but cant be activated, as if + // A + B is active then A is active and we know that is not ok. + for (_, other) in &others { + con.extend(other.iter().map(|(&id, re)| (id, re.clone()))); + } + // Now that we have this combined conflict, we can do a substitution: + // A dep is equivalent to one of the things it can resolve to. + // So we can remove all the things that it resolves to and replace with the parent. + for (other_id, _) in &others { + con.remove(other_id); + } + con.insert(*critical_parent, backtrack_critical_reason); + + if cfg!(debug_assertions) { + // the entire point is to find an older conflict, so let's make sure we did + let new_age = con + .keys() + .map(|&c| cx.is_active(c).expect("not currently active!?")) + .max() + .unwrap(); + assert!( + new_age < backtrack_critical_age, + "new_age {} < backtrack_critical_age {}", + new_age, + backtrack_critical_age + ); + } + past_conflicting_activations.insert(dep, &con); + return Some(con); + } + } + } + None +} + +/// Looks through the states in `backtrack_stack` for dependencies with +/// remaining candidates. For each one, also checks if rolling back +/// could change the outcome of the failed resolution that caused backtracking +/// in the first place. Namely, if we've backtracked past the parent of the +/// failed dep, or any of the packages flagged as giving us trouble in +/// `conflicting_activations`. +/// +/// Read +/// For several more detailed explanations of the logic here. 
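Backjumping here hinges on the conflict's "age": once we know the earliest point at which the recorded conflict became unavoidable, every saved frame younger than that point is guaranteed to fail again and can be discarded without retrying it. A stripped-down sketch with stand-in types:

type ContextAge = usize;

struct ToyFrame {
    age: ContextAge,
    candidate: &'static str,
}

// Pop frames until one predates the conflict; anything younger would replay
// the exact same failure.
fn backjump(stack: &mut Vec<ToyFrame>, conflict_age: Option<ContextAge>) -> Option<ToyFrame> {
    while let Some(frame) = stack.pop() {
        if let Some(age) = conflict_age {
            if frame.age > age {
                continue;
            }
        }
        return Some(frame);
    }
    None
}

fn main() {
    let mut stack = vec![
        ToyFrame { age: 2, candidate: "foo 0.9.0" },
        ToyFrame { age: 5, candidate: "bar 1.1.0" },
    ];
    // With the conflict fixed at age 3, the age-5 frame is skipped outright and
    // resolution jumps straight back to the age-2 frame.
    assert_eq!(backjump(&mut stack, Some(3)).unwrap().candidate, "foo 0.9.0");
}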
+fn find_candidate( + cx: &Context, + backtrack_stack: &mut Vec, + parent: &Summary, + backtracked: bool, + conflicting_activations: &ConflictMap, +) -> Option<(Summary, bool, BacktrackFrame)> { + // When we're calling this method we know that `parent` failed to + // activate. That means that some dependency failed to get resolved for + // whatever reason. Normally, that means that all of those reasons + // (plus maybe some extras) are listed in `conflicting_activations`. + // + // The abnormal situations are things that do not put all of the reasons in `conflicting_activations`: + // If we backtracked we do not know how our `conflicting_activations` related to + // the cause of that backtrack, so we do not update it. + // If we had a PublicDependency conflict, then we do not yet have a compact way to + // represent all the parts of the problem, so `conflicting_activations` is incomplete. + let age = if !backtracked + && !conflicting_activations + .values() + .any(|c| *c == ConflictReason::PublicDependency) + { + // we dont have abnormal situations. So we can ask `cx` for how far back we need to go. + let a = cx.is_conflicting(Some(parent.package_id()), conflicting_activations); + // If the `conflicting_activations` does not apply to `cx`, then something went very wrong + // in building it. But we will just fall back to laboriously trying all possibilities witch + // will give us the correct answer so only `assert` if there is a developer to debug it. + debug_assert!(a.is_some()); + a + } else { + None + }; + + while let Some(mut frame) = backtrack_stack.pop() { + let next = frame.remaining_candidates.next( + &mut frame.conflicting_activations, + &frame.context, + &frame.dep, + frame.parent.package_id(), + ); + let (candidate, has_another) = match next { + Some(pair) => pair, + None => continue, + }; + + // If all members of `conflicting_activations` are still + // active in this back up we know that we're guaranteed to not actually + // make any progress. As a result if we hit this condition we can + // completely skip this backtrack frame and move on to the next. + if let Some(age) = age { + if frame.context.age() > age { + trace!( + "{} = \"{}\" skip as not solving {}: {:?}", + frame.dep.package_name(), + frame.dep.version_req(), + parent.package_id(), + conflicting_activations + ); + // above we use `cx` to determine that this is still going to be conflicting. + // but lets just double check. + debug_assert!( + frame + .context + .is_conflicting(Some(parent.package_id()), conflicting_activations) + == Some(age) + ); + continue; + } else { + // above we use `cx` to determine that this is not going to be conflicting. + // but lets just double check. + debug_assert!(frame + .context + .is_conflicting(Some(parent.package_id()), conflicting_activations) + .is_none()); + } + } + + return Some((candidate, has_another, frame)); + } + None +} + +fn check_cycles(resolve: &Resolve) -> CargoResult<()> { + // Sort packages to produce user friendly deterministic errors. + let mut all_packages: Vec<_> = resolve.iter().collect(); + all_packages.sort_unstable(); + let mut checked = HashSet::new(); + for pkg in all_packages { + if !checked.contains(&pkg) { + visit(resolve, pkg, &mut HashSet::new(), &mut checked)? + } + } + return Ok(()); + + fn visit( + resolve: &Resolve, + id: PackageId, + visited: &mut HashSet, + checked: &mut HashSet, + ) -> CargoResult<()> { + // See if we visited ourselves + if !visited.insert(id) { + failure::bail!( + "cyclic package dependency: package `{}` depends on itself. 
Cycle:\n{}", + id, + errors::describe_path(&resolve.path_to_top(&id)) + ); + } + + // If we've already checked this node no need to recurse again as we'll + // just conclude the same thing as last time, so we only execute the + // recursive step if we successfully insert into `checked`. + // + // Note that if we hit an intransitive dependency then we clear out the + // visitation list as we can't induce a cycle through transitive + // dependencies. + if checked.insert(id) { + for (dep, listings) in resolve.deps_not_replaced(id) { + let is_transitive = listings.iter().any(|d| d.is_transitive()); + let mut empty = HashSet::new(); + let visited = if is_transitive { + &mut *visited + } else { + &mut empty + }; + visit(resolve, dep, visited, checked)?; + + if let Some(id) = resolve.replacement(dep) { + visit(resolve, id, visited, checked)?; + } + } + } + + // Ok, we're done, no longer visiting our node any more + visited.remove(&id); + Ok(()) + } +} + +/// Checks that packages are unique when written to lock file. +/// +/// When writing package ID's to lock file, we apply lossy encoding. In +/// particular, we don't store paths of path dependencies. That means that +/// *different* packages may collide in the lock file, hence this check. +fn check_duplicate_pkgs_in_lockfile(resolve: &Resolve) -> CargoResult<()> { + let mut unique_pkg_ids = HashMap::new(); + let state = encode::EncodeState::new(resolve); + for pkg_id in resolve.iter() { + let encodable_pkd_id = encode::encodable_package_id(pkg_id, &state); + if let Some(prev_pkg_id) = unique_pkg_ids.insert(encodable_pkd_id, pkg_id) { + failure::bail!( + "package collision in the lockfile: packages {} and {} are different, \ + but only one can be written to lockfile unambiguously", + prev_pkg_id, + pkg_id + ) + } + } + Ok(()) +} diff --git a/src/cargo/core/resolver/resolve.rs b/src/cargo/core/resolver/resolve.rs index 9ced48f4d67..ebf71519631 100644 --- a/src/cargo/core/resolver/resolve.rs +++ b/src/cargo/core/resolver/resolve.rs @@ -1,394 +1,394 @@ -use std::borrow::Borrow; -use std::collections::{HashMap, HashSet}; -use std::fmt; -use std::iter::FromIterator; - -use url::Url; - -use crate::core::dependency::Kind; -use crate::core::{Dependency, PackageId, PackageIdSpec, Summary, Target}; -use crate::util::errors::CargoResult; -use crate::util::Graph; - -use super::encode::Metadata; - -/// Represents a fully-resolved package dependency graph. Each node in the graph -/// is a package and edges represent dependencies between packages. -/// -/// Each instance of `Resolve` also understands the full set of features used -/// for each package. -#[derive(PartialEq)] -pub struct Resolve { - /// A graph, whose vertices are packages and edges are dependency specifications - /// from `Cargo.toml`. We need a `Vec` here because the same package - /// might be present in both `[dependencies]` and `[build-dependencies]`. - graph: Graph>, - /// Replacements from the `[replace]` table. - replacements: HashMap, - /// Inverted version of `replacements`. - reverse_replacements: HashMap, - /// An empty `HashSet` to avoid creating a new `HashSet` for every package - /// that does not have any features, and to avoid using `Option` to - /// simplify the API. - empty_features: HashSet, - /// Features enabled for a given package. - features: HashMap>, - /// Checksum for each package. A SHA256 hash of the `.crate` file used to - /// validate the correct crate file is used. This is `None` for sources - /// that do not use `.crate` files, like path or git dependencies. 
- checksums: HashMap>, - /// "Unknown" metadata. This is a collection of extra, unrecognized data - /// found in the `[metadata]` section of `Cargo.lock`, preserved for - /// forwards compatibility. - metadata: Metadata, - /// `[patch]` entries that did not match anything, preserved in - /// `Cargo.lock` as the `[[patch.unused]]` table array. Tracking unused - /// patches helps prevent Cargo from being forced to re-update the - /// registry every time it runs, and keeps the resolve in a locked state - /// so it doesn't re-resolve the unused entries. - unused_patches: Vec, - /// A map from packages to a set of their public dependencies - public_dependencies: HashMap>, - /// Version of the `Cargo.lock` format, see - /// `cargo::core::resolver::encode` for more. - version: ResolveVersion, -} - -/// A version to indicate how a `Cargo.lock` should be serialized. Currently V1 -/// is the default and dates back to the origins of Cargo. A V2 is currently -/// being proposed which provides a much more compact representation of -/// dependency edges and also moves checksums out of `[metadata]`. -/// -/// It's theorized that we can add more here over time to track larger changes -/// to the `Cargo.lock` format, but we've yet to see how that strategy pans out. -#[derive(PartialEq, Clone, Debug)] -pub enum ResolveVersion { - V1, - V2, -} - -impl Resolve { - pub fn new( - graph: Graph>, - replacements: HashMap, - features: HashMap>, - checksums: HashMap>, - metadata: Metadata, - unused_patches: Vec, - version: ResolveVersion, - ) -> Resolve { - let reverse_replacements = replacements.iter().map(|(&p, &r)| (r, p)).collect(); - let public_dependencies = graph - .iter() - .map(|p| { - let public_deps = graph - .edges(p) - .filter(|(_, deps)| { - deps.iter() - .any(|d| d.kind() == Kind::Normal && d.is_public()) - }) - .map(|(dep_package, _)| *dep_package) - .collect::>(); - - (*p, public_deps) - }) - .collect(); - - Resolve { - graph, - replacements, - features, - checksums, - metadata, - unused_patches, - empty_features: HashSet::new(), - reverse_replacements, - public_dependencies, - version, - } - } - - /// Resolves one of the paths from the given dependent package up to - /// the root. - pub fn path_to_top<'a>(&'a self, pkg: &'a PackageId) -> Vec<&'a PackageId> { - self.graph.path_to_top(pkg) - } - - pub fn register_used_patches(&mut self, patches: &HashMap>) { - for summary in patches.values().flat_map(|v| v) { - if self.iter().any(|id| id == summary.package_id()) { - continue; - } - self.unused_patches.push(summary.package_id()); - } - } - - pub fn merge_from(&mut self, previous: &Resolve) -> CargoResult<()> { - // Given a previous instance of resolve, it should be forbidden to ever - // have a checksums which *differ*. If the same package ID has differing - // checksums, then something has gone wrong such as: - // - // * Something got seriously corrupted - // * A "mirror" isn't actually a mirror as some changes were made - // * A replacement source wasn't actually a replacement, some changes - // were made - // - // In all of these cases, we want to report an error to indicate that - // something is awry. Normal execution (esp just using crates.io) should - // never run into this. - for (id, cksum) in previous.checksums.iter() { - if let Some(mine) = self.checksums.get(id) { - if mine == cksum { - continue; - } - - // If the previous checksum wasn't calculated, the current - // checksum is `Some`. 
This may indicate that a source was - // erroneously replaced or was replaced with something that - // desires stronger checksum guarantees than can be afforded - // elsewhere. - if cksum.is_none() { - failure::bail!( - "\ -checksum for `{}` was not previously calculated, but a checksum could now \ -be calculated - -this could be indicative of a few possible situations: - - * the source `{}` did not previously support checksums, - but was replaced with one that does - * newer Cargo implementations know how to checksum this source, but this - older implementation does not - * the lock file is corrupt -", - id, - id.source_id() - ) - - // If our checksum hasn't been calculated, then it could mean - // that future Cargo figured out how to checksum something or - // more realistically we were overridden with a source that does - // not have checksums. - } else if mine.is_none() { - failure::bail!( - "\ -checksum for `{}` could not be calculated, but a checksum is listed in \ -the existing lock file - -this could be indicative of a few possible situations: - - * the source `{}` supports checksums, - but was replaced with one that doesn't - * the lock file is corrupt - -unable to verify that `{0}` is the same as when the lockfile was generated -", - id, - id.source_id() - ) - - // If the checksums aren't equal, and neither is None, then they - // must both be Some, in which case the checksum now differs. - // That's quite bad! - } else { - failure::bail!( - "\ -checksum for `{}` changed between lock files - -this could be indicative of a few possible errors: - - * the lock file is corrupt - * a replacement source in use (e.g., a mirror) returned a different checksum - * the source itself may be corrupt in one way or another - -unable to verify that `{0}` is the same as when the lockfile was generated -", - id - ); - } - } - } - - // Be sure to just copy over any unknown metadata. - self.metadata = previous.metadata.clone(); - - // The goal of Cargo is largely to preserve the encoding of - // `Cargo.lock` that it finds on the filesystem. Sometimes `Cargo.lock` - // changes are in the works where they haven't been set as the default - // yet but will become the default soon. We want to preserve those - // features if we find them. - // - // For this reason if the previous `Cargo.lock` is from the future, or - // otherwise it looks like it's produced with future features we - // understand, then the new resolve will be encoded with the same - // version. Note that new instances of `Resolve` always use the default - // encoding, and this is where we switch it to a future encoding if the - // future encoding isn't yet the default. 
- if previous.version.from_the_future() { - self.version = previous.version.clone(); - } - - Ok(()) - } - - pub fn contains(&self, k: &Q) -> bool - where - PackageId: Borrow, - Q: Ord + Eq, - { - self.graph.contains(k) - } - - pub fn sort(&self) -> Vec { - self.graph.sort() - } - - pub fn iter<'a>(&'a self) -> impl Iterator + 'a { - self.graph.iter().cloned() - } - - pub fn deps(&self, pkg: PackageId) -> impl Iterator { - self.deps_not_replaced(pkg) - .map(move |(id, deps)| (self.replacement(id).unwrap_or(id), deps)) - } - - pub fn deps_not_replaced( - &self, - pkg: PackageId, - ) -> impl Iterator { - self.graph - .edges(&pkg) - .map(|(id, deps)| (*id, deps.as_slice())) - } - - pub fn replacement(&self, pkg: PackageId) -> Option { - self.replacements.get(&pkg).cloned() - } - - pub fn replacements(&self) -> &HashMap { - &self.replacements - } - - pub fn features(&self, pkg: PackageId) -> &HashSet { - self.features.get(&pkg).unwrap_or(&self.empty_features) - } - - pub fn is_public_dep(&self, pkg: PackageId, dep: PackageId) -> bool { - self.public_dependencies - .get(&pkg) - .map(|public_deps| public_deps.contains(&dep)) - .unwrap_or_else(|| panic!("Unknown dependency {:?} for package {:?}", dep, pkg)) - } - - pub fn features_sorted(&self, pkg: PackageId) -> Vec<&str> { - let mut v = Vec::from_iter(self.features(pkg).iter().map(|s| s.as_ref())); - v.sort_unstable(); - v - } - - pub fn query(&self, spec: &str) -> CargoResult { - PackageIdSpec::query_str(spec, self.iter()) - } - - pub fn unused_patches(&self) -> &[PackageId] { - &self.unused_patches - } - - pub fn checksums(&self) -> &HashMap> { - &self.checksums - } - - pub fn metadata(&self) -> &Metadata { - &self.metadata - } - - pub fn extern_crate_name( - &self, - from: PackageId, - to: PackageId, - to_target: &Target, - ) -> CargoResult { - let deps = if from == to { - &[] - } else { - self.dependencies_listed(from, to) - }; - - let crate_name = to_target.crate_name(); - let mut names = deps.iter().map(|d| { - d.explicit_name_in_toml() - .map(|s| s.as_str().replace("-", "_")) - .unwrap_or_else(|| crate_name.clone()) - }); - let name = names.next().unwrap_or_else(|| crate_name.clone()); - for n in names { - failure::ensure!( - n == name, - "the crate `{}` depends on crate `{}` multiple times with different names", - from, - to, - ); - } - Ok(name) - } - - fn dependencies_listed(&self, from: PackageId, to: PackageId) -> &[Dependency] { - // We've got a dependency on `from` to `to`, but this dependency edge - // may be affected by [replace]. If the `to` package is listed as the - // target of a replacement (aka the key of a reverse replacement map) - // then we try to find our dependency edge through that. If that fails - // then we go down below assuming it's not replaced. - // - // Note that we don't treat `from` as if it's been replaced because - // that's where the dependency originates from, and we only replace - // targets of dependencies not the originator. - if let Some(replace) = self.reverse_replacements.get(&to) { - if let Some(deps) = self.graph.edge(&from, replace) { - return deps; - } - } - match self.graph.edge(&from, &to) { - Some(ret) => ret, - None => panic!("no Dependency listed for `{}` => `{}`", from, to), - } - } - - /// Returns the version of the encoding that's being used for this lock - /// file. 
- pub fn version(&self) -> &ResolveVersion { - &self.version - } -} - -impl fmt::Debug for Resolve { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - writeln!(fmt, "graph: {:?}", self.graph)?; - writeln!(fmt, "\nfeatures: {{")?; - for (pkg, features) in &self.features { - writeln!(fmt, " {}: {:?}", pkg, features)?; - } - write!(fmt, "}}") - } -} - -impl ResolveVersion { - /// The default way to encode `Cargo.lock`. - /// - /// This is used for new `Cargo.lock` files that are generated without a - /// previous `Cargo.lock` files, and generally matches with what we want to - /// encode. - pub fn default() -> ResolveVersion { - ResolveVersion::V1 - } - - /// Returns whether this encoding version is "from the future". - /// - /// This means that this encoding version is not currently the default but - /// intended to become the default "soon". - pub fn from_the_future(&self) -> bool { - match self { - ResolveVersion::V2 => true, - ResolveVersion::V1 => false, - } - } -} +use std::borrow::Borrow; +use std::collections::{HashMap, HashSet}; +use std::fmt; +use std::iter::FromIterator; + +use url::Url; + +use crate::core::dependency::Kind; +use crate::core::{Dependency, PackageId, PackageIdSpec, Summary, Target}; +use crate::util::errors::CargoResult; +use crate::util::{Graph, Platform}; + +use super::encode::Metadata; + +/// Represents a fully-resolved package dependency graph. Each node in the graph +/// is a package and edges represent dependencies between packages. +/// +/// Each instance of `Resolve` also understands the full set of features used +/// for each package. +#[derive(PartialEq)] +pub struct Resolve { + /// A graph, whose vertices are packages and edges are dependency specifications + /// from `Cargo.toml`. We need a `Vec` here because the same package + /// might be present in both `[dependencies]` and `[build-dependencies]`. + graph: Graph>, + /// Replacements from the `[replace]` table. + replacements: HashMap, + /// Inverted version of `replacements`. + reverse_replacements: HashMap, + /// An empty `HashMap` to avoid creating a new `HashMap` for every package + /// that does not have any features, and to avoid using `Option` to + /// simplify the API. + empty_features: HashMap>, + /// Features enabled for a given package. + features: HashMap>>, + /// Checksum for each package. A SHA256 hash of the `.crate` file used to + /// validate the correct crate file is used. This is `None` for sources + /// that do not use `.crate` files, like path or git dependencies. + checksums: HashMap>, + /// "Unknown" metadata. This is a collection of extra, unrecognized data + /// found in the `[metadata]` section of `Cargo.lock`, preserved for + /// forwards compatibility. + metadata: Metadata, + /// `[patch]` entries that did not match anything, preserved in + /// `Cargo.lock` as the `[[patch.unused]]` table array. Tracking unused + /// patches helps prevent Cargo from being forced to re-update the + /// registry every time it runs, and keeps the resolve in a locked state + /// so it doesn't re-resolve the unused entries. + unused_patches: Vec, + /// A map from packages to a set of their public dependencies + public_dependencies: HashMap>, + /// Version of the `Cargo.lock` format, see + /// `cargo::core::resolver::encode` for more. + version: ResolveVersion, +} + +/// A version to indicate how a `Cargo.lock` should be serialized. Currently V1 +/// is the default and dates back to the origins of Cargo. 
A V2 is currently +/// being proposed which provides a much more compact representation of +/// dependency edges and also moves checksums out of `[metadata]`. +/// +/// It's theorized that we can add more here over time to track larger changes +/// to the `Cargo.lock` format, but we've yet to see how that strategy pans out. +#[derive(PartialEq, Clone, Debug)] +pub enum ResolveVersion { + V1, + V2, +} + +impl Resolve { + pub fn new( + graph: Graph>, + replacements: HashMap, + features: HashMap>>, + checksums: HashMap>, + metadata: Metadata, + unused_patches: Vec, + version: ResolveVersion, + ) -> Resolve { + let reverse_replacements = replacements.iter().map(|(&p, &r)| (r, p)).collect(); + let public_dependencies = graph + .iter() + .map(|p| { + let public_deps = graph + .edges(p) + .filter(|(_, deps)| { + deps.iter() + .any(|d| d.kind() == Kind::Normal && d.is_public()) + }) + .map(|(dep_package, _)| *dep_package) + .collect::>(); + + (*p, public_deps) + }) + .collect(); + + Resolve { + graph, + replacements, + features, + checksums, + metadata, + unused_patches, + empty_features: HashMap::new(), + reverse_replacements, + public_dependencies, + version, + } + } + + /// Resolves one of the paths from the given dependent package up to + /// the root. + pub fn path_to_top<'a>(&'a self, pkg: &'a PackageId) -> Vec<&'a PackageId> { + self.graph.path_to_top(pkg) + } + + pub fn register_used_patches(&mut self, patches: &HashMap>) { + for summary in patches.values().flat_map(|v| v) { + if self.iter().any(|id| id == summary.package_id()) { + continue; + } + self.unused_patches.push(summary.package_id()); + } + } + + pub fn merge_from(&mut self, previous: &Resolve) -> CargoResult<()> { + // Given a previous instance of resolve, it should be forbidden to ever + // have a checksums which *differ*. If the same package ID has differing + // checksums, then something has gone wrong such as: + // + // * Something got seriously corrupted + // * A "mirror" isn't actually a mirror as some changes were made + // * A replacement source wasn't actually a replacement, some changes + // were made + // + // In all of these cases, we want to report an error to indicate that + // something is awry. Normal execution (esp just using crates.io) should + // never run into this. + for (id, cksum) in previous.checksums.iter() { + if let Some(mine) = self.checksums.get(id) { + if mine == cksum { + continue; + } + + // If the previous checksum wasn't calculated, the current + // checksum is `Some`. This may indicate that a source was + // erroneously replaced or was replaced with something that + // desires stronger checksum guarantees than can be afforded + // elsewhere. + if cksum.is_none() { + failure::bail!( + "\ +checksum for `{}` was not previously calculated, but a checksum could now \ +be calculated + +this could be indicative of a few possible situations: + + * the source `{}` did not previously support checksums, + but was replaced with one that does + * newer Cargo implementations know how to checksum this source, but this + older implementation does not + * the lock file is corrupt +", + id, + id.source_id() + ) + + // If our checksum hasn't been calculated, then it could mean + // that future Cargo figured out how to checksum something or + // more realistically we were overridden with a source that does + // not have checksums. 
+ } else if mine.is_none() { + failure::bail!( + "\ +checksum for `{}` could not be calculated, but a checksum is listed in \ +the existing lock file + +this could be indicative of a few possible situations: + + * the source `{}` supports checksums, + but was replaced with one that doesn't + * the lock file is corrupt + +unable to verify that `{0}` is the same as when the lockfile was generated +", + id, + id.source_id() + ) + + // If the checksums aren't equal, and neither is None, then they + // must both be Some, in which case the checksum now differs. + // That's quite bad! + } else { + failure::bail!( + "\ +checksum for `{}` changed between lock files + +this could be indicative of a few possible errors: + + * the lock file is corrupt + * a replacement source in use (e.g., a mirror) returned a different checksum + * the source itself may be corrupt in one way or another + +unable to verify that `{0}` is the same as when the lockfile was generated +", + id + ); + } + } + } + + // Be sure to just copy over any unknown metadata. + self.metadata = previous.metadata.clone(); + + // The goal of Cargo is largely to preserve the encoding of + // `Cargo.lock` that it finds on the filesystem. Sometimes `Cargo.lock` + // changes are in the works where they haven't been set as the default + // yet but will become the default soon. We want to preserve those + // features if we find them. + // + // For this reason if the previous `Cargo.lock` is from the future, or + // otherwise it looks like it's produced with future features we + // understand, then the new resolve will be encoded with the same + // version. Note that new instances of `Resolve` always use the default + // encoding, and this is where we switch it to a future encoding if the + // future encoding isn't yet the default. 
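// A minimal, self-contained sketch of the checksum reconciliation rules spelled out
// above. The enum and helper here are illustrative names, not Cargo's API; the real
// logic is the branch chain in `merge_from` with its full error messages.
#[derive(Debug, PartialEq)]
enum ChecksumMismatch {
    NewlyCalculated,   // previous lock file had no checksum, but one can be computed now
    NoLongerAvailable, // previous lock file had a checksum, but the current source has none
    Changed,           // both sides have a checksum and they differ
}

fn reconcile(previous: Option<&str>, current: Option<&str>) -> Result<(), ChecksumMismatch> {
    match (previous, current) {
        (a, b) if a == b => Ok(()),
        (None, Some(_)) => Err(ChecksumMismatch::NewlyCalculated),
        (Some(_), None) => Err(ChecksumMismatch::NoLongerAvailable),
        _ => Err(ChecksumMismatch::Changed),
    }
}

#[test]
fn reconcile_cases() {
    assert!(reconcile(Some("abc"), Some("abc")).is_ok());
    assert_eq!(reconcile(None, Some("abc")), Err(ChecksumMismatch::NewlyCalculated));
    assert_eq!(reconcile(Some("abc"), None), Err(ChecksumMismatch::NoLongerAvailable));
    assert_eq!(reconcile(Some("abc"), Some("def")), Err(ChecksumMismatch::Changed));
}
// The `from_the_future` check just below then carries a future lock-file encoding over
// to the new resolve, as the comment above explains.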
+ if previous.version.from_the_future() { + self.version = previous.version.clone(); + } + + Ok(()) + } + + pub fn contains(&self, k: &Q) -> bool + where + PackageId: Borrow, + Q: Ord + Eq, + { + self.graph.contains(k) + } + + pub fn sort(&self) -> Vec { + self.graph.sort() + } + + pub fn iter<'a>(&'a self) -> impl Iterator + 'a { + self.graph.iter().cloned() + } + + pub fn deps(&self, pkg: PackageId) -> impl Iterator { + self.deps_not_replaced(pkg) + .map(move |(id, deps)| (self.replacement(id).unwrap_or(id), deps)) + } + + pub fn deps_not_replaced( + &self, + pkg: PackageId, + ) -> impl Iterator { + self.graph + .edges(&pkg) + .map(|(id, deps)| (*id, deps.as_slice())) + } + + pub fn replacement(&self, pkg: PackageId) -> Option { + self.replacements.get(&pkg).cloned() + } + + pub fn replacements(&self) -> &HashMap { + &self.replacements + } + + pub fn features(&self, pkg: PackageId) -> &HashMap> { + self.features.get(&pkg).unwrap_or(&self.empty_features) + } + + pub fn is_public_dep(&self, pkg: PackageId, dep: PackageId) -> bool { + self.public_dependencies + .get(&pkg) + .map(|public_deps| public_deps.contains(&dep)) + .unwrap_or_else(|| panic!("Unknown dependency {:?} for package {:?}", dep, pkg)) + } + + pub fn features_sorted(&self, pkg: PackageId) -> Vec<&str> { + let mut v = Vec::from_iter(self.features(pkg).iter().map(|(s, _)| s.as_ref())); + v.sort_unstable(); + v + } + + pub fn query(&self, spec: &str) -> CargoResult { + PackageIdSpec::query_str(spec, self.iter()) + } + + pub fn unused_patches(&self) -> &[PackageId] { + &self.unused_patches + } + + pub fn checksums(&self) -> &HashMap> { + &self.checksums + } + + pub fn metadata(&self) -> &Metadata { + &self.metadata + } + + pub fn extern_crate_name( + &self, + from: PackageId, + to: PackageId, + to_target: &Target, + ) -> CargoResult { + let deps = if from == to { + &[] + } else { + self.dependencies_listed(from, to) + }; + + let crate_name = to_target.crate_name(); + let mut names = deps.iter().map(|d| { + d.explicit_name_in_toml() + .map(|s| s.as_str().replace("-", "_")) + .unwrap_or_else(|| crate_name.clone()) + }); + let name = names.next().unwrap_or_else(|| crate_name.clone()); + for n in names { + failure::ensure!( + n == name, + "the crate `{}` depends on crate `{}` multiple times with different names", + from, + to, + ); + } + Ok(name) + } + + fn dependencies_listed(&self, from: PackageId, to: PackageId) -> &[Dependency] { + // We've got a dependency on `from` to `to`, but this dependency edge + // may be affected by [replace]. If the `to` package is listed as the + // target of a replacement (aka the key of a reverse replacement map) + // then we try to find our dependency edge through that. If that fails + // then we go down below assuming it's not replaced. + // + // Note that we don't treat `from` as if it's been replaced because + // that's where the dependency originates from, and we only replace + // targets of dependencies not the originator. + if let Some(replace) = self.reverse_replacements.get(&to) { + if let Some(deps) = self.graph.edge(&from, replace) { + return deps; + } + } + match self.graph.edge(&from, &to) { + Some(ret) => ret, + None => panic!("no Dependency listed for `{}` => `{}`", from, to), + } + } + + /// Returns the version of the encoding that's being used for this lock + /// file. 
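// A small, self-contained sketch of the renaming rule in `extern_crate_name` above:
// each dependency edge may rename the crate in `Cargo.toml`, dashes become
// underscores, and all edges to the same package must agree on a single name.
// `pick_extern_name` is an illustrative helper, not Cargo's API.
fn pick_extern_name(crate_name: &str, renames: &[Option<&str>]) -> Result<String, String> {
    let mut names = renames.iter().copied().map(|rename| {
        rename
            .map(|n| n.replace('-', "_"))
            .unwrap_or_else(|| crate_name.to_string())
    });
    let first = names.next().unwrap_or_else(|| crate_name.to_string());
    for n in names {
        if n != first {
            return Err(format!("conflicting extern names: `{}` vs `{}`", first, n));
        }
    }
    Ok(first)
}

#[test]
fn extern_name_examples() {
    // No rename: the target's crate name is used as-is.
    assert_eq!(pick_extern_name("serde_json", &[None]).unwrap(), "serde_json");
    // A rename containing a dash becomes a valid identifier.
    assert_eq!(pick_extern_name("serde_json", &[Some("json-lib")]).unwrap(), "json_lib");
    // Two edges that disagree on the name are rejected, mirroring the `ensure!` above.
    assert!(pick_extern_name("serde_json", &[Some("a"), Some("b")]).is_err());
}
// (`version()` just below reports which lock-file encoding `merge_from` settled on.)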
+ pub fn version(&self) -> &ResolveVersion { + &self.version + } +} + +impl fmt::Debug for Resolve { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(fmt, "graph: {:?}", self.graph)?; + writeln!(fmt, "\nfeatures: {{")?; + for (pkg, features) in &self.features { + writeln!(fmt, " {}: {:?}", pkg, features)?; + } + write!(fmt, "}}") + } +} + +impl ResolveVersion { + /// The default way to encode `Cargo.lock`. + /// + /// This is used for new `Cargo.lock` files that are generated without a + /// previous `Cargo.lock` files, and generally matches with what we want to + /// encode. + pub fn default() -> ResolveVersion { + ResolveVersion::V1 + } + + /// Returns whether this encoding version is "from the future". + /// + /// This means that this encoding version is not currently the default but + /// intended to become the default "soon". + pub fn from_the_future(&self) -> bool { + match self { + ResolveVersion::V2 => true, + ResolveVersion::V1 => false, + } + } +} diff --git a/src/cargo/core/summary.rs b/src/cargo/core/summary.rs index fb52b9179d7..5ac651cd813 100644 --- a/src/cargo/core/summary.rs +++ b/src/cargo/core/summary.rs @@ -1,419 +1,423 @@ -use std::borrow::Borrow; -use std::collections::{BTreeMap, HashMap}; -use std::fmt::Display; -use std::hash::{Hash, Hasher}; -use std::mem; -use std::rc::Rc; - -use serde::{Serialize, Serializer}; - -use crate::core::interning::InternedString; -use crate::core::{Dependency, PackageId, SourceId}; -use semver::Version; - -use crate::util::CargoResult; - -/// Subset of a `Manifest`. Contains only the most important information about -/// a package. -/// -/// Summaries are cloned, and should not be mutated after creation -#[derive(Debug, Clone)] -pub struct Summary { - inner: Rc, -} - -#[derive(Debug, Clone)] -struct Inner { - package_id: PackageId, - dependencies: Vec, - features: FeatureMap, - checksum: Option, - links: Option, - namespaced_features: bool, -} - -impl Summary { - pub fn new( - pkg_id: PackageId, - dependencies: Vec, - features: &BTreeMap>>, - links: Option>, - namespaced_features: bool, - ) -> CargoResult - where - K: Borrow + Ord + Display, - { - for dep in dependencies.iter() { - let feature = dep.name_in_toml(); - if !namespaced_features && features.get(&*feature).is_some() { - failure::bail!( - "Features and dependencies cannot have the \ - same name: `{}`", - feature - ) - } - if dep.is_optional() && !dep.is_transitive() { - failure::bail!( - "Dev-dependencies are not allowed to be optional: `{}`", - feature - ) - } - } - let feature_map = build_feature_map(features, &dependencies, namespaced_features)?; - Ok(Summary { - inner: Rc::new(Inner { - package_id: pkg_id, - dependencies, - features: feature_map, - checksum: None, - links: links.map(|l| InternedString::new(l.as_ref())), - namespaced_features, - }), - }) - } - - pub fn package_id(&self) -> PackageId { - self.inner.package_id - } - pub fn name(&self) -> InternedString { - self.package_id().name() - } - pub fn version(&self) -> &Version { - self.package_id().version() - } - pub fn source_id(&self) -> SourceId { - self.package_id().source_id() - } - pub fn dependencies(&self) -> &[Dependency] { - &self.inner.dependencies - } - pub fn features(&self) -> &FeatureMap { - &self.inner.features - } - pub fn checksum(&self) -> Option<&str> { - self.inner.checksum.as_ref().map(|s| &s[..]) - } - pub fn links(&self) -> Option { - self.inner.links - } - pub fn namespaced_features(&self) -> bool { - self.inner.namespaced_features - } - - pub fn override_id(mut 
self, id: PackageId) -> Summary { - Rc::make_mut(&mut self.inner).package_id = id; - self - } - - pub fn set_checksum(&mut self, cksum: String) { - Rc::make_mut(&mut self.inner).checksum = Some(cksum); - } - - pub fn map_dependencies(mut self, f: F) -> Summary - where - F: FnMut(Dependency) -> Dependency, - { - { - let slot = &mut Rc::make_mut(&mut self.inner).dependencies; - let deps = mem::replace(slot, Vec::new()); - *slot = deps.into_iter().map(f).collect(); - } - self - } - - pub fn map_source(self, to_replace: SourceId, replace_with: SourceId) -> Summary { - let me = if self.package_id().source_id() == to_replace { - let new_id = self.package_id().with_source_id(replace_with); - self.override_id(new_id) - } else { - self - }; - me.map_dependencies(|dep| dep.map_source(to_replace, replace_with)) - } -} - -impl PartialEq for Summary { - fn eq(&self, other: &Summary) -> bool { - self.inner.package_id == other.inner.package_id - } -} - -impl Eq for Summary {} - -impl Hash for Summary { - fn hash(&self, state: &mut H) { - self.inner.package_id.hash(state); - } -} - -// Checks features for errors, bailing out a CargoResult:Err if invalid, -// and creates FeatureValues for each feature. -fn build_feature_map( - features: &BTreeMap>>, - dependencies: &[Dependency], - namespaced: bool, -) -> CargoResult -where - K: Borrow + Ord + Display, -{ - use self::FeatureValue::*; - let mut dep_map = HashMap::new(); - for dep in dependencies.iter() { - dep_map - .entry(dep.name_in_toml()) - .or_insert_with(Vec::new) - .push(dep); - } - - let mut map = BTreeMap::new(); - for (feature, list) in features.iter() { - // If namespaced features is active and the key is the same as that of an - // optional dependency, that dependency must be included in the values. - // Thus, if a `feature` is found that has the same name as a dependency, we - // (a) bail out if the dependency is non-optional, and (b) we track if the - // feature requirements include the dependency `crate:feature` in the list. - // This is done with the `dependency_found` variable, which can only be - // false if features are namespaced and the current feature key is the same - // as the name of an optional dependency. If so, it gets set to true during - // iteration over the list if the dependency is found in the list. - let mut dependency_found = if namespaced { - match dep_map.get(feature.borrow()) { - Some(dep_data) => { - if !dep_data.iter().any(|d| d.is_optional()) { - failure::bail!( - "Feature `{}` includes the dependency of the same name, but this is \ - left implicit in the features included by this feature.\n\ - Additionally, the dependency must be marked as optional to be \ - included in the feature definition.\n\ - Consider adding `crate:{}` to this feature's requirements \ - and marking the dependency as `optional = true`", - feature, - feature - ) - } else { - false - } - } - None => true, - } - } else { - true - }; - - let mut values = vec![]; - for dep in list { - let val = FeatureValue::build( - InternedString::new(dep.as_ref()), - |fs| features.contains_key(fs.as_str()), - namespaced, - ); - - // Find data for the referenced dependency... 
- let dep_data = { - match val { - Feature(ref dep_name) | Crate(ref dep_name) | CrateFeature(ref dep_name, _) => { - dep_map.get(dep_name.as_str()) - } - } - }; - let is_optional_dep = dep_data - .iter() - .flat_map(|d| d.iter()) - .any(|d| d.is_optional()); - if let FeatureValue::Crate(ref dep_name) = val { - // If we have a dependency value, check if this is the dependency named - // the same as the feature that we were looking for. - if !dependency_found && feature.borrow() == dep_name.as_str() { - dependency_found = true; - } - } - - match (&val, dep_data.is_some(), is_optional_dep) { - // The value is a feature. If features are namespaced, this just means - // it's not prefixed with `crate:`, so we have to check whether the - // feature actually exist. If the feature is not defined *and* an optional - // dependency of the same name exists, the feature is defined implicitly - // here by adding it to the feature map, pointing to the dependency. - // If features are not namespaced, it's been validated as a feature already - // while instantiating the `FeatureValue` in `FeatureValue::build()`, so - // we don't have to do so here. - (&Feature(feat), _, true) => { - if namespaced && !features.contains_key(&*feat) { - map.insert(feat, vec![FeatureValue::Crate(feat)]); - } - } - // If features are namespaced and the value is not defined as a feature - // and there is no optional dependency of the same name, error out. - // If features are not namespaced, there must be an existing feature - // here (checked by `FeatureValue::build()`), so it will always be defined. - (&Feature(feat), dep_exists, false) => { - if namespaced && !features.contains_key(&*feat) { - if dep_exists { - failure::bail!( - "Feature `{}` includes `{}` which is not defined as a feature.\n\ - A non-optional dependency of the same name is defined; consider \ - adding `optional = true` to its definition", - feature, - feat - ) - } else { - failure::bail!( - "Feature `{}` includes `{}` which is not defined as a feature", - feature, - feat - ) - } - } - } - // The value is a dependency. If features are namespaced, it is explicitly - // tagged as such (`crate:value`). If features are not namespaced, any value - // not recognized as a feature is pegged as a `Crate`. Here we handle the case - // where the dependency exists but is non-optional. It branches on namespaced - // just to provide the correct string for the crate dependency in the error. - (&Crate(ref dep), true, false) => { - if namespaced { - failure::bail!( - "Feature `{}` includes `crate:{}` which is not an \ - optional dependency.\nConsider adding \ - `optional = true` to the dependency", - feature, - dep - ) - } else { - failure::bail!( - "Feature `{}` depends on `{}` which is not an \ - optional dependency.\nConsider adding \ - `optional = true` to the dependency", - feature, - dep - ) - } - } - // If namespaced, the value was tagged as a dependency; if not namespaced, - // this could be anything not defined as a feature. This handles the case - // where no such dependency is actually defined; again, the branch on - // namespaced here is just to provide the correct string in the error. 
- (&Crate(ref dep), false, _) => { - if namespaced { - failure::bail!( - "Feature `{}` includes `crate:{}` which is not a known \ - dependency", - feature, - dep - ) - } else { - failure::bail!( - "Feature `{}` includes `{}` which is neither a dependency nor \ - another feature", - feature, - dep - ) - } - } - (&Crate(_), true, true) => {} - // If the value is a feature for one of the dependencies, bail out if no such - // dependency is actually defined in the manifest. - (&CrateFeature(ref dep, _), false, _) => failure::bail!( - "Feature `{}` requires a feature of `{}` which is not a \ - dependency", - feature, - dep - ), - (&CrateFeature(_, _), true, _) => {} - } - values.push(val); - } - - if !dependency_found { - // If we have not found the dependency of the same-named feature, we should - // bail here. - failure::bail!( - "Feature `{}` includes the optional dependency of the \ - same name, but this is left implicit in the features \ - included by this feature.\nConsider adding \ - `crate:{}` to this feature's requirements.", - feature, - feature - ) - } - - map.insert(InternedString::new(feature.borrow()), values); - } - Ok(map) -} - -/// FeatureValue represents the types of dependencies a feature can have: -/// -/// * Another feature -/// * An optional dependency -/// * A feature in a dependency -/// -/// The selection between these 3 things happens as part of the construction of the FeatureValue. -#[derive(Clone, Debug)] -pub enum FeatureValue { - Feature(InternedString), - Crate(InternedString), - CrateFeature(InternedString, InternedString), -} - -impl FeatureValue { - fn build(feature: InternedString, is_feature: T, namespaced: bool) -> FeatureValue - where - T: Fn(InternedString) -> bool, - { - match (feature.find('/'), namespaced) { - (Some(pos), _) => { - let (dep, dep_feat) = feature.split_at(pos); - let dep_feat = &dep_feat[1..]; - FeatureValue::CrateFeature(InternedString::new(dep), InternedString::new(dep_feat)) - } - (None, true) if feature.starts_with("crate:") => { - FeatureValue::Crate(InternedString::new(&feature[6..])) - } - (None, true) => FeatureValue::Feature(feature), - (None, false) if is_feature(feature) => FeatureValue::Feature(feature), - (None, false) => FeatureValue::Crate(feature), - } - } - - pub fn new(feature: InternedString, s: &Summary) -> FeatureValue { - Self::build( - feature, - |fs| s.features().contains_key(&fs), - s.namespaced_features(), - ) - } - - pub fn to_string(&self, s: &Summary) -> String { - use self::FeatureValue::*; - match *self { - Feature(ref f) => f.to_string(), - Crate(ref c) => { - if s.namespaced_features() { - format!("crate:{}", &c) - } else { - c.to_string() - } - } - CrateFeature(ref c, ref f) => [c.as_ref(), f.as_ref()].join("/"), - } - } -} - -impl Serialize for FeatureValue { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - use self::FeatureValue::*; - match *self { - Feature(ref f) => serializer.serialize_str(f), - Crate(ref c) => serializer.serialize_str(c), - CrateFeature(ref c, ref f) => { - serializer.serialize_str(&[c.as_ref(), f.as_ref()].join("/")) - } - } - } -} - -pub type FeatureMap = BTreeMap>; +use std::borrow::Borrow; +use std::collections::{BTreeMap, HashMap}; +use std::fmt::Display; +use std::hash::{Hash, Hasher}; +use std::mem; +use std::rc::Rc; + +use serde::{Serialize, Serializer}; + +use crate::core::interning::InternedString; +use crate::core::{Dependency, PackageId, SourceId}; +use semver::Version; + +use crate::util::{CargoResult, Platform}; + +/// Subset of a 
`Manifest`. Contains only the most important information about +/// a package. +/// +/// Summaries are cloned, and should not be mutated after creation +#[derive(Debug, Clone)] +pub struct Summary { + inner: Rc, +} + +#[derive(Debug, Clone)] +struct Inner { + package_id: PackageId, + dependencies: Vec, + features: FeatureMap, + checksum: Option, + links: Option, + namespaced_features: bool, +} + +impl Summary { + pub fn new( + pkg_id: PackageId, + dependencies: Vec, + features: &BTreeMap, Vec>)>, + links: Option>, + namespaced_features: bool, + ) -> CargoResult + where + K: Borrow + Ord + Display, + { + for dep in dependencies.iter() { + let feature = dep.name_in_toml(); + if !namespaced_features && features.get(&*feature).is_some() { + failure::bail!( + "Features and dependencies cannot have the \ + same name: `{}`", + feature + ) + } + if dep.is_optional() && !dep.is_transitive() { + failure::bail!( + "Dev-dependencies are not allowed to be optional: `{}`", + feature + ) + } + } + let feature_map = build_feature_map(features, &dependencies, namespaced_features)?; + Ok(Summary { + inner: Rc::new(Inner { + package_id: pkg_id, + dependencies, + features: feature_map, + checksum: None, + links: links.map(|l| InternedString::new(l.as_ref())), + namespaced_features, + }), + }) + } + + pub fn package_id(&self) -> PackageId { + self.inner.package_id + } + pub fn name(&self) -> InternedString { + self.package_id().name() + } + pub fn version(&self) -> &Version { + self.package_id().version() + } + pub fn source_id(&self) -> SourceId { + self.package_id().source_id() + } + pub fn dependencies(&self) -> &[Dependency] { + &self.inner.dependencies + } + pub fn features(&self) -> &FeatureMap { + &self.inner.features + } + pub fn checksum(&self) -> Option<&str> { + self.inner.checksum.as_ref().map(|s| &s[..]) + } + pub fn links(&self) -> Option { + self.inner.links + } + pub fn namespaced_features(&self) -> bool { + self.inner.namespaced_features + } + + pub fn override_id(mut self, id: PackageId) -> Summary { + Rc::make_mut(&mut self.inner).package_id = id; + self + } + + pub fn set_checksum(&mut self, cksum: String) { + Rc::make_mut(&mut self.inner).checksum = Some(cksum); + } + + pub fn map_dependencies(mut self, f: F) -> Summary + where + F: FnMut(Dependency) -> Dependency, + { + { + let slot = &mut Rc::make_mut(&mut self.inner).dependencies; + let deps = mem::replace(slot, Vec::new()); + *slot = deps.into_iter().map(f).collect(); + } + self + } + + pub fn map_source(self, to_replace: SourceId, replace_with: SourceId) -> Summary { + let me = if self.package_id().source_id() == to_replace { + let new_id = self.package_id().with_source_id(replace_with); + self.override_id(new_id) + } else { + self + }; + me.map_dependencies(|dep| dep.map_source(to_replace, replace_with)) + } +} + +impl PartialEq for Summary { + fn eq(&self, other: &Summary) -> bool { + self.inner.package_id == other.inner.package_id + } +} + +impl Eq for Summary {} + +impl Hash for Summary { + fn hash(&self, state: &mut H) { + self.inner.package_id.hash(state); + } +} + +// Checks features for errors, bailing out a CargoResult:Err if invalid, +// and creates FeatureValues for each feature. 
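// A toy illustration of the feature-map shape accepted by the new `Summary::new`
// signature above: judging from the `FeatureMap` alias further down, each feature now
// pairs a platform qualifier with its list of values. `Platform` is stubbed here as a
// plain `String` holding a cfg expression; in Cargo it is `crate::util::Platform`.
use std::collections::BTreeMap;

type ToyPlatform = String; // stand-in for cargo's `Platform`
type ToyFeatureMap = BTreeMap<String, (Option<ToyPlatform>, Vec<String>)>;

fn toy_features() -> ToyFeatureMap {
    let mut map = ToyFeatureMap::new();
    // An unconditional feature, exactly as before this change.
    map.insert("default".to_string(), (None, vec!["serde".to_string()]));
    // A feature whose values are qualified by a platform expression.
    map.insert(
        "win-console".to_string(),
        (
            Some("cfg(windows)".to_string()),
            vec!["winapi/consoleapi".to_string()],
        ),
    );
    map
}
// `build_feature_map` below validates entries of this shape and turns each value
// string into a `FeatureValue`.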
+fn build_feature_map( + features: &BTreeMap, Vec>)>, + dependencies: &[Dependency], + namespaced: bool, +) -> CargoResult +where + K: Borrow + Ord + Display, +{ + use self::FeatureValue::*; + let mut dep_map = HashMap::new(); + for dep in dependencies.iter() { + dep_map + .entry(dep.name_in_toml()) + .or_insert_with(Vec::new) + .push(dep); + } + + let mut map = BTreeMap::new(); + for (feature, list) in features.iter() { + // If namespaced features is active and the key is the same as that of an + // optional dependency, that dependency must be included in the values. + // Thus, if a `feature` is found that has the same name as a dependency, we + // (a) bail out if the dependency is non-optional, and (b) we track if the + // feature requirements include the dependency `crate:feature` in the list. + // This is done with the `dependency_found` variable, which can only be + // false if features are namespaced and the current feature key is the same + // as the name of an optional dependency. If so, it gets set to true during + // iteration over the list if the dependency is found in the list. + let mut dependency_found = if namespaced { + match dep_map.get(feature.borrow()) { + Some(dep_data) => { + if !dep_data.iter().any(|d| d.is_optional()) { + failure::bail!( + "Feature `{}` includes the dependency of the same name, but this is \ + left implicit in the features included by this feature.\n\ + Additionally, the dependency must be marked as optional to be \ + included in the feature definition.\n\ + Consider adding `crate:{}` to this feature's requirements \ + and marking the dependency as `optional = true`", + feature, + feature + ) + } else { + false + } + } + None => true, + } + } else { + true + }; + + let mut values = vec![]; + for dep in list.1.as_slice() { + let val = FeatureValue::build( + InternedString::new(dep.as_ref()), + |fs| features.contains_key(fs.as_str()), + namespaced, + ); + + // Find data for the referenced dependency... + let dep_data = { + match val { + Feature(ref dep_name) | Crate(ref dep_name) | CrateFeature(ref dep_name, _) => { + dep_map.get(dep_name.as_str()) + } + } + }; + let is_optional_dep = dep_data + .iter() + .flat_map(|d| d.iter()) + .any(|d| d.is_optional()); + if let FeatureValue::Crate(ref dep_name) = val { + // If we have a dependency value, check if this is the dependency named + // the same as the feature that we were looking for. + if !dependency_found && feature.borrow() == dep_name.as_str() { + dependency_found = true; + } + } + + match (&val, dep_data.is_some(), is_optional_dep) { + // The value is a feature. If features are namespaced, this just means + // it's not prefixed with `crate:`, so we have to check whether the + // feature actually exist. If the feature is not defined *and* an optional + // dependency of the same name exists, the feature is defined implicitly + // here by adding it to the feature map, pointing to the dependency. + // If features are not namespaced, it's been validated as a feature already + // while instantiating the `FeatureValue` in `FeatureValue::build()`, so + // we don't have to do so here. + (&Feature(feat), _, true) => { + if namespaced && !features.contains_key(&*feat) { + map.insert(feat, (list.0.clone(), vec![FeatureValue::Crate(feat)])); + } + } + // If features are namespaced and the value is not defined as a feature + // and there is no optional dependency of the same name, error out. 
+ // If features are not namespaced, there must be an existing feature + // here (checked by `FeatureValue::build()`), so it will always be defined. + (&Feature(feat), dep_exists, false) => { + if namespaced && !features.contains_key(&*feat) { + if dep_exists { + failure::bail!( + "Feature `{}` includes `{}` which is not defined as a feature.\n\ + A non-optional dependency of the same name is defined; consider \ + adding `optional = true` to its definition", + feature, + feat + ) + } else { + failure::bail!( + "Feature `{}` includes `{}` which is not defined as a feature", + feature, + feat + ) + } + } + } + // The value is a dependency. If features are namespaced, it is explicitly + // tagged as such (`crate:value`). If features are not namespaced, any value + // not recognized as a feature is pegged as a `Crate`. Here we handle the case + // where the dependency exists but is non-optional. It branches on namespaced + // just to provide the correct string for the crate dependency in the error. + (&Crate(ref dep), true, false) => { + if namespaced { + failure::bail!( + "Feature `{}` includes `crate:{}` which is not an \ + optional dependency.\nConsider adding \ + `optional = true` to the dependency", + feature, + dep + ) + } else { + failure::bail!( + "Feature `{}` depends on `{}` which is not an \ + optional dependency.\nConsider adding \ + `optional = true` to the dependency", + feature, + dep + ) + } + } + // If namespaced, the value was tagged as a dependency; if not namespaced, + // this could be anything not defined as a feature. This handles the case + // where no such dependency is actually defined; again, the branch on + // namespaced here is just to provide the correct string in the error. + (&Crate(ref dep), false, _) => { + if namespaced { + failure::bail!( + "Feature `{}` includes `crate:{}` which is not a known \ + dependency", + feature, + dep + ) + } else { + failure::bail!( + "Feature `{}` includes `{}` which is neither a dependency nor \ + another feature", + feature, + dep + ) + } + } + (&Crate(_), true, true) => {} + // If the value is a feature for one of the dependencies, bail out if no such + // dependency is actually defined in the manifest. + (&CrateFeature(ref dep, _), false, _) => failure::bail!( + "Feature `{}` requires a feature of `{}` which is not a \ + dependency", + feature, + dep + ), + (&CrateFeature(_, _), true, _) => {} + } + values.push(val); + } + + if !dependency_found { + // If we have not found the dependency of the same-named feature, we should + // bail here. + failure::bail!( + "Feature `{}` includes the optional dependency of the \ + same name, but this is left implicit in the features \ + included by this feature.\nConsider adding \ + `crate:{}` to this feature's requirements.", + feature, + feature + ) + } + + map.insert( + InternedString::new(feature.borrow()), + (list.0.clone(), values), + ); + } + Ok(map) +} + +/// FeatureValue represents the types of dependencies a feature can have: +/// +/// * Another feature +/// * An optional dependency +/// * A feature in a dependency +/// +/// The selection between these 3 things happens as part of the construction of the FeatureValue. 
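// A condensed, self-contained restatement of the parsing rules in `FeatureValue::build`
// below, using plain strings instead of `InternedString`. The enum and function mirror
// the real ones but are only a sketch for illustration.
#[derive(Debug, PartialEq)]
enum ToyFeatureValue {
    Feature(String),
    Crate(String),
    CrateFeature(String, String),
}

fn parse_feature_value(
    value: &str,
    is_feature: impl Fn(&str) -> bool, // "is this name a declared feature?"
    namespaced: bool,
) -> ToyFeatureValue {
    if let Some(pos) = value.find('/') {
        // "dep/feat" always names a feature of a dependency.
        return ToyFeatureValue::CrateFeature(value[..pos].to_string(), value[pos + 1..].to_string());
    }
    if namespaced {
        if let Some(rest) = value.strip_prefix("crate:") {
            return ToyFeatureValue::Crate(rest.to_string());
        }
        // Without the `crate:` prefix, a namespaced value is always a feature name.
        return ToyFeatureValue::Feature(value.to_string());
    }
    // Not namespaced: known feature names are features, anything else is a dependency.
    if is_feature(value) {
        ToyFeatureValue::Feature(value.to_string())
    } else {
        ToyFeatureValue::Crate(value.to_string())
    }
}

#[test]
fn parse_examples() {
    let is_feature = |f: &str| f == "default";
    assert_eq!(
        parse_feature_value("serde/derive", &is_feature, false),
        ToyFeatureValue::CrateFeature("serde".into(), "derive".into())
    );
    assert_eq!(
        parse_feature_value("crate:serde", &is_feature, true),
        ToyFeatureValue::Crate("serde".into())
    );
    assert_eq!(
        parse_feature_value("default", &is_feature, false),
        ToyFeatureValue::Feature("default".into())
    );
    assert_eq!(
        parse_feature_value("serde", &is_feature, false),
        ToyFeatureValue::Crate("serde".into())
    );
}
// The real `FeatureValue` enum and its `build` constructor follow below.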
+#[derive(Clone, Debug)] +pub enum FeatureValue { + Feature(InternedString), + Crate(InternedString), + CrateFeature(InternedString, InternedString), +} + +impl FeatureValue { + fn build(feature: InternedString, is_feature: T, namespaced: bool) -> FeatureValue + where + T: Fn(InternedString) -> bool, + { + match (feature.find('/'), namespaced) { + (Some(pos), _) => { + let (dep, dep_feat) = feature.split_at(pos); + let dep_feat = &dep_feat[1..]; + FeatureValue::CrateFeature(InternedString::new(dep), InternedString::new(dep_feat)) + } + (None, true) if feature.starts_with("crate:") => { + FeatureValue::Crate(InternedString::new(&feature[6..])) + } + (None, true) => FeatureValue::Feature(feature), + (None, false) if is_feature(feature) => FeatureValue::Feature(feature), + (None, false) => FeatureValue::Crate(feature), + } + } + + pub fn new(feature: InternedString, s: &Summary) -> FeatureValue { + Self::build( + feature, + |fs| s.features().contains_key(&fs), + s.namespaced_features(), + ) + } + + pub fn to_string(&self, s: &Summary) -> String { + use self::FeatureValue::*; + match *self { + Feature(ref f) => f.to_string(), + Crate(ref c) => { + if s.namespaced_features() { + format!("crate:{}", &c) + } else { + c.to_string() + } + } + CrateFeature(ref c, ref f) => [c.as_ref(), f.as_ref()].join("/"), + } + } +} + +impl Serialize for FeatureValue { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + use self::FeatureValue::*; + match *self { + Feature(ref f) => serializer.serialize_str(f), + Crate(ref c) => serializer.serialize_str(c), + CrateFeature(ref c, ref f) => { + serializer.serialize_str(&[c.as_ref(), f.as_ref()].join("/")) + } + } + } +} + +pub type FeatureMap = BTreeMap, Vec)>; +pub type RefFeatureMap<'a> = BTreeMap; diff --git a/src/cargo/ops/cargo_compile.rs b/src/cargo/ops/cargo_compile.rs index c68d4fc1b1d..59044b3289a 100644 --- a/src/cargo/ops/cargo_compile.rs +++ b/src/cargo/ops/cargo_compile.rs @@ -1,939 +1,939 @@ -//! The Cargo "compile" operation. -//! -//! This module contains the entry point for starting the compilation process -//! for commands like `build`, `test`, `doc`, `rustc`, etc. -//! -//! The `compile` function will do all the work to compile a workspace. A -//! rough outline is: -//! -//! - Resolve the dependency graph (see `ops::resolve`). -//! - Download any packages needed (see `PackageSet`). -//! - Generate a list of top-level "units" of work for the targets the user -//! requested on the command-line. Each `Unit` corresponds to a compiler -//! invocation. This is done in this module (`generate_targets`). -//! - Create a `Context` which will perform the following steps: -//! - Build the graph of `Unit` dependencies (see -//! `core::compiler::context::unit_dependencies`). -//! - Prepare the `target` directory (see `Layout`). -//! - Create a job queue (see `JobQueue`). The queue checks the -//! fingerprint of each `Unit` to determine if it should run or be -//! skipped. -//! - Execute the queue. Each leaf in the queue's dependency graph is -//! executed, and then removed from the graph when finished. This -//! repeats until the queue is empty. 
- -use std::collections::{BTreeSet, HashMap, HashSet}; -use std::iter::FromIterator; -use std::path::PathBuf; -use std::sync::Arc; - -use crate::core::compiler::{BuildConfig, BuildContext, Compilation, Context}; -use crate::core::compiler::{CompileMode, Kind, Unit}; -use crate::core::compiler::{DefaultExecutor, Executor, UnitInterner}; -use crate::core::profiles::{Profiles, UnitFor}; -use crate::core::resolver::{Resolve, ResolveOpts}; -use crate::core::{Package, Target}; -use crate::core::{PackageId, PackageIdSpec, TargetKind, Workspace}; -use crate::ops; -use crate::util::config::Config; -use crate::util::{closest_msg, profile, CargoResult}; - -/// Contains information about how a package should be compiled. -#[derive(Debug)] -pub struct CompileOptions<'a> { - pub config: &'a Config, - /// Configuration information for a rustc build - pub build_config: BuildConfig, - /// Extra features to build for the root package - pub features: Vec, - /// Flag whether all available features should be built for the root package - pub all_features: bool, - /// Flag if the default feature should be built for the root package - pub no_default_features: bool, - /// A set of packages to build. - pub spec: Packages, - /// Filter to apply to the root package to select which targets will be - /// built. - pub filter: CompileFilter, - /// Extra arguments to be passed to rustdoc (single target only) - pub target_rustdoc_args: Option>, - /// The specified target will be compiled with all the available arguments, - /// note that this only accounts for the *final* invocation of rustc - pub target_rustc_args: Option>, - /// Extra arguments passed to all selected targets for rustdoc. - pub local_rustdoc_args: Option>, - /// The directory to copy final artifacts to. Note that even if `out_dir` is - /// set, a copy of artifacts still could be found a `target/(debug\release)` - /// as usual. - // Note that, although the cmd-line flag name is `out-dir`, in code we use - // `export_dir`, to avoid confusion with out dir at `target/debug/deps`. 
- pub export_dir: Option, -} - -impl<'a> CompileOptions<'a> { - pub fn new(config: &'a Config, mode: CompileMode) -> CargoResult> { - Ok(CompileOptions { - config, - build_config: BuildConfig::new(config, None, &None, mode)?, - features: Vec::new(), - all_features: false, - no_default_features: false, - spec: ops::Packages::Packages(Vec::new()), - filter: CompileFilter::Default { - required_features_filterable: false, - }, - target_rustdoc_args: None, - target_rustc_args: None, - local_rustdoc_args: None, - export_dir: None, - }) - } -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub enum Packages { - Default, - All, - OptOut(Vec), - Packages(Vec), -} - -impl Packages { - pub fn from_flags(all: bool, exclude: Vec, package: Vec) -> CargoResult { - Ok(match (all, exclude.len(), package.len()) { - (false, 0, 0) => Packages::Default, - (false, 0, _) => Packages::Packages(package), - (false, _, _) => failure::bail!("--exclude can only be used together with --all"), - (true, 0, _) => Packages::All, - (true, _, _) => Packages::OptOut(exclude), - }) - } - - pub fn to_package_id_specs(&self, ws: &Workspace<'_>) -> CargoResult> { - let specs = match self { - Packages::All => ws - .members() - .map(Package::package_id) - .map(PackageIdSpec::from_package_id) - .collect(), - Packages::OptOut(opt_out) => { - let mut opt_out = BTreeSet::from_iter(opt_out.iter().cloned()); - let packages = ws - .members() - .filter(|pkg| !opt_out.remove(pkg.name().as_str())) - .map(Package::package_id) - .map(PackageIdSpec::from_package_id) - .collect(); - if !opt_out.is_empty() { - ws.config().shell().warn(format!( - "excluded package(s) {} not found in workspace `{}`", - opt_out - .iter() - .map(|x| x.as_ref()) - .collect::>() - .join(", "), - ws.root().display(), - ))?; - } - packages - } - Packages::Packages(packages) if packages.is_empty() => { - vec![PackageIdSpec::from_package_id(ws.current()?.package_id())] - } - Packages::Packages(packages) => packages - .iter() - .map(|p| PackageIdSpec::parse(p)) - .collect::>>()?, - Packages::Default => ws - .default_members() - .map(Package::package_id) - .map(PackageIdSpec::from_package_id) - .collect(), - }; - if specs.is_empty() { - if ws.is_virtual() { - failure::bail!( - "manifest path `{}` contains no package: The manifest is virtual, \ - and the workspace has no members.", - ws.root().display() - ) - } - failure::bail!("no packages to compile") - } - Ok(specs) - } - - pub fn get_packages<'ws>(&self, ws: &'ws Workspace<'_>) -> CargoResult> { - let packages: Vec<_> = match self { - Packages::Default => ws.default_members().collect(), - Packages::All => ws.members().collect(), - Packages::OptOut(opt_out) => ws - .members() - .filter(|pkg| !opt_out.iter().any(|name| pkg.name().as_str() == name)) - .collect(), - Packages::Packages(packages) => packages - .iter() - .map(|name| { - ws.members() - .find(|pkg| pkg.name().as_str() == name) - .ok_or_else(|| { - failure::format_err!( - "package `{}` is not a member of the workspace", - name - ) - }) - }) - .collect::>>()?, - }; - Ok(packages) - } - - /// Returns whether or not the user needs to pass a `-p` flag to target a - /// specific package in the workspace. 
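// A self-contained restatement of the `Packages::from_flags` decision table above:
// how `--all`, `--exclude`, and `-p` combine into a package selection. `ToyPackages`
// and `toy_from_flags` are simplified stand-ins for the real types.
#[derive(Debug, PartialEq)]
enum ToyPackages {
    Default,
    All,
    OptOut(Vec<String>),
    Packages(Vec<String>),
}

fn toy_from_flags(all: bool, exclude: Vec<String>, package: Vec<String>) -> Result<ToyPackages, String> {
    Ok(match (all, exclude.len(), package.len()) {
        (false, 0, 0) => ToyPackages::Default,           // no flags: the default members
        (false, 0, _) => ToyPackages::Packages(package), // `-p foo -p bar`
        (false, _, _) => return Err("--exclude can only be used together with --all".into()),
        (true, 0, _) => ToyPackages::All,                // `--all` (the `-p` list is not consulted in this arm)
        (true, _, _) => ToyPackages::OptOut(exclude),    // `--all --exclude foo`
    })
}

#[test]
fn from_flags_examples() {
    assert_eq!(toy_from_flags(false, vec![], vec![]).unwrap(), ToyPackages::Default);
    assert_eq!(
        toy_from_flags(true, vec!["foo".into()], vec![]).unwrap(),
        ToyPackages::OptOut(vec!["foo".into()])
    );
    assert!(toy_from_flags(false, vec!["foo".into()], vec![]).is_err());
}
// `needs_spec_flag` below answers the related question of whether `-p` must be given.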
- pub fn needs_spec_flag(&self, ws: &Workspace<'_>) -> bool { - match self { - Packages::Default => ws.default_members().count() > 1, - Packages::All => ws.members().count() > 1, - Packages::Packages(_) => true, - Packages::OptOut(_) => true, - } - } -} - -#[derive(Debug, PartialEq, Eq)] -pub enum LibRule { - /// Include the library, fail if not present - True, - /// Include the library if present - Default, - /// Exclude the library - False, -} - -#[derive(Debug)] -pub enum FilterRule { - All, - Just(Vec), -} - -#[derive(Debug)] -pub enum CompileFilter { - Default { - /// Flag whether targets can be safely skipped when required-features are not satisfied. - required_features_filterable: bool, - }, - Only { - all_targets: bool, - lib: LibRule, - bins: FilterRule, - examples: FilterRule, - tests: FilterRule, - benches: FilterRule, - }, -} - -pub fn compile<'a>( - ws: &Workspace<'a>, - options: &CompileOptions<'a>, -) -> CargoResult> { - let exec: Arc = Arc::new(DefaultExecutor); - compile_with_exec(ws, options, &exec) -} - -/// Like `compile` but allows specifying a custom `Executor` that will be able to intercept build -/// calls and add custom logic. `compile` uses `DefaultExecutor` which just passes calls through. -pub fn compile_with_exec<'a>( - ws: &Workspace<'a>, - options: &CompileOptions<'a>, - exec: &Arc, -) -> CargoResult> { - ws.emit_warnings()?; - compile_ws(ws, options, exec) -} - -pub fn compile_ws<'a>( - ws: &Workspace<'a>, - options: &CompileOptions<'a>, - exec: &Arc, -) -> CargoResult> { - let CompileOptions { - config, - ref build_config, - ref spec, - ref features, - all_features, - no_default_features, - ref filter, - ref target_rustdoc_args, - ref target_rustc_args, - ref local_rustdoc_args, - ref export_dir, - } = *options; - - match build_config.mode { - CompileMode::Test - | CompileMode::Build - | CompileMode::Check { .. } - | CompileMode::Bench - | CompileMode::RunCustomBuild => { - if std::env::var("RUST_FLAGS").is_ok() { - config.shell().warn( - "Cargo does not read `RUST_FLAGS` environment variable. Did you mean `RUSTFLAGS`?", - )?; - } - } - CompileMode::Doc { .. } | CompileMode::Doctest => { - if std::env::var("RUSTDOC_FLAGS").is_ok() { - config.shell().warn( - "Cargo does not read `RUSTDOC_FLAGS` environment variable. Did you mean `RUSTDOCFLAGS`?" - )?; - } - } - } - - let default_arch_kind = if build_config.requested_target.is_some() { - Kind::Target - } else { - Kind::Host - }; - - let specs = spec.to_package_id_specs(ws)?; - let dev_deps = ws.require_optional_deps() || filter.need_dev_deps(build_config.mode); - let opts = ResolveOpts::new(dev_deps, features, all_features, !no_default_features); - let resolve = ops::resolve_ws_with_opts(ws, opts, &specs)?; - let (packages, resolve_with_overrides) = resolve; - - let to_build_ids = specs - .iter() - .map(|s| s.query(resolve_with_overrides.iter())) - .collect::>>()?; - let mut to_builds = packages.get_many(to_build_ids)?; - - // The ordering here affects some error messages coming out of cargo, so - // let's be test and CLI friendly by always printing in the same order if - // there's an error. 
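// A reduced sketch of the dev-dependency decision made just above when building
// `ResolveOpts`: dev-dependencies are needed for test-like modes, or when the target
// filter explicitly selects examples, tests, or benches. `ToyMode` is a simplified
// stand-in; the real rule is `CompileFilter::need_dev_deps` further down in this file.
enum ToyMode {
    Build,
    Test,
    Bench,
    Doctest,
    Doc,
    Check,
}

fn toy_need_dev_deps(mode: ToyMode, filter_selects_test_targets: bool) -> bool {
    match mode {
        // `cargo test`, `cargo bench`, and doc tests always need dev-dependencies.
        ToyMode::Test | ToyMode::Bench | ToyMode::Doctest => true,
        // Plain builds, checks, and docs only need them if the filter asked for
        // examples, tests, or benches explicitly (e.g. `cargo build --tests`).
        ToyMode::Build | ToyMode::Doc | ToyMode::Check => filter_selects_test_targets,
    }
}
// Back in `compile_ws`: the sort below keeps error output deterministic, as the
// comment above notes.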
- to_builds.sort_by_key(|p| p.package_id()); - - for pkg in to_builds.iter() { - pkg.manifest().print_teapot(ws.config()); - - if build_config.mode.is_any_test() - && !ws.is_member(pkg) - && pkg.dependencies().iter().any(|dep| !dep.is_transitive()) - { - failure::bail!( - "package `{}` cannot be tested because it requires dev-dependencies \ - and is not a member of the workspace", - pkg.name() - ); - } - } - - let (extra_args, extra_args_name) = match (target_rustc_args, target_rustdoc_args) { - (&Some(ref args), _) => (Some(args.clone()), "rustc"), - (_, &Some(ref args)) => (Some(args.clone()), "rustdoc"), - _ => (None, ""), - }; - - if extra_args.is_some() && to_builds.len() != 1 { - panic!( - "`{}` should not accept multiple `-p` flags", - extra_args_name - ); - } - - let profiles = ws.profiles(); - profiles.validate_packages(&mut config.shell(), &packages)?; - - let interner = UnitInterner::new(); - let mut bcx = BuildContext::new( - ws, - &resolve_with_overrides, - &packages, - config, - build_config, - profiles, - &interner, - HashMap::new(), - )?; - let units = generate_targets( - ws, - profiles, - &to_builds, - filter, - default_arch_kind, - &resolve_with_overrides, - &bcx, - )?; - - if let Some(args) = extra_args { - if units.len() != 1 { - failure::bail!( - "extra arguments to `{}` can only be passed to one \ - target, consider filtering\nthe package by passing, \ - e.g., `--lib` or `--bin NAME` to specify a single target", - extra_args_name - ); - } - bcx.extra_compiler_args.insert(units[0], args); - } - if let Some(args) = local_rustdoc_args { - for unit in &units { - if unit.mode.is_doc() || unit.mode.is_doc_test() { - bcx.extra_compiler_args.insert(*unit, args.clone()); - } - } - } - - let ret = { - let _p = profile::start("compiling"); - let cx = Context::new(config, &bcx)?; - cx.compile(&units, export_dir.clone(), exec)? - }; - - Ok(ret) -} - -impl FilterRule { - pub fn new(targets: Vec, all: bool) -> FilterRule { - if all { - FilterRule::All - } else { - FilterRule::Just(targets) - } - } - - pub fn none() -> FilterRule { - FilterRule::Just(Vec::new()) - } - - fn matches(&self, target: &Target) -> bool { - match *self { - FilterRule::All => true, - FilterRule::Just(ref targets) => targets.iter().any(|x| *x == target.name()), - } - } - - fn is_specific(&self) -> bool { - match *self { - FilterRule::All => true, - FilterRule::Just(ref targets) => !targets.is_empty(), - } - } - - pub fn try_collect(&self) -> Option> { - match *self { - FilterRule::All => None, - FilterRule::Just(ref targets) => Some(targets.clone()), - } - } -} - -impl CompileFilter { - /// Construct a CompileFilter from raw command line arguments. - pub fn from_raw_arguments( - lib_only: bool, - bins: Vec, - all_bins: bool, - tsts: Vec, - all_tsts: bool, - exms: Vec, - all_exms: bool, - bens: Vec, - all_bens: bool, - all_targets: bool, - ) -> CompileFilter { - let rule_lib = if lib_only { - LibRule::True - } else { - LibRule::False - }; - let rule_bins = FilterRule::new(bins, all_bins); - let rule_tsts = FilterRule::new(tsts, all_tsts); - let rule_exms = FilterRule::new(exms, all_exms); - let rule_bens = FilterRule::new(bens, all_bens); - - if all_targets { - CompileFilter::Only { - all_targets: true, - lib: LibRule::Default, - bins: FilterRule::All, - examples: FilterRule::All, - benches: FilterRule::All, - tests: FilterRule::All, - } - } else { - CompileFilter::new(rule_lib, rule_bins, rule_tsts, rule_exms, rule_bens) - } - } - - /// Construct a CompileFilter from underlying primitives. 
- pub fn new( - rule_lib: LibRule, - rule_bins: FilterRule, - rule_tsts: FilterRule, - rule_exms: FilterRule, - rule_bens: FilterRule, - ) -> CompileFilter { - if rule_lib == LibRule::True - || rule_bins.is_specific() - || rule_tsts.is_specific() - || rule_exms.is_specific() - || rule_bens.is_specific() - { - CompileFilter::Only { - all_targets: false, - lib: rule_lib, - bins: rule_bins, - examples: rule_exms, - benches: rule_bens, - tests: rule_tsts, - } - } else { - CompileFilter::Default { - required_features_filterable: true, - } - } - } - - pub fn need_dev_deps(&self, mode: CompileMode) -> bool { - match mode { - CompileMode::Test | CompileMode::Doctest | CompileMode::Bench => true, - CompileMode::Build | CompileMode::Doc { .. } | CompileMode::Check { .. } => match *self - { - CompileFilter::Default { .. } => false, - CompileFilter::Only { - ref examples, - ref tests, - ref benches, - .. - } => examples.is_specific() || tests.is_specific() || benches.is_specific(), - }, - CompileMode::RunCustomBuild => panic!("Invalid mode"), - } - } - - // this selects targets for "cargo run". for logic to select targets for - // other subcommands, see generate_targets and filter_default_targets - pub fn target_run(&self, target: &Target) -> bool { - match *self { - CompileFilter::Default { .. } => true, - CompileFilter::Only { - ref lib, - ref bins, - ref examples, - ref tests, - ref benches, - .. - } => { - let rule = match *target.kind() { - TargetKind::Bin => bins, - TargetKind::Test => tests, - TargetKind::Bench => benches, - TargetKind::ExampleBin | TargetKind::ExampleLib(..) => examples, - TargetKind::Lib(..) => { - return match *lib { - LibRule::True => true, - LibRule::Default => true, - LibRule::False => false, - }; - } - TargetKind::CustomBuild => return false, - }; - rule.matches(target) - } - } - } - - pub fn is_specific(&self) -> bool { - match *self { - CompileFilter::Default { .. } => false, - CompileFilter::Only { .. } => true, - } - } -} - -/// A proposed target. -/// -/// Proposed targets are later filtered into actual `Unit`s based on whether or -/// not the target requires its features to be present. -#[derive(Debug)] -struct Proposal<'a> { - pkg: &'a Package, - target: &'a Target, - /// Indicates whether or not all required features *must* be present. If - /// false, and the features are not available, then it will be silently - /// skipped. Generally, targets specified by name (`--bin foo`) are - /// required, all others can be silently skipped if features are missing. - requires_features: bool, - mode: CompileMode, -} - -/// Generates all the base targets for the packages the user has requested to -/// compile. Dependencies for these targets are computed later in `unit_dependencies`. -fn generate_targets<'a>( - ws: &Workspace<'_>, - profiles: &Profiles, - packages: &[&'a Package], - filter: &CompileFilter, - default_arch_kind: Kind, - resolve: &Resolve, - bcx: &BuildContext<'a, '_>, -) -> CargoResult>> { - // Helper for creating a `Unit` struct. - let new_unit = |pkg: &'a Package, target: &'a Target, target_mode: CompileMode| { - let unit_for = if bcx.build_config.mode.is_any_test() { - // NOTE: the `UnitFor` here is subtle. If you have a profile - // with `panic` set, the `panic` flag is cleared for - // tests/benchmarks and their dependencies. If this - // was `normal`, then the lib would get compiled three - // times (once with panic, once without, and once with - // `--test`). 
- // - // This would cause a problem for doc tests, which would fail - // because `rustdoc` would attempt to link with both libraries - // at the same time. Also, it's probably not important (or - // even desirable?) for rustdoc to link with a lib with - // `panic` set. - // - // As a consequence, Examples and Binaries get compiled - // without `panic` set. This probably isn't a bad deal. - // - // Forcing the lib to be compiled three times during `cargo - // test` is probably also not desirable. - UnitFor::new_test() - } else if target.for_host() { - // Proc macro / plugin should not have `panic` set. - UnitFor::new_compiler() - } else { - UnitFor::new_normal() - }; - // Custom build units are added in `build_unit_dependencies`. - assert!(!target.is_custom_build()); - let target_mode = match target_mode { - CompileMode::Test => { - if target.is_example() && !filter.is_specific() && !target.tested() { - // Examples are included as regular binaries to verify - // that they compile. - CompileMode::Build - } else { - CompileMode::Test - } - } - CompileMode::Build => match *target.kind() { - TargetKind::Test => CompileMode::Test, - TargetKind::Bench => CompileMode::Bench, - _ => CompileMode::Build, - }, - // `CompileMode::Bench` is only used to inform `filter_default_targets` - // which command is being used (`cargo bench`). Afterwards, tests - // and benches are treated identically. Switching the mode allows - // de-duplication of units that are essentially identical. For - // example, `cargo build --all-targets --release` creates the units - // (lib profile:bench, mode:test) and (lib profile:bench, mode:bench) - // and since these are the same, we want them to be de-duplicated in - // `unit_dependencies`. - CompileMode::Bench => CompileMode::Test, - _ => target_mode, - }; - // Plugins or proc macros should be built for the host. - let kind = if target.for_host() { - Kind::Host - } else { - default_arch_kind - }; - let profile = profiles.get_profile( - pkg.package_id(), - ws.is_member(pkg), - unit_for, - target_mode, - bcx.build_config.release, - ); - bcx.units.intern(pkg, target, profile, kind, target_mode) - }; - - // Create a list of proposed targets. - let mut proposals: Vec> = Vec::new(); - - match *filter { - CompileFilter::Default { - required_features_filterable, - } => { - for pkg in packages { - let default = filter_default_targets(pkg.targets(), bcx.build_config.mode); - proposals.extend(default.into_iter().map(|target| Proposal { - pkg, - target, - requires_features: !required_features_filterable, - mode: bcx.build_config.mode, - })); - if bcx.build_config.mode == CompileMode::Test { - if let Some(t) = pkg - .targets() - .iter() - .find(|t| t.is_lib() && t.doctested() && t.doctestable()) - { - proposals.push(Proposal { - pkg, - target: t, - requires_features: false, - mode: CompileMode::Doctest, - }); - } - } - } - } - CompileFilter::Only { - all_targets, - ref lib, - ref bins, - ref examples, - ref tests, - ref benches, - } => { - if *lib != LibRule::False { - let mut libs = Vec::new(); - for proposal in - filter_targets(packages, Target::is_lib, false, bcx.build_config.mode) - { - let Proposal { target, pkg, .. 
} = proposal; - if bcx.build_config.mode.is_doc_test() && !target.doctestable() { - ws.config().shell().warn(format!( - "doc tests are not supported for crate type(s) `{}` in package `{}`", - target.rustc_crate_types().join(", "), - pkg.name() - ))?; - } else { - libs.push(proposal) - } - } - if !all_targets && libs.is_empty() && *lib == LibRule::True { - let names = packages.iter().map(|pkg| pkg.name()).collect::>(); - if names.len() == 1 { - failure::bail!("no library targets found in package `{}`", names[0]); - } else { - failure::bail!( - "no library targets found in packages: {}", - names.join(", ") - ); - } - } - proposals.extend(libs); - } - - // If `--tests` was specified, add all targets that would be - // generated by `cargo test`. - let test_filter = match tests { - FilterRule::All => Target::tested, - FilterRule::Just(_) => Target::is_test, - }; - let test_mode = match bcx.build_config.mode { - CompileMode::Build => CompileMode::Test, - CompileMode::Check { .. } => CompileMode::Check { test: true }, - _ => bcx.build_config.mode, - }; - // If `--benches` was specified, add all targets that would be - // generated by `cargo bench`. - let bench_filter = match benches { - FilterRule::All => Target::benched, - FilterRule::Just(_) => Target::is_bench, - }; - let bench_mode = match bcx.build_config.mode { - CompileMode::Build => CompileMode::Bench, - CompileMode::Check { .. } => CompileMode::Check { test: true }, - _ => bcx.build_config.mode, - }; - - proposals.extend(list_rule_targets( - packages, - bins, - "bin", - Target::is_bin, - bcx.build_config.mode, - )?); - proposals.extend(list_rule_targets( - packages, - examples, - "example", - Target::is_example, - bcx.build_config.mode, - )?); - proposals.extend(list_rule_targets( - packages, - tests, - "test", - test_filter, - test_mode, - )?); - proposals.extend(list_rule_targets( - packages, - benches, - "bench", - bench_filter, - bench_mode, - )?); - } - } - - // Only include targets that are libraries or have all required - // features available. - let mut features_map = HashMap::new(); - let mut units = HashSet::new(); - for Proposal { - pkg, - target, - requires_features, - mode, - } in proposals - { - let unavailable_features = match target.required_features() { - Some(rf) => { - let features = features_map - .entry(pkg) - .or_insert_with(|| resolve_all_features(resolve, pkg.package_id())); - rf.iter().filter(|f| !features.contains(*f)).collect() - } - None => Vec::new(), - }; - if target.is_lib() || unavailable_features.is_empty() { - let unit = new_unit(pkg, target, mode); - units.insert(unit); - } else if requires_features { - let required_features = target.required_features().unwrap(); - let quoted_required_features: Vec = required_features - .iter() - .map(|s| format!("`{}`", s)) - .collect(); - failure::bail!( - "target `{}` in package `{}` requires the features: {}\n\ - Consider enabling them by passing, e.g., `--features=\"{}\"`", - target.name(), - pkg.name(), - quoted_required_features.join(", "), - required_features.join(" ") - ); - } - // else, silently skip target. - } - Ok(units.into_iter().collect()) -} - -fn resolve_all_features( - resolve_with_overrides: &Resolve, - package_id: PackageId, -) -> HashSet { - let mut features = resolve_with_overrides.features(package_id).clone(); - - // Include features enabled for use by dependencies so targets can also use them with the - // required-features field when deciding whether to be built or skipped. 
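// A compact sketch of the required-features check described above: a target is kept
// if it is a library, or if every entry in its `required-features` is present in the
// resolved feature set, which also contains "dep/feature" strings for features enabled
// on dependencies. The helper below is illustrative, not Cargo's API.
use std::collections::HashSet;

fn toy_keep_target(
    is_lib: bool,
    required_features: Option<&[&str]>,
    available: &HashSet<String>,
) -> bool {
    match required_features {
        _ if is_lib => true, // libraries are always kept
        None => true,        // no required-features field: nothing to check
        Some(rf) => rf.iter().all(|f| available.contains(*f)),
    }
}

#[test]
fn keep_target_examples() {
    let available: HashSet<String> = ["default", "postgres", "serde/derive"]
        .iter()
        .map(|s| s.to_string())
        .collect();
    // Satisfied by the package's own feature plus a dependency feature.
    assert!(toy_keep_target(false, Some(&["postgres", "serde/derive"]), &available));
    // Missing feature: the proposal is skipped (or errors if it was requested by name).
    assert!(!toy_keep_target(false, Some(&["mysql"]), &available));
}
// The loop below collects exactly those "dep/feature" strings from the resolve.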
- for (dep, _) in resolve_with_overrides.deps(package_id) { - for feature in resolve_with_overrides.features(dep) { - features.insert(dep.name().to_string() + "/" + feature); - } - } - - features -} - -/// Given a list of all targets for a package, filters out only the targets -/// that are automatically included when the user doesn't specify any targets. -fn filter_default_targets(targets: &[Target], mode: CompileMode) -> Vec<&Target> { - match mode { - CompileMode::Bench => targets.iter().filter(|t| t.benched()).collect(), - CompileMode::Test => targets - .iter() - .filter(|t| t.tested() || t.is_example()) - .collect(), - CompileMode::Build | CompileMode::Check { .. } => targets - .iter() - .filter(|t| t.is_bin() || t.is_lib()) - .collect(), - CompileMode::Doc { .. } => { - // `doc` does lib and bins (bin with same name as lib is skipped). - targets - .iter() - .filter(|t| { - t.documented() - && (!t.is_bin() - || !targets.iter().any(|l| l.is_lib() && l.name() == t.name())) - }) - .collect() - } - CompileMode::Doctest | CompileMode::RunCustomBuild => panic!("Invalid mode {:?}", mode), - } -} - -/// Returns a list of proposed targets based on command-line target selection flags. -fn list_rule_targets<'a>( - packages: &[&'a Package], - rule: &FilterRule, - target_desc: &'static str, - is_expected_kind: fn(&Target) -> bool, - mode: CompileMode, -) -> CargoResult>> { - let mut proposals = Vec::new(); - match rule { - FilterRule::All => { - proposals.extend(filter_targets(packages, is_expected_kind, false, mode)) - } - FilterRule::Just(names) => { - for name in names { - proposals.extend(find_named_targets( - packages, - name, - target_desc, - is_expected_kind, - mode, - )?); - } - } - } - Ok(proposals) -} - -/// Finds the targets for a specifically named target. -fn find_named_targets<'a>( - packages: &[&'a Package], - target_name: &str, - target_desc: &'static str, - is_expected_kind: fn(&Target) -> bool, - mode: CompileMode, -) -> CargoResult>> { - let filter = |t: &Target| t.name() == target_name && is_expected_kind(t); - let proposals = filter_targets(packages, filter, true, mode); - if proposals.is_empty() { - let targets = packages.iter().flat_map(|pkg| { - pkg.targets() - .iter() - .filter(|target| is_expected_kind(target)) - }); - let suggestion = closest_msg(target_name, targets, |t| t.name()); - failure::bail!( - "no {} target named `{}`{}", - target_desc, - target_name, - suggestion - ); - } - Ok(proposals) -} - -fn filter_targets<'a>( - packages: &[&'a Package], - predicate: impl Fn(&Target) -> bool, - requires_features: bool, - mode: CompileMode, -) -> Vec> { - let mut proposals = Vec::new(); - for pkg in packages { - for target in pkg.targets().iter().filter(|t| predicate(t)) { - proposals.push(Proposal { - pkg, - target, - requires_features, - mode, - }); - } - } - proposals -} +//! The Cargo "compile" operation. +//! +//! This module contains the entry point for starting the compilation process +//! for commands like `build`, `test`, `doc`, `rustc`, etc. +//! +//! The `compile` function will do all the work to compile a workspace. A +//! rough outline is: +//! +//! - Resolve the dependency graph (see `ops::resolve`). +//! - Download any packages needed (see `PackageSet`). +//! - Generate a list of top-level "units" of work for the targets the user +//! requested on the command-line. Each `Unit` corresponds to a compiler +//! invocation. This is done in this module (`generate_targets`). +//! - Create a `Context` which will perform the following steps: +//! 
- Build the graph of `Unit` dependencies (see +//! `core::compiler::context::unit_dependencies`). +//! - Prepare the `target` directory (see `Layout`). +//! - Create a job queue (see `JobQueue`). The queue checks the +//! fingerprint of each `Unit` to determine if it should run or be +//! skipped. +//! - Execute the queue. Each leaf in the queue's dependency graph is +//! executed, and then removed from the graph when finished. This +//! repeats until the queue is empty. + +use std::collections::{BTreeSet, HashMap, HashSet}; +use std::iter::FromIterator; +use std::path::PathBuf; +use std::sync::Arc; + +use crate::core::compiler::{BuildConfig, BuildContext, Compilation, Context}; +use crate::core::compiler::{CompileMode, Kind, Unit}; +use crate::core::compiler::{DefaultExecutor, Executor, UnitInterner}; +use crate::core::profiles::{Profiles, UnitFor}; +use crate::core::resolver::{Resolve, ResolveOpts}; +use crate::core::{Package, Target}; +use crate::core::{PackageId, PackageIdSpec, TargetKind, Workspace}; +use crate::ops; +use crate::util::config::Config; +use crate::util::{closest_msg, profile, CargoResult, Platform}; + +/// Contains information about how a package should be compiled. +#[derive(Debug)] +pub struct CompileOptions<'a> { + pub config: &'a Config, + /// Configuration information for a rustc build + pub build_config: BuildConfig, + /// Extra features to build for the root package + pub features: Vec, + /// Flag whether all available features should be built for the root package + pub all_features: bool, + /// Flag if the default feature should be built for the root package + pub no_default_features: bool, + /// A set of packages to build. + pub spec: Packages, + /// Filter to apply to the root package to select which targets will be + /// built. + pub filter: CompileFilter, + /// Extra arguments to be passed to rustdoc (single target only) + pub target_rustdoc_args: Option>, + /// The specified target will be compiled with all the available arguments, + /// note that this only accounts for the *final* invocation of rustc + pub target_rustc_args: Option>, + /// Extra arguments passed to all selected targets for rustdoc. + pub local_rustdoc_args: Option>, + /// The directory to copy final artifacts to. Note that even if `out_dir` is + /// set, a copy of artifacts still could be found a `target/(debug\release)` + /// as usual. + // Note that, although the cmd-line flag name is `out-dir`, in code we use + // `export_dir`, to avoid confusion with out dir at `target/debug/deps`. 
+ pub export_dir: Option, +} + +impl<'a> CompileOptions<'a> { + pub fn new(config: &'a Config, mode: CompileMode) -> CargoResult> { + Ok(CompileOptions { + config, + build_config: BuildConfig::new(config, None, &None, mode)?, + features: Vec::new(), + all_features: false, + no_default_features: false, + spec: ops::Packages::Packages(Vec::new()), + filter: CompileFilter::Default { + required_features_filterable: false, + }, + target_rustdoc_args: None, + target_rustc_args: None, + local_rustdoc_args: None, + export_dir: None, + }) + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum Packages { + Default, + All, + OptOut(Vec), + Packages(Vec), +} + +impl Packages { + pub fn from_flags(all: bool, exclude: Vec, package: Vec) -> CargoResult { + Ok(match (all, exclude.len(), package.len()) { + (false, 0, 0) => Packages::Default, + (false, 0, _) => Packages::Packages(package), + (false, _, _) => failure::bail!("--exclude can only be used together with --all"), + (true, 0, _) => Packages::All, + (true, _, _) => Packages::OptOut(exclude), + }) + } + + pub fn to_package_id_specs(&self, ws: &Workspace<'_>) -> CargoResult> { + let specs = match self { + Packages::All => ws + .members() + .map(Package::package_id) + .map(PackageIdSpec::from_package_id) + .collect(), + Packages::OptOut(opt_out) => { + let mut opt_out = BTreeSet::from_iter(opt_out.iter().cloned()); + let packages = ws + .members() + .filter(|pkg| !opt_out.remove(pkg.name().as_str())) + .map(Package::package_id) + .map(PackageIdSpec::from_package_id) + .collect(); + if !opt_out.is_empty() { + ws.config().shell().warn(format!( + "excluded package(s) {} not found in workspace `{}`", + opt_out + .iter() + .map(|x| x.as_ref()) + .collect::>() + .join(", "), + ws.root().display(), + ))?; + } + packages + } + Packages::Packages(packages) if packages.is_empty() => { + vec![PackageIdSpec::from_package_id(ws.current()?.package_id())] + } + Packages::Packages(packages) => packages + .iter() + .map(|p| PackageIdSpec::parse(p)) + .collect::>>()?, + Packages::Default => ws + .default_members() + .map(Package::package_id) + .map(PackageIdSpec::from_package_id) + .collect(), + }; + if specs.is_empty() { + if ws.is_virtual() { + failure::bail!( + "manifest path `{}` contains no package: The manifest is virtual, \ + and the workspace has no members.", + ws.root().display() + ) + } + failure::bail!("no packages to compile") + } + Ok(specs) + } + + pub fn get_packages<'ws>(&self, ws: &'ws Workspace<'_>) -> CargoResult> { + let packages: Vec<_> = match self { + Packages::Default => ws.default_members().collect(), + Packages::All => ws.members().collect(), + Packages::OptOut(opt_out) => ws + .members() + .filter(|pkg| !opt_out.iter().any(|name| pkg.name().as_str() == name)) + .collect(), + Packages::Packages(packages) => packages + .iter() + .map(|name| { + ws.members() + .find(|pkg| pkg.name().as_str() == name) + .ok_or_else(|| { + failure::format_err!( + "package `{}` is not a member of the workspace", + name + ) + }) + }) + .collect::>>()?, + }; + Ok(packages) + } + + /// Returns whether or not the user needs to pass a `-p` flag to target a + /// specific package in the workspace. 
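`Packages::from_flags` above is a small decision table over `--all`, `--exclude`, and `-p`. A self-contained sketch of the same table, using a hypothetical stand-in enum and error type rather than Cargo's `Packages`/`CargoResult`:

// Hypothetical, simplified mirror of the flag-to-selection mapping above.
#[derive(Debug, PartialEq)]
enum Selection {
    Default,
    All,
    OptOut(Vec<String>),
    Packages(Vec<String>),
}

fn selection_from_flags(
    all: bool,
    exclude: Vec<String>,
    package: Vec<String>,
) -> Result<Selection, String> {
    match (all, exclude.len(), package.len()) {
        (false, 0, 0) => Ok(Selection::Default),
        (false, 0, _) => Ok(Selection::Packages(package)),
        (false, _, _) => Err("--exclude can only be used together with --all".into()),
        (true, 0, _) => Ok(Selection::All),
        (true, _, _) => Ok(Selection::OptOut(exclude)),
    }
}

fn main() {
    // `cargo build -p foo` selects exactly the named package.
    assert_eq!(
        selection_from_flags(false, vec![], vec!["foo".into()]),
        Ok(Selection::Packages(vec!["foo".into()]))
    );
    // `--exclude` without `--all` is rejected, mirroring the bail! above.
    assert!(selection_from_flags(false, vec!["bar".into()], vec![]).is_err());
}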
+ pub fn needs_spec_flag(&self, ws: &Workspace<'_>) -> bool { + match self { + Packages::Default => ws.default_members().count() > 1, + Packages::All => ws.members().count() > 1, + Packages::Packages(_) => true, + Packages::OptOut(_) => true, + } + } +} + +#[derive(Debug, PartialEq, Eq)] +pub enum LibRule { + /// Include the library, fail if not present + True, + /// Include the library if present + Default, + /// Exclude the library + False, +} + +#[derive(Debug)] +pub enum FilterRule { + All, + Just(Vec), +} + +#[derive(Debug)] +pub enum CompileFilter { + Default { + /// Flag whether targets can be safely skipped when required-features are not satisfied. + required_features_filterable: bool, + }, + Only { + all_targets: bool, + lib: LibRule, + bins: FilterRule, + examples: FilterRule, + tests: FilterRule, + benches: FilterRule, + }, +} + +pub fn compile<'a>( + ws: &Workspace<'a>, + options: &CompileOptions<'a>, +) -> CargoResult> { + let exec: Arc = Arc::new(DefaultExecutor); + compile_with_exec(ws, options, &exec) +} + +/// Like `compile` but allows specifying a custom `Executor` that will be able to intercept build +/// calls and add custom logic. `compile` uses `DefaultExecutor` which just passes calls through. +pub fn compile_with_exec<'a>( + ws: &Workspace<'a>, + options: &CompileOptions<'a>, + exec: &Arc, +) -> CargoResult> { + ws.emit_warnings()?; + compile_ws(ws, options, exec) +} + +pub fn compile_ws<'a>( + ws: &Workspace<'a>, + options: &CompileOptions<'a>, + exec: &Arc, +) -> CargoResult> { + let CompileOptions { + config, + ref build_config, + ref spec, + ref features, + all_features, + no_default_features, + ref filter, + ref target_rustdoc_args, + ref target_rustc_args, + ref local_rustdoc_args, + ref export_dir, + } = *options; + + match build_config.mode { + CompileMode::Test + | CompileMode::Build + | CompileMode::Check { .. } + | CompileMode::Bench + | CompileMode::RunCustomBuild => { + if std::env::var("RUST_FLAGS").is_ok() { + config.shell().warn( + "Cargo does not read `RUST_FLAGS` environment variable. Did you mean `RUSTFLAGS`?", + )?; + } + } + CompileMode::Doc { .. } | CompileMode::Doctest => { + if std::env::var("RUSTDOC_FLAGS").is_ok() { + config.shell().warn( + "Cargo does not read `RUSTDOC_FLAGS` environment variable. Did you mean `RUSTDOCFLAGS`?" + )?; + } + } + } + + let default_arch_kind = if build_config.requested_target.is_some() { + Kind::Target + } else { + Kind::Host + }; + + let specs = spec.to_package_id_specs(ws)?; + let dev_deps = ws.require_optional_deps() || filter.need_dev_deps(build_config.mode); + let opts = ResolveOpts::new(dev_deps, features, all_features, !no_default_features); + let resolve = ops::resolve_ws_with_opts(ws, opts, &specs)?; + let (packages, resolve_with_overrides) = resolve; + + let to_build_ids = specs + .iter() + .map(|s| s.query(resolve_with_overrides.iter())) + .collect::>>()?; + let mut to_builds = packages.get_many(to_build_ids)?; + + // The ordering here affects some error messages coming out of cargo, so + // let's be test and CLI friendly by always printing in the same order if + // there's an error. 
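The comment above is about output stability: the packages come back in no particular order, so they are sorted by a stable key before any error message is produced. A tiny, generic illustration of why, with plain strings rather than Cargo types:

use std::collections::HashSet;

fn main() {
    // HashSet iteration order is unspecified, so a message built straight from
    // it could differ between runs (and between local and CI output).
    let missing: HashSet<&str> = ["beta", "alpha", "gamma"].iter().copied().collect();

    // Sorting by a stable key first keeps the reported list, and any test
    // asserting on it, deterministic.
    let mut names: Vec<&str> = missing.into_iter().collect();
    names.sort();
    assert_eq!(names.join(", "), "alpha, beta, gamma");
}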
+ to_builds.sort_by_key(|p| p.package_id()); + + for pkg in to_builds.iter() { + pkg.manifest().print_teapot(ws.config()); + + if build_config.mode.is_any_test() + && !ws.is_member(pkg) + && pkg.dependencies().iter().any(|dep| !dep.is_transitive()) + { + failure::bail!( + "package `{}` cannot be tested because it requires dev-dependencies \ + and is not a member of the workspace", + pkg.name() + ); + } + } + + let (extra_args, extra_args_name) = match (target_rustc_args, target_rustdoc_args) { + (&Some(ref args), _) => (Some(args.clone()), "rustc"), + (_, &Some(ref args)) => (Some(args.clone()), "rustdoc"), + _ => (None, ""), + }; + + if extra_args.is_some() && to_builds.len() != 1 { + panic!( + "`{}` should not accept multiple `-p` flags", + extra_args_name + ); + } + + let profiles = ws.profiles(); + profiles.validate_packages(&mut config.shell(), &packages)?; + + let interner = UnitInterner::new(); + let mut bcx = BuildContext::new( + ws, + &resolve_with_overrides, + &packages, + config, + build_config, + profiles, + &interner, + HashMap::new(), + )?; + let units = generate_targets( + ws, + profiles, + &to_builds, + filter, + default_arch_kind, + &resolve_with_overrides, + &bcx, + )?; + + if let Some(args) = extra_args { + if units.len() != 1 { + failure::bail!( + "extra arguments to `{}` can only be passed to one \ + target, consider filtering\nthe package by passing, \ + e.g., `--lib` or `--bin NAME` to specify a single target", + extra_args_name + ); + } + bcx.extra_compiler_args.insert(units[0], args); + } + if let Some(args) = local_rustdoc_args { + for unit in &units { + if unit.mode.is_doc() || unit.mode.is_doc_test() { + bcx.extra_compiler_args.insert(*unit, args.clone()); + } + } + } + + let ret = { + let _p = profile::start("compiling"); + let cx = Context::new(config, &bcx)?; + cx.compile(&units, export_dir.clone(), exec)? + }; + + Ok(ret) +} + +impl FilterRule { + pub fn new(targets: Vec, all: bool) -> FilterRule { + if all { + FilterRule::All + } else { + FilterRule::Just(targets) + } + } + + pub fn none() -> FilterRule { + FilterRule::Just(Vec::new()) + } + + fn matches(&self, target: &Target) -> bool { + match *self { + FilterRule::All => true, + FilterRule::Just(ref targets) => targets.iter().any(|x| *x == target.name()), + } + } + + fn is_specific(&self) -> bool { + match *self { + FilterRule::All => true, + FilterRule::Just(ref targets) => !targets.is_empty(), + } + } + + pub fn try_collect(&self) -> Option> { + match *self { + FilterRule::All => None, + FilterRule::Just(ref targets) => Some(targets.clone()), + } + } +} + +impl CompileFilter { + /// Construct a CompileFilter from raw command line arguments. + pub fn from_raw_arguments( + lib_only: bool, + bins: Vec, + all_bins: bool, + tsts: Vec, + all_tsts: bool, + exms: Vec, + all_exms: bool, + bens: Vec, + all_bens: bool, + all_targets: bool, + ) -> CompileFilter { + let rule_lib = if lib_only { + LibRule::True + } else { + LibRule::False + }; + let rule_bins = FilterRule::new(bins, all_bins); + let rule_tsts = FilterRule::new(tsts, all_tsts); + let rule_exms = FilterRule::new(exms, all_exms); + let rule_bens = FilterRule::new(bens, all_bens); + + if all_targets { + CompileFilter::Only { + all_targets: true, + lib: LibRule::Default, + bins: FilterRule::All, + examples: FilterRule::All, + benches: FilterRule::All, + tests: FilterRule::All, + } + } else { + CompileFilter::new(rule_lib, rule_bins, rule_tsts, rule_exms, rule_bens) + } + } + + /// Construct a CompileFilter from underlying primitives. 
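The `FilterRule::matches`/`is_specific` pair above is what `CompileFilter::new` in the next hunk uses to decide between `Only` and `Default`. A compact sketch with a hypothetical stand-in type:

// Hypothetical, simplified mirror of FilterRule: either "all targets of this
// kind" or an explicit list of names.
enum Rule {
    All,
    Just(Vec<String>),
}

impl Rule {
    fn matches(&self, target_name: &str) -> bool {
        match self {
            Rule::All => true,
            Rule::Just(names) => names.iter().any(|n| n.as_str() == target_name),
        }
    }

    // "Specific" means the user asked for something: `--tests` (All) counts,
    // while the empty name list produced by `none()` does not.
    fn is_specific(&self) -> bool {
        match self {
            Rule::All => true,
            Rule::Just(names) => !names.is_empty(),
        }
    }
}

fn main() {
    let just_foo = Rule::Just(vec!["foo".to_string()]);
    assert!(just_foo.matches("foo") && !just_foo.matches("bar"));
    assert!(just_foo.is_specific());
    assert!(!Rule::Just(Vec::new()).is_specific());
    assert!(Rule::All.is_specific());
}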
+ pub fn new( + rule_lib: LibRule, + rule_bins: FilterRule, + rule_tsts: FilterRule, + rule_exms: FilterRule, + rule_bens: FilterRule, + ) -> CompileFilter { + if rule_lib == LibRule::True + || rule_bins.is_specific() + || rule_tsts.is_specific() + || rule_exms.is_specific() + || rule_bens.is_specific() + { + CompileFilter::Only { + all_targets: false, + lib: rule_lib, + bins: rule_bins, + examples: rule_exms, + benches: rule_bens, + tests: rule_tsts, + } + } else { + CompileFilter::Default { + required_features_filterable: true, + } + } + } + + pub fn need_dev_deps(&self, mode: CompileMode) -> bool { + match mode { + CompileMode::Test | CompileMode::Doctest | CompileMode::Bench => true, + CompileMode::Build | CompileMode::Doc { .. } | CompileMode::Check { .. } => match *self + { + CompileFilter::Default { .. } => false, + CompileFilter::Only { + ref examples, + ref tests, + ref benches, + .. + } => examples.is_specific() || tests.is_specific() || benches.is_specific(), + }, + CompileMode::RunCustomBuild => panic!("Invalid mode"), + } + } + + // this selects targets for "cargo run". for logic to select targets for + // other subcommands, see generate_targets and filter_default_targets + pub fn target_run(&self, target: &Target) -> bool { + match *self { + CompileFilter::Default { .. } => true, + CompileFilter::Only { + ref lib, + ref bins, + ref examples, + ref tests, + ref benches, + .. + } => { + let rule = match *target.kind() { + TargetKind::Bin => bins, + TargetKind::Test => tests, + TargetKind::Bench => benches, + TargetKind::ExampleBin | TargetKind::ExampleLib(..) => examples, + TargetKind::Lib(..) => { + return match *lib { + LibRule::True => true, + LibRule::Default => true, + LibRule::False => false, + }; + } + TargetKind::CustomBuild => return false, + }; + rule.matches(target) + } + } + } + + pub fn is_specific(&self) -> bool { + match *self { + CompileFilter::Default { .. } => false, + CompileFilter::Only { .. } => true, + } + } +} + +/// A proposed target. +/// +/// Proposed targets are later filtered into actual `Unit`s based on whether or +/// not the target requires its features to be present. +#[derive(Debug)] +struct Proposal<'a> { + pkg: &'a Package, + target: &'a Target, + /// Indicates whether or not all required features *must* be present. If + /// false, and the features are not available, then it will be silently + /// skipped. Generally, targets specified by name (`--bin foo`) are + /// required, all others can be silently skipped if features are missing. + requires_features: bool, + mode: CompileMode, +} + +/// Generates all the base targets for the packages the user has requested to +/// compile. Dependencies for these targets are computed later in `unit_dependencies`. +fn generate_targets<'a>( + ws: &Workspace<'_>, + profiles: &Profiles, + packages: &[&'a Package], + filter: &CompileFilter, + default_arch_kind: Kind, + resolve: &Resolve, + bcx: &BuildContext<'a, '_>, +) -> CargoResult>> { + // Helper for creating a `Unit` struct. + let new_unit = |pkg: &'a Package, target: &'a Target, target_mode: CompileMode| { + let unit_for = if bcx.build_config.mode.is_any_test() { + // NOTE: the `UnitFor` here is subtle. If you have a profile + // with `panic` set, the `panic` flag is cleared for + // tests/benchmarks and their dependencies. If this + // was `normal`, then the lib would get compiled three + // times (once with panic, once without, and once with + // `--test`). 
+ // + // This would cause a problem for doc tests, which would fail + // because `rustdoc` would attempt to link with both libraries + // at the same time. Also, it's probably not important (or + // even desirable?) for rustdoc to link with a lib with + // `panic` set. + // + // As a consequence, Examples and Binaries get compiled + // without `panic` set. This probably isn't a bad deal. + // + // Forcing the lib to be compiled three times during `cargo + // test` is probably also not desirable. + UnitFor::new_test() + } else if target.for_host() { + // Proc macro / plugin should not have `panic` set. + UnitFor::new_compiler() + } else { + UnitFor::new_normal() + }; + // Custom build units are added in `build_unit_dependencies`. + assert!(!target.is_custom_build()); + let target_mode = match target_mode { + CompileMode::Test => { + if target.is_example() && !filter.is_specific() && !target.tested() { + // Examples are included as regular binaries to verify + // that they compile. + CompileMode::Build + } else { + CompileMode::Test + } + } + CompileMode::Build => match *target.kind() { + TargetKind::Test => CompileMode::Test, + TargetKind::Bench => CompileMode::Bench, + _ => CompileMode::Build, + }, + // `CompileMode::Bench` is only used to inform `filter_default_targets` + // which command is being used (`cargo bench`). Afterwards, tests + // and benches are treated identically. Switching the mode allows + // de-duplication of units that are essentially identical. For + // example, `cargo build --all-targets --release` creates the units + // (lib profile:bench, mode:test) and (lib profile:bench, mode:bench) + // and since these are the same, we want them to be de-duplicated in + // `unit_dependencies`. + CompileMode::Bench => CompileMode::Test, + _ => target_mode, + }; + // Plugins or proc macros should be built for the host. + let kind = if target.for_host() { + Kind::Host + } else { + default_arch_kind + }; + let profile = profiles.get_profile( + pkg.package_id(), + ws.is_member(pkg), + unit_for, + target_mode, + bcx.build_config.release, + ); + bcx.units.intern(pkg, target, profile, kind, target_mode) + }; + + // Create a list of proposed targets. + let mut proposals: Vec> = Vec::new(); + + match *filter { + CompileFilter::Default { + required_features_filterable, + } => { + for pkg in packages { + let default = filter_default_targets(pkg.targets(), bcx.build_config.mode); + proposals.extend(default.into_iter().map(|target| Proposal { + pkg, + target, + requires_features: !required_features_filterable, + mode: bcx.build_config.mode, + })); + if bcx.build_config.mode == CompileMode::Test { + if let Some(t) = pkg + .targets() + .iter() + .find(|t| t.is_lib() && t.doctested() && t.doctestable()) + { + proposals.push(Proposal { + pkg, + target: t, + requires_features: false, + mode: CompileMode::Doctest, + }); + } + } + } + } + CompileFilter::Only { + all_targets, + ref lib, + ref bins, + ref examples, + ref tests, + ref benches, + } => { + if *lib != LibRule::False { + let mut libs = Vec::new(); + for proposal in + filter_targets(packages, Target::is_lib, false, bcx.build_config.mode) + { + let Proposal { target, pkg, .. 
} = proposal; + if bcx.build_config.mode.is_doc_test() && !target.doctestable() { + ws.config().shell().warn(format!( + "doc tests are not supported for crate type(s) `{}` in package `{}`", + target.rustc_crate_types().join(", "), + pkg.name() + ))?; + } else { + libs.push(proposal) + } + } + if !all_targets && libs.is_empty() && *lib == LibRule::True { + let names = packages.iter().map(|pkg| pkg.name()).collect::>(); + if names.len() == 1 { + failure::bail!("no library targets found in package `{}`", names[0]); + } else { + failure::bail!( + "no library targets found in packages: {}", + names.join(", ") + ); + } + } + proposals.extend(libs); + } + + // If `--tests` was specified, add all targets that would be + // generated by `cargo test`. + let test_filter = match tests { + FilterRule::All => Target::tested, + FilterRule::Just(_) => Target::is_test, + }; + let test_mode = match bcx.build_config.mode { + CompileMode::Build => CompileMode::Test, + CompileMode::Check { .. } => CompileMode::Check { test: true }, + _ => bcx.build_config.mode, + }; + // If `--benches` was specified, add all targets that would be + // generated by `cargo bench`. + let bench_filter = match benches { + FilterRule::All => Target::benched, + FilterRule::Just(_) => Target::is_bench, + }; + let bench_mode = match bcx.build_config.mode { + CompileMode::Build => CompileMode::Bench, + CompileMode::Check { .. } => CompileMode::Check { test: true }, + _ => bcx.build_config.mode, + }; + + proposals.extend(list_rule_targets( + packages, + bins, + "bin", + Target::is_bin, + bcx.build_config.mode, + )?); + proposals.extend(list_rule_targets( + packages, + examples, + "example", + Target::is_example, + bcx.build_config.mode, + )?); + proposals.extend(list_rule_targets( + packages, + tests, + "test", + test_filter, + test_mode, + )?); + proposals.extend(list_rule_targets( + packages, + benches, + "bench", + bench_filter, + bench_mode, + )?); + } + } + + // Only include targets that are libraries or have all required + // features available. + let mut features_map = HashMap::new(); + let mut units = HashSet::new(); + for Proposal { + pkg, + target, + requires_features, + mode, + } in proposals + { + let unavailable_features = match target.required_features() { + Some(rf) => { + let features = features_map + .entry(pkg) + .or_insert_with(|| resolve_all_features(resolve, pkg.package_id())); + rf.iter().filter(|f| !features.contains_key(*f)).collect() + } + None => Vec::new(), + }; + if target.is_lib() || unavailable_features.is_empty() { + let unit = new_unit(pkg, target, mode); + units.insert(unit); + } else if requires_features { + let required_features = target.required_features().unwrap(); + let quoted_required_features: Vec = required_features + .iter() + .map(|s| format!("`{}`", s)) + .collect(); + failure::bail!( + "target `{}` in package `{}` requires the features: {}\n\ + Consider enabling them by passing, e.g., `--features=\"{}\"`", + target.name(), + pkg.name(), + quoted_required_features.join(", "), + required_features.join(" ") + ); + } + // else, silently skip target. + } + Ok(units.into_iter().collect()) +} + +fn resolve_all_features( + resolve_with_overrides: &Resolve, + package_id: PackageId, +) -> HashMap> { + let mut features = resolve_with_overrides.features(package_id).clone(); + + // Include features enabled for use by dependencies so targets can also use them with the + // required-features field when deciding whether to be built or skipped. 
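The proposal loop above admits a target only when it is a lib or every entry in its `required-features` is enabled, erroring out for explicitly named targets and silently skipping the rest. A self-contained sketch of that gate, with hypothetical simplified types and a plain feature set standing in for the resolved features:

use std::collections::HashSet;

// Hypothetical stand-in for one proposed target.
struct Proposal<'a> {
    name: &'a str,
    is_lib: bool,
    required_features: Vec<&'a str>,
    // true when the target was named explicitly (e.g. `--bin foo`).
    requires_features: bool,
}

fn select<'a>(
    proposals: Vec<Proposal<'a>>,
    enabled: &HashSet<&'a str>,
) -> Result<Vec<&'a str>, String> {
    let mut units = Vec::new();
    for p in proposals {
        let missing: Vec<_> = p
            .required_features
            .iter()
            .filter(|f| !enabled.contains(*f))
            .collect();
        if p.is_lib || missing.is_empty() {
            units.push(p.name);
        } else if p.requires_features {
            // Explicitly requested target with missing features: hard error.
            return Err(format!("target `{}` requires missing features", p.name));
        }
        // else: silently skip, as in the code above.
    }
    Ok(units)
}

fn main() {
    let enabled: HashSet<&str> = ["json"].iter().copied().collect();
    let proposals = vec![
        Proposal { name: "lib", is_lib: true, required_features: vec![], requires_features: false },
        Proposal { name: "tool", is_lib: false, required_features: vec!["cli"], requires_features: false },
    ];
    // "tool" needs the `cli` feature, which is off, so it is skipped silently.
    assert_eq!(select(proposals, &enabled).unwrap(), vec!["lib"]);
}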
+ for (dep, _) in resolve_with_overrides.deps(package_id) { + for feature in resolve_with_overrides.features(dep) { + features.insert(dep.name().to_string() + "/" + feature.0, feature.1.clone()); + } + } + + features +} + +/// Given a list of all targets for a package, filters out only the targets +/// that are automatically included when the user doesn't specify any targets. +fn filter_default_targets(targets: &[Target], mode: CompileMode) -> Vec<&Target> { + match mode { + CompileMode::Bench => targets.iter().filter(|t| t.benched()).collect(), + CompileMode::Test => targets + .iter() + .filter(|t| t.tested() || t.is_example()) + .collect(), + CompileMode::Build | CompileMode::Check { .. } => targets + .iter() + .filter(|t| t.is_bin() || t.is_lib()) + .collect(), + CompileMode::Doc { .. } => { + // `doc` does lib and bins (bin with same name as lib is skipped). + targets + .iter() + .filter(|t| { + t.documented() + && (!t.is_bin() + || !targets.iter().any(|l| l.is_lib() && l.name() == t.name())) + }) + .collect() + } + CompileMode::Doctest | CompileMode::RunCustomBuild => panic!("Invalid mode {:?}", mode), + } +} + +/// Returns a list of proposed targets based on command-line target selection flags. +fn list_rule_targets<'a>( + packages: &[&'a Package], + rule: &FilterRule, + target_desc: &'static str, + is_expected_kind: fn(&Target) -> bool, + mode: CompileMode, +) -> CargoResult>> { + let mut proposals = Vec::new(); + match rule { + FilterRule::All => { + proposals.extend(filter_targets(packages, is_expected_kind, false, mode)) + } + FilterRule::Just(names) => { + for name in names { + proposals.extend(find_named_targets( + packages, + name, + target_desc, + is_expected_kind, + mode, + )?); + } + } + } + Ok(proposals) +} + +/// Finds the targets for a specifically named target. 
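The loop above folds each dependency's enabled features into the set under the `dep/feature` spelling, which is exactly how a target's `required-features` refers to a dependency's feature. A tiny illustration with plain collections and made-up data:

use std::collections::{HashMap, HashSet};

fn main() {
    // Features enabled on the package itself.
    let mut features: HashSet<String> = ["default".to_string()].into_iter().collect();

    // Hypothetical resolved features of two direct dependencies.
    let dep_features: HashMap<&str, Vec<&str>> = [
        ("serde", vec!["derive"]),
        ("rand", vec!["std"]),
    ]
    .into_iter()
    .collect();

    // Namespace each dependency feature as "dep/feature" so a target's
    // `required-features = ["serde/derive"]` entry can match it.
    for (dep, feats) in &dep_features {
        for feat in feats {
            features.insert(format!("{}/{}", dep, feat));
        }
    }

    assert!(features.contains("serde/derive"));
    assert!(!features.contains("serde/std"));
}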
+fn find_named_targets<'a>( + packages: &[&'a Package], + target_name: &str, + target_desc: &'static str, + is_expected_kind: fn(&Target) -> bool, + mode: CompileMode, +) -> CargoResult>> { + let filter = |t: &Target| t.name() == target_name && is_expected_kind(t); + let proposals = filter_targets(packages, filter, true, mode); + if proposals.is_empty() { + let targets = packages.iter().flat_map(|pkg| { + pkg.targets() + .iter() + .filter(|target| is_expected_kind(target)) + }); + let suggestion = closest_msg(target_name, targets, |t| t.name()); + failure::bail!( + "no {} target named `{}`{}", + target_desc, + target_name, + suggestion + ); + } + Ok(proposals) +} + +fn filter_targets<'a>( + packages: &[&'a Package], + predicate: impl Fn(&Target) -> bool, + requires_features: bool, + mode: CompileMode, +) -> Vec> { + let mut proposals = Vec::new(); + for pkg in packages { + for target in pkg.targets().iter().filter(|t| predicate(t)) { + proposals.push(Proposal { + pkg, + target, + requires_features, + mode, + }); + } + } + proposals +} diff --git a/src/cargo/ops/registry.rs b/src/cargo/ops/registry.rs index 5ecdaf6f53c..7c216f7f78a 100644 --- a/src/cargo/ops/registry.rs +++ b/src/cargo/ops/registry.rs @@ -1,778 +1,778 @@ -use std::collections::{BTreeMap, HashSet}; -use std::fs::{self, File}; -use std::io::{self, BufRead}; -use std::iter::repeat; -use std::str; -use std::time::Duration; -use std::{cmp, env}; - -use crates_io::{NewCrate, NewCrateDependency, Registry}; -use curl::easy::{Easy, InfoType, SslOpt}; -use failure::{bail, format_err}; -use log::{log, Level}; -use percent_encoding::{percent_encode, NON_ALPHANUMERIC}; - -use crate::core::dependency::Kind; -use crate::core::manifest::ManifestMetadata; -use crate::core::source::Source; -use crate::core::{Package, SourceId, Workspace}; -use crate::ops; -use crate::sources::{RegistrySource, SourceConfigMap, CRATES_IO_REGISTRY}; -use crate::util::config::{self, Config}; -use crate::util::errors::{CargoResult, CargoResultExt}; -use crate::util::important_paths::find_root_manifest_for_wd; -use crate::util::IntoUrl; -use crate::util::{paths, validate_package_name}; -use crate::version; - -pub struct RegistryConfig { - pub index: Option, - pub token: Option, -} - -pub struct PublishOpts<'cfg> { - pub config: &'cfg Config, - pub token: Option, - pub index: Option, - pub verify: bool, - pub allow_dirty: bool, - pub jobs: Option, - pub target: Option, - pub dry_run: bool, - pub registry: Option, - pub features: Vec, - pub all_features: bool, - pub no_default_features: bool, -} - -pub fn publish(ws: &Workspace<'_>, opts: &PublishOpts<'_>) -> CargoResult<()> { - let pkg = ws.current()?; - - if let Some(ref allowed_registries) = *pkg.publish() { - let reg_name = opts - .registry - .clone() - .unwrap_or_else(|| CRATES_IO_REGISTRY.to_string()); - if !allowed_registries.contains(®_name) { - bail!( - "`{}` cannot be published.\n\ - The registry `{}` is not listed in the `publish` value in Cargo.toml.", - pkg.name(), - reg_name - ); - } - } - - let (mut registry, reg_id) = registry( - opts.config, - opts.token.clone(), - opts.index.clone(), - opts.registry.clone(), - true, - !opts.dry_run, - )?; - verify_dependencies(pkg, ®istry, reg_id)?; - - // Prepare a tarball, with a non-surpressable warning if metadata - // is missing since this is being put online. 
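The first gate in `publish` above is the manifest's `publish` allow-list: `None` means any registry is acceptable, while `Some(list)` only permits the listed registries (so, by the same logic, an empty list rejects everything). A small stand-alone sketch of that check with a hypothetical helper:

// Hypothetical stand-in for the allow-list check at the top of `publish`.
fn can_publish_to(allowed: &Option<Vec<String>>, registry: &str) -> bool {
    match allowed {
        // `publish` not restricted in Cargo.toml: any registry is fine.
        None => true,
        // Restricted: the requested registry must be listed (an empty list
        // therefore rejects every registry).
        Some(list) => list.iter().any(|r| r.as_str() == registry),
    }
}

fn main() {
    let unrestricted: Option<Vec<String>> = None;
    let only_internal = Some(vec!["my-company".to_string()]);

    assert!(can_publish_to(&unrestricted, "crates-io"));
    assert!(can_publish_to(&only_internal, "my-company"));
    assert!(!can_publish_to(&only_internal, "crates-io"));
    assert!(!can_publish_to(&Some(Vec::new()), "crates-io"));
}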
- let tarball = ops::package( - ws, - &ops::PackageOpts { - config: opts.config, - verify: opts.verify, - list: false, - check_metadata: true, - allow_dirty: opts.allow_dirty, - target: opts.target.clone(), - jobs: opts.jobs, - features: opts.features.clone(), - all_features: opts.all_features, - no_default_features: opts.no_default_features, - }, - )? - .unwrap(); - - // Upload said tarball to the specified destination - opts.config - .shell() - .status("Uploading", pkg.package_id().to_string())?; - transmit( - opts.config, - pkg, - tarball.file(), - &mut registry, - reg_id, - opts.dry_run, - )?; - - Ok(()) -} - -fn verify_dependencies( - pkg: &Package, - registry: &Registry, - registry_src: SourceId, -) -> CargoResult<()> { - for dep in pkg.dependencies().iter() { - if dep.source_id().is_path() { - if !dep.specified_req() { - bail!( - "all path dependencies must have a version specified \ - when publishing.\ndependency `{}` does not specify \ - a version", - dep.package_name() - ) - } - } else if dep.source_id() != registry_src { - if dep.source_id().is_registry() { - // Block requests to send to crates.io with alt-registry deps. - // This extra hostname check is mostly to assist with testing, - // but also prevents someone using `--index` to specify - // something that points to crates.io. - if registry_src.is_default_registry() || registry.host_is_crates_io() { - bail!("crates cannot be published to crates.io with dependencies sourced from other\n\ - registries either publish `{}` on crates.io or pull it into this repository\n\ - and specify it with a path and version\n\ - (crate `{}` is pulled from {})", - dep.package_name(), - dep.package_name(), - dep.source_id()); - } - } else { - bail!( - "crates cannot be published with dependencies sourced from \ - a repository\neither publish `{}` as its own crate and \ - specify a version as a dependency or pull it into this \ - repository and specify it with a path and version\n(crate `{}` has \ - repository path `{}`)", - dep.package_name(), - dep.package_name(), - dep.source_id() - ); - } - } - } - Ok(()) -} - -fn transmit( - config: &Config, - pkg: &Package, - tarball: &File, - registry: &mut Registry, - registry_id: SourceId, - dry_run: bool, -) -> CargoResult<()> { - let deps = pkg - .dependencies() - .iter() - .map(|dep| { - // If the dependency is from a different registry, then include the - // registry in the dependency. - let dep_registry_id = match dep.registry_id() { - Some(id) => id, - None => SourceId::crates_io(config)?, - }; - // In the index and Web API, None means "from the same registry" - // whereas in Cargo.toml, it means "from crates.io". 
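`verify_dependencies` above enforces the publishability rules for the dependency list: path dependencies must also carry a version requirement, git dependencies are rejected, and registry dependencies from a different registry are rejected when the target is crates.io. A deliberately simplified sketch of those rules (hypothetical source kinds; it rejects any cross-registry dependency rather than only the crates.io case the code above special-cases):

// Hypothetical, simplified source classification for a dependency.
enum Source {
    Path,
    Registry { same_as_target: bool },
    Git,
}

struct Dep {
    name: &'static str,
    has_version_req: bool,
    source: Source,
}

fn verify(dep: &Dep) -> Result<(), String> {
    match dep.source {
        // Path deps are fine as long as a version is also given, so the
        // published crate can resolve them from the registry instead.
        Source::Path if !dep.has_version_req => {
            Err(format!("path dependency `{}` needs a version", dep.name))
        }
        Source::Path => Ok(()),
        Source::Registry { same_as_target: true } => Ok(()),
        Source::Registry { same_as_target: false } => {
            Err(format!("`{}` comes from a different registry", dep.name))
        }
        Source::Git => Err(format!("`{}` is a git dependency", dep.name)),
    }
}

fn main() {
    let ok = Dep { name: "serde", has_version_req: true, source: Source::Registry { same_as_target: true } };
    let bad_path = Dep { name: "local-helper", has_version_req: false, source: Source::Path };
    let other_reg = Dep { name: "internal-util", has_version_req: true, source: Source::Registry { same_as_target: false } };
    let git = Dep { name: "forked-dep", has_version_req: true, source: Source::Git };

    assert!(verify(&ok).is_ok());
    assert!(verify(&bad_path).is_err());
    assert!(verify(&other_reg).is_err() && verify(&git).is_err());
}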
- let dep_registry = if dep_registry_id != registry_id { - Some(dep_registry_id.url().to_string()) - } else { - None - }; - - Ok(NewCrateDependency { - optional: dep.is_optional(), - default_features: dep.uses_default_features(), - name: dep.package_name().to_string(), - features: dep.features().iter().map(|s| s.to_string()).collect(), - version_req: dep.version_req().to_string(), - target: dep.platform().map(|s| s.to_string()), - kind: match dep.kind() { - Kind::Normal => "normal", - Kind::Build => "build", - Kind::Development => "dev", - } - .to_string(), - registry: dep_registry, - explicit_name_in_toml: dep.explicit_name_in_toml().map(|s| s.to_string()), - }) - }) - .collect::>>()?; - let manifest = pkg.manifest(); - let ManifestMetadata { - ref authors, - ref description, - ref homepage, - ref documentation, - ref keywords, - ref readme, - ref repository, - ref license, - ref license_file, - ref categories, - ref badges, - ref links, - } = *manifest.metadata(); - let readme_content = match *readme { - Some(ref readme) => Some(paths::read(&pkg.root().join(readme))?), - None => None, - }; - if let Some(ref file) = *license_file { - if fs::metadata(&pkg.root().join(file)).is_err() { - bail!("the license file `{}` does not exist", file) - } - } - - // Do not upload if performing a dry run - if dry_run { - config.shell().warn("aborting upload due to dry run")?; - return Ok(()); - } - - let summary = pkg.summary(); - let string_features = summary - .features() - .iter() - .map(|(feat, values)| { - ( - feat.to_string(), - values.iter().map(|fv| fv.to_string(summary)).collect(), - ) - }) - .collect::>>(); - - let publish = registry.publish( - &NewCrate { - name: pkg.name().to_string(), - vers: pkg.version().to_string(), - deps, - features: string_features, - authors: authors.clone(), - description: description.clone(), - homepage: homepage.clone(), - documentation: documentation.clone(), - keywords: keywords.clone(), - categories: categories.clone(), - readme: readme_content, - readme_file: readme.clone(), - repository: repository.clone(), - license: license.clone(), - license_file: license_file.clone(), - badges: badges.clone(), - links: links.clone(), - }, - tarball, - ); - - match publish { - Ok(warnings) => { - if !warnings.invalid_categories.is_empty() { - let msg = format!( - "the following are not valid category slugs and were \ - ignored: {}. Please see https://crates.io/category_slugs \ - for the list of all category slugs. \ - ", - warnings.invalid_categories.join(", ") - ); - config.shell().warn(&msg)?; - } - - if !warnings.invalid_badges.is_empty() { - let msg = format!( - "the following are not valid badges and were ignored: {}. \ - Either the badge type specified is unknown or a required \ - attribute is missing. Please see \ - https://doc.rust-lang.org/cargo/reference/manifest.html#package-metadata \ - for valid badge types and their required attributes.", - warnings.invalid_badges.join(", ") - ); - config.shell().warn(&msg)?; - } - - if !warnings.other.is_empty() { - for msg in warnings.other { - config.shell().warn(&msg)?; - } - } - - Ok(()) - } - Err(e) => Err(e), - } -} - -pub fn registry_configuration( - config: &Config, - registry: Option, -) -> CargoResult { - let (index, token) = match registry { - Some(registry) => { - validate_package_name(®istry, "registry name", "")?; - ( - Some(config.get_registry_index(®istry)?.to_string()), - config - .get_string(&format!("registries.{}.token", registry))? 
- .map(|p| p.val), - ) - } - None => { - // Checking for default index and token - ( - config - .get_default_registry_index()? - .map(|url| url.to_string()), - config.get_string("registry.token")?.map(|p| p.val), - ) - } - }; - - Ok(RegistryConfig { index, token }) -} - -fn registry( - config: &Config, - token: Option, - index: Option, - registry: Option, - force_update: bool, - validate_token: bool, -) -> CargoResult<(Registry, SourceId)> { - // Parse all configuration options - let RegistryConfig { - token: token_config, - index: index_config, - } = registry_configuration(config, registry.clone())?; - let token = token.or(token_config); - let sid = get_source_id(config, index_config.or(index), registry)?; - let api_host = { - let _lock = config.acquire_package_cache_lock()?; - let mut src = RegistrySource::remote(sid, &HashSet::new(), config); - // Only update the index if the config is not available or `force` is set. - let cfg = src.config(); - let cfg = if force_update || cfg.is_err() { - src.update() - .chain_err(|| format!("failed to update {}", sid))?; - cfg.or_else(|_| src.config())? - } else { - cfg.unwrap() - }; - cfg.and_then(|cfg| cfg.api) - .ok_or_else(|| format_err!("{} does not support API commands", sid))? - }; - let handle = http_handle(config)?; - if validate_token && token.is_none() { - bail!("no upload token found, please run `cargo login`"); - }; - Ok((Registry::new_handle(api_host, token, handle), sid)) -} - -/// Creates a new HTTP handle with appropriate global configuration for cargo. -pub fn http_handle(config: &Config) -> CargoResult { - let (mut handle, timeout) = http_handle_and_timeout(config)?; - timeout.configure(&mut handle)?; - Ok(handle) -} - -pub fn http_handle_and_timeout(config: &Config) -> CargoResult<(Easy, HttpTimeout)> { - if config.frozen() { - bail!( - "attempting to make an HTTP request, but --frozen was \ - specified" - ) - } - if !config.network_allowed() { - bail!("can't make HTTP request in the offline mode") - } - - // The timeout option for libcurl by default times out the entire transfer, - // but we probably don't want this. Instead we only set timeouts for the - // connect phase as well as a "low speed" timeout so if we don't receive - // many bytes in a large-ish period of time then we time out. - let mut handle = Easy::new(); - let timeout = configure_http_handle(config, &mut handle)?; - Ok((handle, timeout)) -} - -pub fn needs_custom_http_transport(config: &Config) -> CargoResult { - let proxy_exists = http_proxy_exists(config)?; - let timeout = HttpTimeout::new(config)?.is_non_default(); - let cainfo = config.get_path("http.cainfo")?; - let check_revoke = config.get_bool("http.check-revoke")?; - let user_agent = config.get_string("http.user-agent")?; - - Ok(proxy_exists - || timeout - || cainfo.is_some() - || check_revoke.is_some() - || user_agent.is_some()) -} - -/// Configure a libcurl http handle with the defaults options for Cargo -pub fn configure_http_handle(config: &Config, handle: &mut Easy) -> CargoResult { - if let Some(proxy) = http_proxy(config)? { - handle.proxy(&proxy)?; - } - if let Some(cainfo) = config.get_path("http.cainfo")? { - handle.cainfo(&cainfo.val)?; - } - if let Some(check) = config.get_bool("http.check-revoke")? { - handle.ssl_options(SslOpt::new().no_revoke(!check.val))?; - } - if let Some(user_agent) = config.get_string("http.user-agent")? { - handle.useragent(&user_agent.val)?; - } else { - handle.useragent(&version().to_string())?; - } - - if let Some(true) = config.get::>("http.debug")? 
{ - handle.verbose(true)?; - handle.debug_function(|kind, data| { - let (prefix, level) = match kind { - InfoType::Text => ("*", Level::Debug), - InfoType::HeaderIn => ("<", Level::Debug), - InfoType::HeaderOut => (">", Level::Debug), - InfoType::DataIn => ("{", Level::Trace), - InfoType::DataOut => ("}", Level::Trace), - InfoType::SslDataIn | InfoType::SslDataOut => return, - _ => return, - }; - match str::from_utf8(data) { - Ok(s) => { - for line in s.lines() { - log!(level, "http-debug: {} {}", prefix, line); - } - } - Err(_) => { - log!( - level, - "http-debug: {} ({} bytes of data)", - prefix, - data.len() - ); - } - } - })?; - } - - HttpTimeout::new(config) -} - -#[must_use] -pub struct HttpTimeout { - pub dur: Duration, - pub low_speed_limit: u32, -} - -impl HttpTimeout { - pub fn new(config: &Config) -> CargoResult { - let low_speed_limit = config - .get::>("http.low-speed-limit")? - .unwrap_or(10); - let seconds = config - .get::>("http.timeout")? - .or_else(|| env::var("HTTP_TIMEOUT").ok().and_then(|s| s.parse().ok())) - .unwrap_or(30); - Ok(HttpTimeout { - dur: Duration::new(seconds, 0), - low_speed_limit, - }) - } - - fn is_non_default(&self) -> bool { - self.dur != Duration::new(30, 0) || self.low_speed_limit != 10 - } - - pub fn configure(&self, handle: &mut Easy) -> CargoResult<()> { - // The timeout option for libcurl by default times out the entire - // transfer, but we probably don't want this. Instead we only set - // timeouts for the connect phase as well as a "low speed" timeout so - // if we don't receive many bytes in a large-ish period of time then we - // time out. - handle.connect_timeout(self.dur)?; - handle.low_speed_time(self.dur)?; - handle.low_speed_limit(self.low_speed_limit)?; - Ok(()) - } -} - -/// Finds an explicit HTTP proxy if one is available. -/// -/// Favor cargo's `http.proxy`, then git's `http.proxy`. Proxies specified -/// via environment variables are picked up by libcurl. -fn http_proxy(config: &Config) -> CargoResult> { - if let Some(s) = config.get_string("http.proxy")? { - return Ok(Some(s.val)); - } - if let Ok(cfg) = git2::Config::open_default() { - if let Ok(s) = cfg.get_str("http.proxy") { - return Ok(Some(s.to_string())); - } - } - Ok(None) -} - -/// Determine if an http proxy exists. -/// -/// Checks the following for existence, in order: -/// -/// * cargo's `http.proxy` -/// * git's `http.proxy` -/// * `http_proxy` env var -/// * `HTTP_PROXY` env var -/// * `https_proxy` env var -/// * `HTTPS_PROXY` env var -fn http_proxy_exists(config: &Config) -> CargoResult { - if http_proxy(config)?.is_some() { - Ok(true) - } else { - Ok(["http_proxy", "HTTP_PROXY", "https_proxy", "HTTPS_PROXY"] - .iter() - .any(|v| env::var(v).is_ok())) - } -} - -pub fn registry_login( - config: &Config, - token: Option, - reg: Option, -) -> CargoResult<()> { - let (registry, _) = registry(config, token.clone(), None, reg.clone(), false, false)?; - - let token = match token { - Some(token) => token, - None => { - println!( - "please visit {}/me and paste the API Token below", - registry.host() - ); - let mut line = String::new(); - let input = io::stdin(); - input - .lock() - .read_line(&mut line) - .chain_err(|| "failed to read stdin") - .map_err(failure::Error::from)?; - line.trim().to_string() - } - }; - - let RegistryConfig { - token: old_token, .. 
- } = registry_configuration(config, reg.clone())?; - - if let Some(old_token) = old_token { - if old_token == token { - config.shell().status("Login", "already logged in")?; - return Ok(()); - } - } - - config::save_credentials(config, token, reg.clone())?; - config.shell().status( - "Login", - format!( - "token for `{}` saved", - reg.as_ref().map_or("crates.io", String::as_str) - ), - )?; - Ok(()) -} - -pub struct OwnersOptions { - pub krate: Option, - pub token: Option, - pub index: Option, - pub to_add: Option>, - pub to_remove: Option>, - pub list: bool, - pub registry: Option, -} - -pub fn modify_owners(config: &Config, opts: &OwnersOptions) -> CargoResult<()> { - let name = match opts.krate { - Some(ref name) => name.clone(), - None => { - let manifest_path = find_root_manifest_for_wd(config.cwd())?; - let ws = Workspace::new(&manifest_path, config)?; - ws.current()?.package_id().name().to_string() - } - }; - - let (mut registry, _) = registry( - config, - opts.token.clone(), - opts.index.clone(), - opts.registry.clone(), - true, - true, - )?; - - if let Some(ref v) = opts.to_add { - let v = v.iter().map(|s| &s[..]).collect::>(); - let msg = registry - .add_owners(&name, &v) - .map_err(|e| format_err!("failed to invite owners to crate {}: {}", name, e))?; - - config.shell().status("Owner", msg)?; - } - - if let Some(ref v) = opts.to_remove { - let v = v.iter().map(|s| &s[..]).collect::>(); - config - .shell() - .status("Owner", format!("removing {:?} from crate {}", v, name))?; - registry - .remove_owners(&name, &v) - .chain_err(|| format!("failed to remove owners from crate {}", name))?; - } - - if opts.list { - let owners = registry - .list_owners(&name) - .chain_err(|| format!("failed to list owners of crate {}", name))?; - for owner in owners.iter() { - print!("{}", owner.login); - match (owner.name.as_ref(), owner.email.as_ref()) { - (Some(name), Some(email)) => println!(" ({} <{}>)", name, email), - (Some(s), None) | (None, Some(s)) => println!(" ({})", s), - (None, None) => println!(), - } - } - } - - Ok(()) -} - -pub fn yank( - config: &Config, - krate: Option, - version: Option, - token: Option, - index: Option, - undo: bool, - reg: Option, -) -> CargoResult<()> { - let name = match krate { - Some(name) => name, - None => { - let manifest_path = find_root_manifest_for_wd(config.cwd())?; - let ws = Workspace::new(&manifest_path, config)?; - ws.current()?.package_id().name().to_string() - } - }; - let version = match version { - Some(v) => v, - None => bail!("a version must be specified to yank"), - }; - - let (mut registry, _) = registry(config, token, index, reg, true, true)?; - - if undo { - config - .shell() - .status("Unyank", format!("{}:{}", name, version))?; - registry - .unyank(&name, &version) - .chain_err(|| "failed to undo a yank")?; - } else { - config - .shell() - .status("Yank", format!("{}:{}", name, version))?; - registry - .yank(&name, &version) - .chain_err(|| "failed to yank")?; - } - - Ok(()) -} - -fn get_source_id( - config: &Config, - index: Option, - reg: Option, -) -> CargoResult { - match (reg, index) { - (Some(r), _) => SourceId::alt_registry(config, &r), - (_, Some(i)) => SourceId::for_registry(&i.into_url()?), - _ => { - let map = SourceConfigMap::new(config)?; - let src = map.load(SourceId::crates_io(config)?, &HashSet::new())?; - Ok(src.replaced_source_id()) - } - } -} - -pub fn search( - query: &str, - config: &Config, - index: Option, - limit: u32, - reg: Option, -) -> CargoResult<()> { - fn truncate_with_ellipsis(s: &str, max_width: usize) -> 
String { - // We should truncate at grapheme-boundary and compute character-widths, - // yet the dependencies on unicode-segmentation and unicode-width are - // not worth it. - let mut chars = s.chars(); - let mut prefix = (&mut chars).take(max_width - 1).collect::(); - if chars.next().is_some() { - prefix.push('…'); - } - prefix - } - - let (mut registry, source_id) = registry(config, None, index, reg, false, false)?; - let (crates, total_crates) = registry - .search(query, limit) - .chain_err(|| "failed to retrieve search results from the registry")?; - - let names = crates - .iter() - .map(|krate| format!("{} = \"{}\"", krate.name, krate.max_version)) - .collect::>(); - - let description_margin = names.iter().map(|s| s.len() + 4).max().unwrap_or_default(); - - let description_length = cmp::max(80, 128 - description_margin); - - let descriptions = crates.iter().map(|krate| { - krate - .description - .as_ref() - .map(|desc| truncate_with_ellipsis(&desc.replace("\n", " "), description_length)) - }); - - for (name, description) in names.into_iter().zip(descriptions) { - let line = match description { - Some(desc) => { - let space = repeat(' ') - .take(description_margin - name.len()) - .collect::(); - name + &space + "# " + &desc - } - None => name, - }; - println!("{}", line); - } - - let search_max_limit = 100; - if total_crates > limit && limit < search_max_limit { - println!( - "... and {} crates more (use --limit N to see more)", - total_crates - limit - ); - } else if total_crates > limit && limit >= search_max_limit { - let extra = if source_id.is_default_registry() { - format!( - " (go to https://crates.io/search?q={} to see more)", - percent_encode(query.as_bytes(), NON_ALPHANUMERIC) - ) - } else { - String::new() - }; - println!("... and {} crates more{}", total_crates - limit, extra); - } - - Ok(()) -} +use std::collections::{BTreeMap, HashSet}; +use std::fs::{self, File}; +use std::io::{self, BufRead}; +use std::iter::repeat; +use std::str; +use std::time::Duration; +use std::{cmp, env}; + +use crates_io::{NewCrate, NewCrateDependency, Registry}; +use curl::easy::{Easy, InfoType, SslOpt}; +use failure::{bail, format_err}; +use log::{log, Level}; +use percent_encoding::{percent_encode, NON_ALPHANUMERIC}; + +use crate::core::dependency::Kind; +use crate::core::manifest::ManifestMetadata; +use crate::core::source::Source; +use crate::core::{Package, SourceId, Workspace}; +use crate::ops; +use crate::sources::{RegistrySource, SourceConfigMap, CRATES_IO_REGISTRY}; +use crate::util::config::{self, Config}; +use crate::util::errors::{CargoResult, CargoResultExt}; +use crate::util::important_paths::find_root_manifest_for_wd; +use crate::util::IntoUrl; +use crate::util::{paths, validate_package_name}; +use crate::version; + +pub struct RegistryConfig { + pub index: Option, + pub token: Option, +} + +pub struct PublishOpts<'cfg> { + pub config: &'cfg Config, + pub token: Option, + pub index: Option, + pub verify: bool, + pub allow_dirty: bool, + pub jobs: Option, + pub target: Option, + pub dry_run: bool, + pub registry: Option, + pub features: Vec, + pub all_features: bool, + pub no_default_features: bool, +} + +pub fn publish(ws: &Workspace<'_>, opts: &PublishOpts<'_>) -> CargoResult<()> { + let pkg = ws.current()?; + + if let Some(ref allowed_registries) = *pkg.publish() { + let reg_name = opts + .registry + .clone() + .unwrap_or_else(|| CRATES_IO_REGISTRY.to_string()); + if !allowed_registries.contains(®_name) { + bail!( + "`{}` cannot be published.\n\ + The registry `{}` is not 
listed in the `publish` value in Cargo.toml.", + pkg.name(), + reg_name + ); + } + } + + let (mut registry, reg_id) = registry( + opts.config, + opts.token.clone(), + opts.index.clone(), + opts.registry.clone(), + true, + !opts.dry_run, + )?; + verify_dependencies(pkg, ®istry, reg_id)?; + + // Prepare a tarball, with a non-surpressable warning if metadata + // is missing since this is being put online. + let tarball = ops::package( + ws, + &ops::PackageOpts { + config: opts.config, + verify: opts.verify, + list: false, + check_metadata: true, + allow_dirty: opts.allow_dirty, + target: opts.target.clone(), + jobs: opts.jobs, + features: opts.features.clone(), + all_features: opts.all_features, + no_default_features: opts.no_default_features, + }, + )? + .unwrap(); + + // Upload said tarball to the specified destination + opts.config + .shell() + .status("Uploading", pkg.package_id().to_string())?; + transmit( + opts.config, + pkg, + tarball.file(), + &mut registry, + reg_id, + opts.dry_run, + )?; + + Ok(()) +} + +fn verify_dependencies( + pkg: &Package, + registry: &Registry, + registry_src: SourceId, +) -> CargoResult<()> { + for dep in pkg.dependencies().iter() { + if dep.source_id().is_path() { + if !dep.specified_req() { + bail!( + "all path dependencies must have a version specified \ + when publishing.\ndependency `{}` does not specify \ + a version", + dep.package_name() + ) + } + } else if dep.source_id() != registry_src { + if dep.source_id().is_registry() { + // Block requests to send to crates.io with alt-registry deps. + // This extra hostname check is mostly to assist with testing, + // but also prevents someone using `--index` to specify + // something that points to crates.io. + if registry_src.is_default_registry() || registry.host_is_crates_io() { + bail!("crates cannot be published to crates.io with dependencies sourced from other\n\ + registries either publish `{}` on crates.io or pull it into this repository\n\ + and specify it with a path and version\n\ + (crate `{}` is pulled from {})", + dep.package_name(), + dep.package_name(), + dep.source_id()); + } + } else { + bail!( + "crates cannot be published with dependencies sourced from \ + a repository\neither publish `{}` as its own crate and \ + specify a version as a dependency or pull it into this \ + repository and specify it with a path and version\n(crate `{}` has \ + repository path `{}`)", + dep.package_name(), + dep.package_name(), + dep.source_id() + ); + } + } + } + Ok(()) +} + +fn transmit( + config: &Config, + pkg: &Package, + tarball: &File, + registry: &mut Registry, + registry_id: SourceId, + dry_run: bool, +) -> CargoResult<()> { + let deps = pkg + .dependencies() + .iter() + .map(|dep| { + // If the dependency is from a different registry, then include the + // registry in the dependency. + let dep_registry_id = match dep.registry_id() { + Some(id) => id, + None => SourceId::crates_io(config)?, + }; + // In the index and Web API, None means "from the same registry" + // whereas in Cargo.toml, it means "from crates.io". 
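That comment marks an easy-to-miss asymmetry: in the upload payload, `registry: None` means "same registry as the one receiving the publish", and any other origin is spelled out as that registry's index URL. A minimal sketch of the translation, with hypothetical identifiers and URLs:

// Hypothetical registry identifier: just its index URL here.
#[derive(PartialEq)]
struct RegistryId(&'static str);

// Returns the value for the upload payload's `registry` field:
// `None` = "same registry as the one receiving the publish".
fn payload_registry(dep_registry: &RegistryId, publish_to: &RegistryId) -> Option<String> {
    if dep_registry == publish_to {
        None
    } else {
        Some(dep_registry.0.to_string())
    }
}

fn main() {
    let target = RegistryId("https://example.com/registry-a-index");
    let other = RegistryId("https://example.com/registry-b-index");

    // Dependency from the registry being published to: field stays None.
    assert_eq!(payload_registry(&target, &target), None);
    // Cross-registry dependency: the other index URL is sent explicitly.
    assert_eq!(
        payload_registry(&other, &target),
        Some("https://example.com/registry-b-index".to_string())
    );
}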
+ let dep_registry = if dep_registry_id != registry_id { + Some(dep_registry_id.url().to_string()) + } else { + None + }; + + Ok(NewCrateDependency { + optional: dep.is_optional(), + default_features: dep.uses_default_features(), + name: dep.package_name().to_string(), + features: dep.features().iter().map(|s| s.to_string()).collect(), + version_req: dep.version_req().to_string(), + target: dep.platform().map(|s| s.to_string()), + kind: match dep.kind() { + Kind::Normal => "normal", + Kind::Build => "build", + Kind::Development => "dev", + } + .to_string(), + registry: dep_registry, + explicit_name_in_toml: dep.explicit_name_in_toml().map(|s| s.to_string()), + }) + }) + .collect::>>()?; + let manifest = pkg.manifest(); + let ManifestMetadata { + ref authors, + ref description, + ref homepage, + ref documentation, + ref keywords, + ref readme, + ref repository, + ref license, + ref license_file, + ref categories, + ref badges, + ref links, + } = *manifest.metadata(); + let readme_content = match *readme { + Some(ref readme) => Some(paths::read(&pkg.root().join(readme))?), + None => None, + }; + if let Some(ref file) = *license_file { + if fs::metadata(&pkg.root().join(file)).is_err() { + bail!("the license file `{}` does not exist", file) + } + } + + // Do not upload if performing a dry run + if dry_run { + config.shell().warn("aborting upload due to dry run")?; + return Ok(()); + } + + let summary = pkg.summary(); + let string_features = summary + .features() + .iter() + .map(|(feat, values)| { + ( + feat.to_string(), + values.1.iter().map(|fv| fv.to_string(&summary)).collect(), + ) + }) + .collect::>>(); + + let publish = registry.publish( + &NewCrate { + name: pkg.name().to_string(), + vers: pkg.version().to_string(), + deps, + features: string_features, + authors: authors.clone(), + description: description.clone(), + homepage: homepage.clone(), + documentation: documentation.clone(), + keywords: keywords.clone(), + categories: categories.clone(), + readme: readme_content, + readme_file: readme.clone(), + repository: repository.clone(), + license: license.clone(), + license_file: license_file.clone(), + badges: badges.clone(), + links: links.clone(), + }, + tarball, + ); + + match publish { + Ok(warnings) => { + if !warnings.invalid_categories.is_empty() { + let msg = format!( + "the following are not valid category slugs and were \ + ignored: {}. Please see https://crates.io/category_slugs \ + for the list of all category slugs. \ + ", + warnings.invalid_categories.join(", ") + ); + config.shell().warn(&msg)?; + } + + if !warnings.invalid_badges.is_empty() { + let msg = format!( + "the following are not valid badges and were ignored: {}. \ + Either the badge type specified is unknown or a required \ + attribute is missing. Please see \ + https://doc.rust-lang.org/cargo/reference/manifest.html#package-metadata \ + for valid badge types and their required attributes.", + warnings.invalid_badges.join(", ") + ); + config.shell().warn(&msg)?; + } + + if !warnings.other.is_empty() { + for msg in warnings.other { + config.shell().warn(&msg)?; + } + } + + Ok(()) + } + Err(e) => Err(e), + } +} + +pub fn registry_configuration( + config: &Config, + registry: Option, +) -> CargoResult { + let (index, token) = match registry { + Some(registry) => { + validate_package_name(®istry, "registry name", "")?; + ( + Some(config.get_registry_index(®istry)?.to_string()), + config + .get_string(&format!("registries.{}.token", registry))? 
+ .map(|p| p.val), + ) + } + None => { + // Checking for default index and token + ( + config + .get_default_registry_index()? + .map(|url| url.to_string()), + config.get_string("registry.token")?.map(|p| p.val), + ) + } + }; + + Ok(RegistryConfig { index, token }) +} + +fn registry( + config: &Config, + token: Option, + index: Option, + registry: Option, + force_update: bool, + validate_token: bool, +) -> CargoResult<(Registry, SourceId)> { + // Parse all configuration options + let RegistryConfig { + token: token_config, + index: index_config, + } = registry_configuration(config, registry.clone())?; + let token = token.or(token_config); + let sid = get_source_id(config, index_config.or(index), registry)?; + let api_host = { + let _lock = config.acquire_package_cache_lock()?; + let mut src = RegistrySource::remote(sid, &HashSet::new(), config); + // Only update the index if the config is not available or `force` is set. + let cfg = src.config(); + let cfg = if force_update || cfg.is_err() { + src.update() + .chain_err(|| format!("failed to update {}", sid))?; + cfg.or_else(|_| src.config())? + } else { + cfg.unwrap() + }; + cfg.and_then(|cfg| cfg.api) + .ok_or_else(|| format_err!("{} does not support API commands", sid))? + }; + let handle = http_handle(config)?; + if validate_token && token.is_none() { + bail!("no upload token found, please run `cargo login`"); + }; + Ok((Registry::new_handle(api_host, token, handle), sid)) +} + +/// Creates a new HTTP handle with appropriate global configuration for cargo. +pub fn http_handle(config: &Config) -> CargoResult { + let (mut handle, timeout) = http_handle_and_timeout(config)?; + timeout.configure(&mut handle)?; + Ok(handle) +} + +pub fn http_handle_and_timeout(config: &Config) -> CargoResult<(Easy, HttpTimeout)> { + if config.frozen() { + bail!( + "attempting to make an HTTP request, but --frozen was \ + specified" + ) + } + if !config.network_allowed() { + bail!("can't make HTTP request in the offline mode") + } + + // The timeout option for libcurl by default times out the entire transfer, + // but we probably don't want this. Instead we only set timeouts for the + // connect phase as well as a "low speed" timeout so if we don't receive + // many bytes in a large-ish period of time then we time out. + let mut handle = Easy::new(); + let timeout = configure_http_handle(config, &mut handle)?; + Ok((handle, timeout)) +} + +pub fn needs_custom_http_transport(config: &Config) -> CargoResult { + let proxy_exists = http_proxy_exists(config)?; + let timeout = HttpTimeout::new(config)?.is_non_default(); + let cainfo = config.get_path("http.cainfo")?; + let check_revoke = config.get_bool("http.check-revoke")?; + let user_agent = config.get_string("http.user-agent")?; + + Ok(proxy_exists + || timeout + || cainfo.is_some() + || check_revoke.is_some() + || user_agent.is_some()) +} + +/// Configure a libcurl http handle with the defaults options for Cargo +pub fn configure_http_handle(config: &Config, handle: &mut Easy) -> CargoResult { + if let Some(proxy) = http_proxy(config)? { + handle.proxy(&proxy)?; + } + if let Some(cainfo) = config.get_path("http.cainfo")? { + handle.cainfo(&cainfo.val)?; + } + if let Some(check) = config.get_bool("http.check-revoke")? { + handle.ssl_options(SslOpt::new().no_revoke(!check.val))?; + } + if let Some(user_agent) = config.get_string("http.user-agent")? { + handle.useragent(&user_agent.val)?; + } else { + handle.useragent(&version().to_string())?; + } + + if let Some(true) = config.get::>("http.debug")? 
{ + handle.verbose(true)?; + handle.debug_function(|kind, data| { + let (prefix, level) = match kind { + InfoType::Text => ("*", Level::Debug), + InfoType::HeaderIn => ("<", Level::Debug), + InfoType::HeaderOut => (">", Level::Debug), + InfoType::DataIn => ("{", Level::Trace), + InfoType::DataOut => ("}", Level::Trace), + InfoType::SslDataIn | InfoType::SslDataOut => return, + _ => return, + }; + match str::from_utf8(data) { + Ok(s) => { + for line in s.lines() { + log!(level, "http-debug: {} {}", prefix, line); + } + } + Err(_) => { + log!( + level, + "http-debug: {} ({} bytes of data)", + prefix, + data.len() + ); + } + } + })?; + } + + HttpTimeout::new(config) +} + +#[must_use] +pub struct HttpTimeout { + pub dur: Duration, + pub low_speed_limit: u32, +} + +impl HttpTimeout { + pub fn new(config: &Config) -> CargoResult { + let low_speed_limit = config + .get::>("http.low-speed-limit")? + .unwrap_or(10); + let seconds = config + .get::>("http.timeout")? + .or_else(|| env::var("HTTP_TIMEOUT").ok().and_then(|s| s.parse().ok())) + .unwrap_or(30); + Ok(HttpTimeout { + dur: Duration::new(seconds, 0), + low_speed_limit, + }) + } + + fn is_non_default(&self) -> bool { + self.dur != Duration::new(30, 0) || self.low_speed_limit != 10 + } + + pub fn configure(&self, handle: &mut Easy) -> CargoResult<()> { + // The timeout option for libcurl by default times out the entire + // transfer, but we probably don't want this. Instead we only set + // timeouts for the connect phase as well as a "low speed" timeout so + // if we don't receive many bytes in a large-ish period of time then we + // time out. + handle.connect_timeout(self.dur)?; + handle.low_speed_time(self.dur)?; + handle.low_speed_limit(self.low_speed_limit)?; + Ok(()) + } +} + +/// Finds an explicit HTTP proxy if one is available. +/// +/// Favor cargo's `http.proxy`, then git's `http.proxy`. Proxies specified +/// via environment variables are picked up by libcurl. +fn http_proxy(config: &Config) -> CargoResult> { + if let Some(s) = config.get_string("http.proxy")? { + return Ok(Some(s.val)); + } + if let Ok(cfg) = git2::Config::open_default() { + if let Ok(s) = cfg.get_str("http.proxy") { + return Ok(Some(s.to_string())); + } + } + Ok(None) +} + +/// Determine if an http proxy exists. +/// +/// Checks the following for existence, in order: +/// +/// * cargo's `http.proxy` +/// * git's `http.proxy` +/// * `http_proxy` env var +/// * `HTTP_PROXY` env var +/// * `https_proxy` env var +/// * `HTTPS_PROXY` env var +fn http_proxy_exists(config: &Config) -> CargoResult { + if http_proxy(config)?.is_some() { + Ok(true) + } else { + Ok(["http_proxy", "HTTP_PROXY", "https_proxy", "HTTPS_PROXY"] + .iter() + .any(|v| env::var(v).is_ok())) + } +} + +pub fn registry_login( + config: &Config, + token: Option, + reg: Option, +) -> CargoResult<()> { + let (registry, _) = registry(config, token.clone(), None, reg.clone(), false, false)?; + + let token = match token { + Some(token) => token, + None => { + println!( + "please visit {}/me and paste the API Token below", + registry.host() + ); + let mut line = String::new(); + let input = io::stdin(); + input + .lock() + .read_line(&mut line) + .chain_err(|| "failed to read stdin") + .map_err(failure::Error::from)?; + line.trim().to_string() + } + }; + + let RegistryConfig { + token: old_token, .. 
+ } = registry_configuration(config, reg.clone())?; + + if let Some(old_token) = old_token { + if old_token == token { + config.shell().status("Login", "already logged in")?; + return Ok(()); + } + } + + config::save_credentials(config, token, reg.clone())?; + config.shell().status( + "Login", + format!( + "token for `{}` saved", + reg.as_ref().map_or("crates.io", String::as_str) + ), + )?; + Ok(()) +} + +pub struct OwnersOptions { + pub krate: Option, + pub token: Option, + pub index: Option, + pub to_add: Option>, + pub to_remove: Option>, + pub list: bool, + pub registry: Option, +} + +pub fn modify_owners(config: &Config, opts: &OwnersOptions) -> CargoResult<()> { + let name = match opts.krate { + Some(ref name) => name.clone(), + None => { + let manifest_path = find_root_manifest_for_wd(config.cwd())?; + let ws = Workspace::new(&manifest_path, config)?; + ws.current()?.package_id().name().to_string() + } + }; + + let (mut registry, _) = registry( + config, + opts.token.clone(), + opts.index.clone(), + opts.registry.clone(), + true, + true, + )?; + + if let Some(ref v) = opts.to_add { + let v = v.iter().map(|s| &s[..]).collect::>(); + let msg = registry + .add_owners(&name, &v) + .map_err(|e| format_err!("failed to invite owners to crate {}: {}", name, e))?; + + config.shell().status("Owner", msg)?; + } + + if let Some(ref v) = opts.to_remove { + let v = v.iter().map(|s| &s[..]).collect::>(); + config + .shell() + .status("Owner", format!("removing {:?} from crate {}", v, name))?; + registry + .remove_owners(&name, &v) + .chain_err(|| format!("failed to remove owners from crate {}", name))?; + } + + if opts.list { + let owners = registry + .list_owners(&name) + .chain_err(|| format!("failed to list owners of crate {}", name))?; + for owner in owners.iter() { + print!("{}", owner.login); + match (owner.name.as_ref(), owner.email.as_ref()) { + (Some(name), Some(email)) => println!(" ({} <{}>)", name, email), + (Some(s), None) | (None, Some(s)) => println!(" ({})", s), + (None, None) => println!(), + } + } + } + + Ok(()) +} + +pub fn yank( + config: &Config, + krate: Option, + version: Option, + token: Option, + index: Option, + undo: bool, + reg: Option, +) -> CargoResult<()> { + let name = match krate { + Some(name) => name, + None => { + let manifest_path = find_root_manifest_for_wd(config.cwd())?; + let ws = Workspace::new(&manifest_path, config)?; + ws.current()?.package_id().name().to_string() + } + }; + let version = match version { + Some(v) => v, + None => bail!("a version must be specified to yank"), + }; + + let (mut registry, _) = registry(config, token, index, reg, true, true)?; + + if undo { + config + .shell() + .status("Unyank", format!("{}:{}", name, version))?; + registry + .unyank(&name, &version) + .chain_err(|| "failed to undo a yank")?; + } else { + config + .shell() + .status("Yank", format!("{}:{}", name, version))?; + registry + .yank(&name, &version) + .chain_err(|| "failed to yank")?; + } + + Ok(()) +} + +fn get_source_id( + config: &Config, + index: Option, + reg: Option, +) -> CargoResult { + match (reg, index) { + (Some(r), _) => SourceId::alt_registry(config, &r), + (_, Some(i)) => SourceId::for_registry(&i.into_url()?), + _ => { + let map = SourceConfigMap::new(config)?; + let src = map.load(SourceId::crates_io(config)?, &HashSet::new())?; + Ok(src.replaced_source_id()) + } + } +} + +pub fn search( + query: &str, + config: &Config, + index: Option, + limit: u32, + reg: Option, +) -> CargoResult<()> { + fn truncate_with_ellipsis(s: &str, max_width: usize) -> 
String { + // We should truncate at grapheme-boundary and compute character-widths, + // yet the dependencies on unicode-segmentation and unicode-width are + // not worth it. + let mut chars = s.chars(); + let mut prefix = (&mut chars).take(max_width - 1).collect::(); + if chars.next().is_some() { + prefix.push('…'); + } + prefix + } + + let (mut registry, source_id) = registry(config, None, index, reg, false, false)?; + let (crates, total_crates) = registry + .search(query, limit) + .chain_err(|| "failed to retrieve search results from the registry")?; + + let names = crates + .iter() + .map(|krate| format!("{} = \"{}\"", krate.name, krate.max_version)) + .collect::>(); + + let description_margin = names.iter().map(|s| s.len() + 4).max().unwrap_or_default(); + + let description_length = cmp::max(80, 128 - description_margin); + + let descriptions = crates.iter().map(|krate| { + krate + .description + .as_ref() + .map(|desc| truncate_with_ellipsis(&desc.replace("\n", " "), description_length)) + }); + + for (name, description) in names.into_iter().zip(descriptions) { + let line = match description { + Some(desc) => { + let space = repeat(' ') + .take(description_margin - name.len()) + .collect::(); + name + &space + "# " + &desc + } + None => name, + }; + println!("{}", line); + } + + let search_max_limit = 100; + if total_crates > limit && limit < search_max_limit { + println!( + "... and {} crates more (use --limit N to see more)", + total_crates - limit + ); + } else if total_crates > limit && limit >= search_max_limit { + let extra = if source_id.is_default_registry() { + format!( + " (go to https://crates.io/search?q={} to see more)", + percent_encode(query.as_bytes(), NON_ALPHANUMERIC) + ) + } else { + String::new() + }; + println!("... and {} crates more{}", total_crates - limit, extra); + } + + Ok(()) +} diff --git a/src/cargo/sources/registry/index.rs b/src/cargo/sources/registry/index.rs index 67116074cb7..28afb4fe1f7 100644 --- a/src/cargo/sources/registry/index.rs +++ b/src/cargo/sources/registry/index.rs @@ -1,758 +1,762 @@ -//! Management of the index of a registry source -//! -//! This module contains management of the index and various operations, such as -//! actually parsing the index, looking for crates, etc. This is intended to be -//! abstract over remote indices (downloaded via git) and local registry indices -//! (which are all just present on the filesystem). -//! -//! ## Index Performance -//! -//! One important aspect of the index is that we want to optimize the "happy -//! path" as much as possible. Whenever you type `cargo build` Cargo will -//! *always* reparse the registry and learn about dependency information. This -//! is done because Cargo needs to learn about the upstream crates.io crates -//! that you're using and ensure that the preexisting `Cargo.lock` still matches -//! the current state of the world. -//! -//! Consequently, Cargo "null builds" (the index that Cargo adds to each build -//! itself) need to be fast when accessing the index. The primary performance -//! optimization here is to avoid parsing JSON blobs from the registry if we -//! don't need them. Most secondary optimizations are centered around removing -//! allocations and such, but avoiding parsing JSON is the #1 optimization. -//! -//! When we get queries from the resolver we're given a `Dependency`. This -//! dependency in turn has a version requirement, and with lock files that -//! already exist these version requirements are exact version requirements -//! `=a.b.c`. 
This means that we in theory only need to parse one line of JSON -//! per query in the registry, the one that matches version `a.b.c`. -//! -//! The crates.io index, however, is not amenable to this form of query. Instead -//! the crates.io index simply is a file where each line is a JSON blob. To -//! learn about the versions in each JSON blob we would need to parse the JSON, -//! defeating the purpose of trying to parse as little as possible. -//! -//! > Note that as a small aside even *loading* the JSON from the registry is -//! > actually pretty slow. For crates.io and remote registries we don't -//! > actually check out the git index on disk because that takes quite some -//! > time and is quite large. Instead we use `libgit2` to read the JSON from -//! > the raw git objects. This in turn can be slow (aka show up high in -//! > profiles) because libgit2 has to do deflate decompression and such. -//! -//! To solve all these issues a strategy is employed here where Cargo basically -//! creates an index into the index. The first time a package is queried about -//! (first time being for an entire computer) Cargo will load the contents -//! (slowly via libgit2) from the registry. It will then (slowly) parse every -//! single line to learn about its versions. Afterwards, however, Cargo will -//! emit a new file (a cache) which is amenable for speedily parsing in future -//! invocations. -//! -//! This cache file is currently organized by basically having the semver -//! version extracted from each JSON blob. That way Cargo can quickly and easily -//! parse all versions contained and which JSON blob they're associated with. -//! The JSON blob then doesn't actually need to get parsed unless the version is -//! parsed. -//! -//! Altogether the initial measurements of this shows a massive improvement for -//! Cargo null build performance. It's expected that the improvements earned -//! here will continue to grow over time in the sense that the previous -//! implementation (parse all lines each time) actually continues to slow down -//! over time as new versions of a crate are published. In any case when first -//! implemented a null build of Cargo itself would parse 3700 JSON blobs from -//! the registry and load 150 blobs from git. Afterwards it parses 150 JSON -//! blobs and loads 0 files git. Removing 200ms or more from Cargo's startup -//! time is certainly nothing to sneeze at! -//! -//! Note that this is just a high-level overview, there's of course lots of -//! details like invalidating caches and whatnot which are handled below, but -//! hopefully those are more obvious inline in the code itself. - -use std::collections::{HashMap, HashSet}; -use std::fs; -use std::path::Path; -use std::str; - -use log::info; -use semver::{Version, VersionReq}; - -use crate::core::dependency::Dependency; -use crate::core::{InternedString, PackageId, SourceId, Summary}; -use crate::sources::registry::{RegistryData, RegistryPackage}; -use crate::util::{internal, CargoResult, Config, Filesystem, ToSemver}; - -/// Crates.io treats hyphen and underscores as interchangeable, but the index and old Cargo do not. -/// Therefore, the index must store uncanonicalized version of the name so old Cargo's can find it. -/// This loop tries all possible combinations of switching hyphen and underscores to find the -/// uncanonicalized one. As all stored inputs have the correct spelling, we start with the spelling -/// as-provided. 
-struct UncanonicalizedIter<'s> { - input: &'s str, - num_hyphen_underscore: u32, - hyphen_combination_num: u16, -} - -impl<'s> UncanonicalizedIter<'s> { - fn new(input: &'s str) -> Self { - let num_hyphen_underscore = input.chars().filter(|&c| c == '_' || c == '-').count() as u32; - UncanonicalizedIter { - input, - num_hyphen_underscore, - hyphen_combination_num: 0, - } - } -} - -impl<'s> Iterator for UncanonicalizedIter<'s> { - type Item = String; - - fn next(&mut self) -> Option { - if self.hyphen_combination_num > 0 - && self.hyphen_combination_num.trailing_zeros() >= self.num_hyphen_underscore - { - return None; - } - - let ret = Some( - self.input - .chars() - .scan(0u16, |s, c| { - // the check against 15 here's to prevent - // shift overflow on inputs with more then 15 hyphens - if (c == '_' || c == '-') && *s <= 15 { - let switch = (self.hyphen_combination_num & (1u16 << *s)) > 0; - let out = if (c == '_') ^ switch { '_' } else { '-' }; - *s += 1; - Some(out) - } else { - Some(c) - } - }) - .collect(), - ); - self.hyphen_combination_num += 1; - ret - } -} - -#[test] -fn no_hyphen() { - assert_eq!( - UncanonicalizedIter::new("test").collect::>(), - vec!["test".to_string()] - ) -} - -#[test] -fn two_hyphen() { - assert_eq!( - UncanonicalizedIter::new("te-_st").collect::>(), - vec![ - "te-_st".to_string(), - "te__st".to_string(), - "te--st".to_string(), - "te_-st".to_string() - ] - ) -} - -#[test] -fn overflow_hyphen() { - assert_eq!( - UncanonicalizedIter::new("te-_-_-_-_-_-_-_-_-st") - .take(100) - .count(), - 100 - ) -} - -pub struct RegistryIndex<'cfg> { - source_id: SourceId, - path: Filesystem, - summaries_cache: HashMap, - config: &'cfg Config, -} - -/// An internal cache of summaries for a particular package. -/// -/// A list of summaries are loaded from disk via one of two methods: -/// -/// 1. Primarily Cargo will parse the corresponding file for a crate in the -/// upstream crates.io registry. That's just a JSON blob per line which we -/// can parse, extract the version, and then store here. -/// -/// 2. Alternatively, if Cargo has previously run, we'll have a cached index of -/// dependencies for the upstream index. This is a file that Cargo maintains -/// lazily on the local filesystem and is much faster to parse since it -/// doesn't involve parsing all of the JSON. -/// -/// The outward-facing interface of this doesn't matter too much where it's -/// loaded from, but it's important when reading the implementation to note that -/// we try to parse as little as possible! -#[derive(Default)] -struct Summaries { - /// A raw vector of uninterpreted bytes. This is what `Unparsed` start/end - /// fields are indexes into. If a `Summaries` is loaded from the crates.io - /// index then this field will be empty since nothing is `Unparsed`. - raw_data: Vec, - - /// All known versions of a crate, keyed from their `Version` to the - /// possibly parsed or unparsed version of the full summary. - versions: HashMap, -} - -/// A lazily parsed `IndexSummary`. -enum MaybeIndexSummary { - /// A summary which has not been parsed, The `start` and `end` are pointers - /// into `Summaries::raw_data` which this is an entry of. - Unparsed { start: usize, end: usize }, - - /// An actually parsed summary. - Parsed(IndexSummary), -} - -/// A parsed representation of a summary from the index. -/// -/// In addition to a full `Summary` we have a few auxiliary pieces of -/// information liked `yanked` and what the checksum hash is. 
-pub struct IndexSummary { - pub summary: Summary, - pub yanked: bool, - pub hash: String, -} - -/// A representation of the cache on disk that Cargo maintains of summaries. -/// Cargo will initially parse all summaries in the registry and will then -/// serialize that into this form and place it in a new location on disk, -/// ensuring that access in the future is much speedier. -#[derive(Default)] -struct SummariesCache<'a> { - versions: Vec<(Version, &'a [u8])>, -} - -impl<'cfg> RegistryIndex<'cfg> { - pub fn new( - source_id: SourceId, - path: &Filesystem, - config: &'cfg Config, - ) -> RegistryIndex<'cfg> { - RegistryIndex { - source_id, - path: path.clone(), - summaries_cache: HashMap::new(), - config, - } - } - - /// Returns the hash listed for a specified `PackageId`. - pub fn hash(&mut self, pkg: PackageId, load: &mut dyn RegistryData) -> CargoResult { - let req = VersionReq::exact(pkg.version()); - let summary = self - .summaries(pkg.name(), &req, load)? - .next() - .ok_or_else(|| internal(format!("no hash listed for {}", pkg)))?; - Ok(summary.hash.clone()) - } - - /// Load a list of summaries for `name` package in this registry which - /// match `req` - /// - /// This function will semantically parse the on-disk index, match all - /// versions, and then return an iterator over all summaries which matched. - /// Internally there's quite a few layer of caching to amortize this cost - /// though since this method is called quite a lot on null builds in Cargo. - pub fn summaries<'a, 'b>( - &'a mut self, - name: InternedString, - req: &'b VersionReq, - load: &mut dyn RegistryData, - ) -> CargoResult + 'b> - where - 'a: 'b, - { - let source_id = self.source_id; - - // First up actually parse what summaries we have available. If Cargo - // has run previously this will parse a Cargo-specific cache file rather - // than the registry itself. In effect this is intended to be a quite - // cheap operation. - let summaries = self.load_summaries(name, load)?; - - // Iterate over our summaries, extract all relevant ones which match our - // version requirement, and then parse all corresponding rows in the - // registry. As a reminder this `summaries` method is called for each - // entry in a lock file on every build, so we want to absolutely - // minimize the amount of work being done here and parse as little as - // necessary. - let raw_data = &summaries.raw_data; - Ok(summaries - .versions - .iter_mut() - .filter_map(move |(k, v)| if req.matches(k) { Some(v) } else { None }) - .filter_map(move |maybe| match maybe.parse(raw_data, source_id) { - Ok(summary) => Some(summary), - Err(e) => { - info!("failed to parse `{}` registry package: {}", name, e); - None - } - })) - } - - fn load_summaries( - &mut self, - name: InternedString, - load: &mut dyn RegistryData, - ) -> CargoResult<&mut Summaries> { - // If we've previously loaded what versions are present for `name`, just - // return that since our cache should still be valid. - if self.summaries_cache.contains_key(&name) { - return Ok(self.summaries_cache.get_mut(&name).unwrap()); - } - - // Prepare the `RegistryData` which will lazily initialize internal data - // structures. - load.prepare()?; - - // let root = self.config.assert_package_cache_locked(&self.path); - let root = load.assert_index_locked(&self.path); - let cache_root = root.join(".cache"); - let index_version = load.current_version(); - - // See module comment in `registry/mod.rs` for why this is structured - // the way it is. 
- let fs_name = name - .chars() - .flat_map(|c| c.to_lowercase()) - .collect::(); - let raw_path = match fs_name.len() { - 1 => format!("1/{}", fs_name), - 2 => format!("2/{}", fs_name), - 3 => format!("3/{}/{}", &fs_name[..1], fs_name), - _ => format!("{}/{}/{}", &fs_name[0..2], &fs_name[2..4], fs_name), - }; - - // Attempt to handle misspellings by searching for a chain of related - // names to the original `raw_path` name. Only return summaries - // associated with the first hit, however. The resolver will later - // reject any candidates that have the wrong name, and with this it'll - // along the way produce helpful "did you mean?" suggestions. - for path in UncanonicalizedIter::new(&raw_path).take(1024) { - let summaries = Summaries::parse( - index_version.as_ref().map(|s| &**s), - root, - &cache_root, - path.as_ref(), - self.source_id, - load, - self.config, - )?; - if let Some(summaries) = summaries { - self.summaries_cache.insert(name, summaries); - return Ok(self.summaries_cache.get_mut(&name).unwrap()); - } - } - - // If nothing was found then this crate doesn't exists, so just use an - // empty `Summaries` list. - self.summaries_cache.insert(name, Summaries::default()); - Ok(self.summaries_cache.get_mut(&name).unwrap()) - } - - pub fn query_inner( - &mut self, - dep: &Dependency, - load: &mut dyn RegistryData, - yanked_whitelist: &HashSet, - f: &mut dyn FnMut(Summary), - ) -> CargoResult<()> { - if self.config.offline() - && self.query_inner_with_online(dep, load, yanked_whitelist, f, false)? != 0 - { - return Ok(()); - // If offline, and there are no matches, try again with online. - // This is necessary for dependencies that are not used (such as - // target-cfg or optional), but are not downloaded. Normally the - // build should succeed if they are not downloaded and not used, - // but they still need to resolve. If they are actually needed - // then cargo will fail to download and an error message - // indicating that the required dependency is unavailable while - // offline will be displayed. - } - self.query_inner_with_online(dep, load, yanked_whitelist, f, true)?; - Ok(()) - } - - fn query_inner_with_online( - &mut self, - dep: &Dependency, - load: &mut dyn RegistryData, - yanked_whitelist: &HashSet, - f: &mut dyn FnMut(Summary), - online: bool, - ) -> CargoResult { - let source_id = self.source_id; - let summaries = self - .summaries(dep.package_name(), dep.version_req(), load)? - // First filter summaries for `--offline`. If we're online then - // everything is a candidate, otherwise if we're offline we're only - // going to consider candidates which are actually present on disk. - // - // Note: This particular logic can cause problems with - // optional dependencies when offline. If at least 1 version - // of an optional dependency is downloaded, but that version - // does not satisfy the requirements, then resolution will - // fail. Unfortunately, whether or not something is optional - // is not known here. - .filter(|s| (online || load.is_crate_downloaded(s.summary.package_id()))) - // Next filter out all yanked packages. Some yanked packages may - // leak throguh if they're in a whitelist (aka if they were - // previously in `Cargo.lock` - .filter(|s| !s.yanked || yanked_whitelist.contains(&s.summary.package_id())) - .map(|s| s.summary.clone()); - - // Handle `cargo update --precise` here. 
If specified, our own source - // will have a precise version listed of the form - // `=o->` where `` is the name of a crate on - // this source, `` is the version installed and ` is the - // version requested (argument to `--precise`). - let name = dep.package_name().as_str(); - let summaries = summaries.filter(|s| match source_id.precise() { - Some(p) if p.starts_with(name) && p[name.len()..].starts_with('=') => { - let mut vers = p[name.len() + 1..].splitn(2, "->"); - if dep - .version_req() - .matches(&vers.next().unwrap().to_semver().unwrap()) - { - vers.next().unwrap() == s.version().to_string() - } else { - true - } - } - _ => true, - }); - - let mut count = 0; - for summary in summaries { - f(summary); - count += 1; - } - Ok(count) - } - - pub fn is_yanked(&mut self, pkg: PackageId, load: &mut dyn RegistryData) -> CargoResult { - let req = VersionReq::exact(pkg.version()); - let found = self - .summaries(pkg.name(), &req, load)? - .any(|summary| summary.yanked); - Ok(found) - } -} - -impl Summaries { - /// Parse out a `Summaries` instances from on-disk state. - /// - /// This will attempt to prefer parsing a previous cache file that already - /// exists from a previous invocation of Cargo (aka you're typing `cargo - /// build` again after typing it previously). If parsing fails or the cache - /// isn't found, then we take a slower path which loads the full descriptor - /// for `relative` from the underlying index (aka typically libgit2 with - /// crates.io) and then parse everything in there. - /// - /// * `index_version` - a version string to describe the current state of - /// the index which for remote registries is the current git sha and - /// for local registries is not available. - /// * `root` - this is the root argument passed to `load` - /// * `cache_root` - this is the root on the filesystem itself of where to - /// store cache files. - /// * `relative` - this is the file we're loading from cache or the index - /// data - /// * `source_id` - the registry's SourceId used when parsing JSON blobs to - /// create summaries. - /// * `load` - the actual index implementation which may be very slow to - /// call. We avoid this if we can. - pub fn parse( - index_version: Option<&str>, - root: &Path, - cache_root: &Path, - relative: &Path, - source_id: SourceId, - load: &mut dyn RegistryData, - config: &Config, - ) -> CargoResult> { - // First up, attempt to load the cache. This could fail for all manner - // of reasons, but consider all of them non-fatal and just log their - // occurrence in case anyone is debugging anything. - let cache_path = cache_root.join(relative); - let mut cache_contents = None; - if let Some(index_version) = index_version { - match fs::read(&cache_path) { - Ok(contents) => match Summaries::parse_cache(contents, index_version) { - Ok(s) => { - log::debug!("fast path for registry cache of {:?}", relative); - if cfg!(debug_assertions) { - cache_contents = Some(s.raw_data); - } else { - return Ok(Some(s)); - } - } - Err(e) => { - log::debug!("failed to parse {:?} cache: {}", relative, e); - } - }, - Err(e) => log::debug!("cache missing for {:?} error: {}", relative, e), - } - } - - // This is the fallback path where we actually talk to libgit2 to load - // information. 
Here we parse every single line in the index (as we need - // to find the versions) - log::debug!("slow path for {:?}", relative); - let mut ret = Summaries::default(); - let mut hit_closure = false; - let mut cache_bytes = None; - let err = load.load(root, relative, &mut |contents| { - ret.raw_data = contents.to_vec(); - let mut cache = SummariesCache::default(); - hit_closure = true; - for line in split(contents, b'\n') { - // Attempt forwards-compatibility on the index by ignoring - // everything that we ourselves don't understand, that should - // allow future cargo implementations to break the - // interpretation of each line here and older cargo will simply - // ignore the new lines. - let summary = match IndexSummary::parse(line, source_id) { - Ok(summary) => summary, - Err(e) => { - log::info!("failed to parse {:?} registry package: {}", relative, e); - continue; - } - }; - let version = summary.summary.package_id().version().clone(); - cache.versions.push((version.clone(), line)); - ret.versions.insert(version, summary.into()); - } - if let Some(index_version) = index_version { - cache_bytes = Some(cache.serialize(index_version)); - } - Ok(()) - }); - - // We ignore lookup failures as those are just crates which don't exist - // or we haven't updated the registry yet. If we actually ran the - // closure though then we care about those errors. - if !hit_closure { - debug_assert!(cache_contents.is_none()); - return Ok(None); - } - err?; - - // If we've got debug assertions enabled and the cache was previously - // present and considered fresh this is where the debug assertions - // actually happens to verify that our cache is indeed fresh and - // computes exactly the same value as before. - if cfg!(debug_assertions) && cache_contents.is_some() { - assert_eq!(cache_bytes, cache_contents); - } - - // Once we have our `cache_bytes` which represents the `Summaries` we're - // about to return, write that back out to disk so future Cargo - // invocations can use it. - // - // This is opportunistic so we ignore failure here but are sure to log - // something in case of error. - if let Some(cache_bytes) = cache_bytes { - if fs::create_dir_all(cache_path.parent().unwrap()).is_ok() { - let path = Filesystem::new(cache_path.clone()); - config.assert_package_cache_locked(&path); - if let Err(e) = fs::write(cache_path, cache_bytes) { - log::info!("failed to write cache: {}", e); - } - } - } - - Ok(Some(ret)) - } - - /// Parses an open `File` which represents information previously cached by - /// Cargo. - pub fn parse_cache(contents: Vec, last_index_update: &str) -> CargoResult { - let cache = SummariesCache::parse(&contents, last_index_update)?; - let mut ret = Summaries::default(); - for (version, summary) in cache.versions { - let (start, end) = subslice_bounds(&contents, summary); - ret.versions - .insert(version, MaybeIndexSummary::Unparsed { start, end }); - } - ret.raw_data = contents; - return Ok(ret); - - // Returns the start/end offsets of `inner` with `outer`. Asserts that - // `inner` is a subslice of `outer`. - fn subslice_bounds(outer: &[u8], inner: &[u8]) -> (usize, usize) { - let outer_start = outer.as_ptr() as usize; - let outer_end = outer_start + outer.len(); - let inner_start = inner.as_ptr() as usize; - let inner_end = inner_start + inner.len(); - assert!(inner_start >= outer_start); - assert!(inner_end <= outer_end); - (inner_start - outer_start, inner_end - outer_start) - } - } -} - -// Implementation of serializing/deserializing the cache of summaries on disk. 
-// Currently the format looks like: -// -// +--------------+-------------+---+ -// | version byte | git sha rev | 0 | -// +--------------+-------------+---+ -// -// followed by... -// -// +----------------+---+------------+---+ -// | semver version | 0 | JSON blob | 0 | ... -// +----------------+---+------------+---+ -// -// The idea is that this is a very easy file for Cargo to parse in future -// invocations. The read from disk should be quite fast and then afterwards all -// we need to know is what versions correspond to which JSON blob. -// -// The leading version byte is intended to ensure that there's some level of -// future compatibility against changes to this cache format so if different -// versions of Cargo share the same cache they don't get too confused. The git -// sha lets us know when the file needs to be regenerated (it needs regeneration -// whenever the index itself updates). - -const CURRENT_CACHE_VERSION: u8 = 1; - -impl<'a> SummariesCache<'a> { - fn parse(data: &'a [u8], last_index_update: &str) -> CargoResult> { - // NB: keep this method in sync with `serialize` below - let (first_byte, rest) = data - .split_first() - .ok_or_else(|| failure::format_err!("malformed cache"))?; - if *first_byte != CURRENT_CACHE_VERSION { - failure::bail!("looks like a different Cargo's cache, bailing out"); - } - let mut iter = split(rest, 0); - if let Some(update) = iter.next() { - if update != last_index_update.as_bytes() { - failure::bail!( - "cache out of date: current index ({}) != cache ({})", - last_index_update, - str::from_utf8(update)?, - ) - } - } else { - failure::bail!("malformed file"); - } - let mut ret = SummariesCache::default(); - while let Some(version) = iter.next() { - let version = str::from_utf8(version)?; - let version = Version::parse(version)?; - let summary = iter.next().unwrap(); - ret.versions.push((version, summary)); - } - Ok(ret) - } - - fn serialize(&self, index_version: &str) -> Vec { - // NB: keep this method in sync with `parse` above - let size = self - .versions - .iter() - .map(|(_version, data)| (10 + data.len())) - .sum(); - let mut contents = Vec::with_capacity(size); - contents.push(CURRENT_CACHE_VERSION); - contents.extend_from_slice(index_version.as_bytes()); - contents.push(0); - for (version, data) in self.versions.iter() { - contents.extend_from_slice(version.to_string().as_bytes()); - contents.push(0); - contents.extend_from_slice(data); - contents.push(0); - } - contents - } -} - -impl MaybeIndexSummary { - /// Parses this "maybe a summary" into a `Parsed` for sure variant. - /// - /// Does nothing if this is already `Parsed`, and otherwise the `raw_data` - /// passed in is sliced with the bounds in `Unparsed` and then actually - /// parsed. - fn parse(&mut self, raw_data: &[u8], source_id: SourceId) -> CargoResult<&IndexSummary> { - let (start, end) = match self { - MaybeIndexSummary::Unparsed { start, end } => (*start, *end), - MaybeIndexSummary::Parsed(summary) => return Ok(summary), - }; - let summary = IndexSummary::parse(&raw_data[start..end], source_id)?; - *self = MaybeIndexSummary::Parsed(summary); - match self { - MaybeIndexSummary::Unparsed { .. } => unreachable!(), - MaybeIndexSummary::Parsed(summary) => Ok(summary), - } - } -} - -impl From for MaybeIndexSummary { - fn from(summary: IndexSummary) -> MaybeIndexSummary { - MaybeIndexSummary::Parsed(summary) - } -} - -impl IndexSummary { - /// Parses a line from the registry's index file into an `IndexSummary` for - /// a package. 
- /// - /// The `line` provided is expected to be valid JSON. - fn parse(line: &[u8], source_id: SourceId) -> CargoResult { - let RegistryPackage { - name, - vers, - cksum, - deps, - features, - yanked, - links, - } = serde_json::from_slice(line)?; - log::trace!("json parsed registry {}/{}", name, vers); - let pkgid = PackageId::new(&name, &vers, source_id)?; - let deps = deps - .into_iter() - .map(|dep| dep.into_dep(source_id)) - .collect::>>()?; - let mut summary = Summary::new(pkgid, deps, &features, links, false)?; - summary.set_checksum(cksum.clone()); - Ok(IndexSummary { - summary, - yanked: yanked.unwrap_or(false), - hash: cksum, - }) - } -} - -fn split<'a>(haystack: &'a [u8], needle: u8) -> impl Iterator + 'a { - struct Split<'a> { - haystack: &'a [u8], - needle: u8, - } - - impl<'a> Iterator for Split<'a> { - type Item = &'a [u8]; - - fn next(&mut self) -> Option<&'a [u8]> { - if self.haystack.is_empty() { - return None; - } - let (ret, remaining) = match memchr::memchr(self.needle, self.haystack) { - Some(pos) => (&self.haystack[..pos], &self.haystack[pos + 1..]), - None => (self.haystack, &[][..]), - }; - self.haystack = remaining; - Some(ret) - } - } - - Split { haystack, needle } -} +//! Management of the index of a registry source +//! +//! This module contains management of the index and various operations, such as +//! actually parsing the index, looking for crates, etc. This is intended to be +//! abstract over remote indices (downloaded via git) and local registry indices +//! (which are all just present on the filesystem). +//! +//! ## Index Performance +//! +//! One important aspect of the index is that we want to optimize the "happy +//! path" as much as possible. Whenever you type `cargo build` Cargo will +//! *always* reparse the registry and learn about dependency information. This +//! is done because Cargo needs to learn about the upstream crates.io crates +//! that you're using and ensure that the preexisting `Cargo.lock` still matches +//! the current state of the world. +//! +//! Consequently, Cargo "null builds" (the index that Cargo adds to each build +//! itself) need to be fast when accessing the index. The primary performance +//! optimization here is to avoid parsing JSON blobs from the registry if we +//! don't need them. Most secondary optimizations are centered around removing +//! allocations and such, but avoiding parsing JSON is the #1 optimization. +//! +//! When we get queries from the resolver we're given a `Dependency`. This +//! dependency in turn has a version requirement, and with lock files that +//! already exist these version requirements are exact version requirements +//! `=a.b.c`. This means that we in theory only need to parse one line of JSON +//! per query in the registry, the one that matches version `a.b.c`. +//! +//! The crates.io index, however, is not amenable to this form of query. Instead +//! the crates.io index simply is a file where each line is a JSON blob. To +//! learn about the versions in each JSON blob we would need to parse the JSON, +//! defeating the purpose of trying to parse as little as possible. +//! +//! > Note that as a small aside even *loading* the JSON from the registry is +//! > actually pretty slow. For crates.io and remote registries we don't +//! > actually check out the git index on disk because that takes quite some +//! > time and is quite large. Instead we use `libgit2` to read the JSON from +//! > the raw git objects. This in turn can be slow (aka show up high in +//! 
> profiles) because libgit2 has to do deflate decompression and such. +//! +//! To solve all these issues a strategy is employed here where Cargo basically +//! creates an index into the index. The first time a package is queried about +//! (first time being for an entire computer) Cargo will load the contents +//! (slowly via libgit2) from the registry. It will then (slowly) parse every +//! single line to learn about its versions. Afterwards, however, Cargo will +//! emit a new file (a cache) which is amenable for speedily parsing in future +//! invocations. +//! +//! This cache file is currently organized by basically having the semver +//! version extracted from each JSON blob. That way Cargo can quickly and easily +//! parse all versions contained and which JSON blob they're associated with. +//! The JSON blob then doesn't actually need to get parsed unless the version is +//! parsed. +//! +//! Altogether the initial measurements of this shows a massive improvement for +//! Cargo null build performance. It's expected that the improvements earned +//! here will continue to grow over time in the sense that the previous +//! implementation (parse all lines each time) actually continues to slow down +//! over time as new versions of a crate are published. In any case when first +//! implemented a null build of Cargo itself would parse 3700 JSON blobs from +//! the registry and load 150 blobs from git. Afterwards it parses 150 JSON +//! blobs and loads 0 files git. Removing 200ms or more from Cargo's startup +//! time is certainly nothing to sneeze at! +//! +//! Note that this is just a high-level overview, there's of course lots of +//! details like invalidating caches and whatnot which are handled below, but +//! hopefully those are more obvious inline in the code itself. + +use std::collections::{HashMap, HashSet}; +use std::fs; +use std::path::Path; +use std::str; + +use log::info; +use semver::{Version, VersionReq}; + +use crate::core::dependency::Dependency; +use crate::core::{InternedString, PackageId, SourceId, Summary}; +use crate::sources::registry::{RegistryData, RegistryPackage}; +use crate::util::{internal, CargoResult, Config, Filesystem, ToSemver}; + +/// Crates.io treats hyphen and underscores as interchangeable, but the index and old Cargo do not. +/// Therefore, the index must store uncanonicalized version of the name so old Cargo's can find it. +/// This loop tries all possible combinations of switching hyphen and underscores to find the +/// uncanonicalized one. As all stored inputs have the correct spelling, we start with the spelling +/// as-provided. 
+struct UncanonicalizedIter<'s> { + input: &'s str, + num_hyphen_underscore: u32, + hyphen_combination_num: u16, +} + +impl<'s> UncanonicalizedIter<'s> { + fn new(input: &'s str) -> Self { + let num_hyphen_underscore = input.chars().filter(|&c| c == '_' || c == '-').count() as u32; + UncanonicalizedIter { + input, + num_hyphen_underscore, + hyphen_combination_num: 0, + } + } +} + +impl<'s> Iterator for UncanonicalizedIter<'s> { + type Item = String; + + fn next(&mut self) -> Option { + if self.hyphen_combination_num > 0 + && self.hyphen_combination_num.trailing_zeros() >= self.num_hyphen_underscore + { + return None; + } + + let ret = Some( + self.input + .chars() + .scan(0u16, |s, c| { + // the check against 15 here's to prevent + // shift overflow on inputs with more then 15 hyphens + if (c == '_' || c == '-') && *s <= 15 { + let switch = (self.hyphen_combination_num & (1u16 << *s)) > 0; + let out = if (c == '_') ^ switch { '_' } else { '-' }; + *s += 1; + Some(out) + } else { + Some(c) + } + }) + .collect(), + ); + self.hyphen_combination_num += 1; + ret + } +} + +#[test] +fn no_hyphen() { + assert_eq!( + UncanonicalizedIter::new("test").collect::>(), + vec!["test".to_string()] + ) +} + +#[test] +fn two_hyphen() { + assert_eq!( + UncanonicalizedIter::new("te-_st").collect::>(), + vec![ + "te-_st".to_string(), + "te__st".to_string(), + "te--st".to_string(), + "te_-st".to_string() + ] + ) +} + +#[test] +fn overflow_hyphen() { + assert_eq!( + UncanonicalizedIter::new("te-_-_-_-_-_-_-_-_-st") + .take(100) + .count(), + 100 + ) +} + +pub struct RegistryIndex<'cfg> { + source_id: SourceId, + path: Filesystem, + summaries_cache: HashMap, + config: &'cfg Config, +} + +/// An internal cache of summaries for a particular package. +/// +/// A list of summaries are loaded from disk via one of two methods: +/// +/// 1. Primarily Cargo will parse the corresponding file for a crate in the +/// upstream crates.io registry. That's just a JSON blob per line which we +/// can parse, extract the version, and then store here. +/// +/// 2. Alternatively, if Cargo has previously run, we'll have a cached index of +/// dependencies for the upstream index. This is a file that Cargo maintains +/// lazily on the local filesystem and is much faster to parse since it +/// doesn't involve parsing all of the JSON. +/// +/// The outward-facing interface of this doesn't matter too much where it's +/// loaded from, but it's important when reading the implementation to note that +/// we try to parse as little as possible! +#[derive(Default)] +struct Summaries { + /// A raw vector of uninterpreted bytes. This is what `Unparsed` start/end + /// fields are indexes into. If a `Summaries` is loaded from the crates.io + /// index then this field will be empty since nothing is `Unparsed`. + raw_data: Vec, + + /// All known versions of a crate, keyed from their `Version` to the + /// possibly parsed or unparsed version of the full summary. + versions: HashMap, +} + +/// A lazily parsed `IndexSummary`. +enum MaybeIndexSummary { + /// A summary which has not been parsed, The `start` and `end` are pointers + /// into `Summaries::raw_data` which this is an entry of. + Unparsed { start: usize, end: usize }, + + /// An actually parsed summary. + Parsed(IndexSummary), +} + +/// A parsed representation of a summary from the index. +/// +/// In addition to a full `Summary` we have a few auxiliary pieces of +/// information liked `yanked` and what the checksum hash is. 
+pub struct IndexSummary { + pub summary: Summary, + pub yanked: bool, + pub hash: String, +} + +/// A representation of the cache on disk that Cargo maintains of summaries. +/// Cargo will initially parse all summaries in the registry and will then +/// serialize that into this form and place it in a new location on disk, +/// ensuring that access in the future is much speedier. +#[derive(Default)] +struct SummariesCache<'a> { + versions: Vec<(Version, &'a [u8])>, +} + +impl<'cfg> RegistryIndex<'cfg> { + pub fn new( + source_id: SourceId, + path: &Filesystem, + config: &'cfg Config, + ) -> RegistryIndex<'cfg> { + RegistryIndex { + source_id, + path: path.clone(), + summaries_cache: HashMap::new(), + config, + } + } + + /// Returns the hash listed for a specified `PackageId`. + pub fn hash(&mut self, pkg: PackageId, load: &mut dyn RegistryData) -> CargoResult { + let req = VersionReq::exact(pkg.version()); + let summary = self + .summaries(pkg.name(), &req, load)? + .next() + .ok_or_else(|| internal(format!("no hash listed for {}", pkg)))?; + Ok(summary.hash.clone()) + } + + /// Load a list of summaries for `name` package in this registry which + /// match `req` + /// + /// This function will semantically parse the on-disk index, match all + /// versions, and then return an iterator over all summaries which matched. + /// Internally there's quite a few layer of caching to amortize this cost + /// though since this method is called quite a lot on null builds in Cargo. + pub fn summaries<'a, 'b>( + &'a mut self, + name: InternedString, + req: &'b VersionReq, + load: &mut dyn RegistryData, + ) -> CargoResult + 'b> + where + 'a: 'b, + { + let source_id = self.source_id; + + // First up actually parse what summaries we have available. If Cargo + // has run previously this will parse a Cargo-specific cache file rather + // than the registry itself. In effect this is intended to be a quite + // cheap operation. + let summaries = self.load_summaries(name, load)?; + + // Iterate over our summaries, extract all relevant ones which match our + // version requirement, and then parse all corresponding rows in the + // registry. As a reminder this `summaries` method is called for each + // entry in a lock file on every build, so we want to absolutely + // minimize the amount of work being done here and parse as little as + // necessary. + let raw_data = &summaries.raw_data; + Ok(summaries + .versions + .iter_mut() + .filter_map(move |(k, v)| if req.matches(k) { Some(v) } else { None }) + .filter_map(move |maybe| match maybe.parse(raw_data, source_id) { + Ok(summary) => Some(summary), + Err(e) => { + info!("failed to parse `{}` registry package: {}", name, e); + None + } + })) + } + + fn load_summaries( + &mut self, + name: InternedString, + load: &mut dyn RegistryData, + ) -> CargoResult<&mut Summaries> { + // If we've previously loaded what versions are present for `name`, just + // return that since our cache should still be valid. + if self.summaries_cache.contains_key(&name) { + return Ok(self.summaries_cache.get_mut(&name).unwrap()); + } + + // Prepare the `RegistryData` which will lazily initialize internal data + // structures. + load.prepare()?; + + // let root = self.config.assert_package_cache_locked(&self.path); + let root = load.assert_index_locked(&self.path); + let cache_root = root.join(".cache"); + let index_version = load.current_version(); + + // See module comment in `registry/mod.rs` for why this is structured + // the way it is. 
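        // A few worked examples of the layout computed below, using
        // illustrative crate names: "a" maps to "1/a", "io" to "2/io",
        // "syn" to "3/s/syn", and "serde" to "se/rd/serde" (for longer names
        // the first two and next two characters of the lowercased name become
        // nested directories).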
+ let fs_name = name + .chars() + .flat_map(|c| c.to_lowercase()) + .collect::(); + let raw_path = match fs_name.len() { + 1 => format!("1/{}", fs_name), + 2 => format!("2/{}", fs_name), + 3 => format!("3/{}/{}", &fs_name[..1], fs_name), + _ => format!("{}/{}/{}", &fs_name[0..2], &fs_name[2..4], fs_name), + }; + + // Attempt to handle misspellings by searching for a chain of related + // names to the original `raw_path` name. Only return summaries + // associated with the first hit, however. The resolver will later + // reject any candidates that have the wrong name, and with this it'll + // along the way produce helpful "did you mean?" suggestions. + for path in UncanonicalizedIter::new(&raw_path).take(1024) { + let summaries = Summaries::parse( + index_version.as_ref().map(|s| &**s), + root, + &cache_root, + path.as_ref(), + self.source_id, + load, + self.config, + )?; + if let Some(summaries) = summaries { + self.summaries_cache.insert(name, summaries); + return Ok(self.summaries_cache.get_mut(&name).unwrap()); + } + } + + // If nothing was found then this crate doesn't exists, so just use an + // empty `Summaries` list. + self.summaries_cache.insert(name, Summaries::default()); + Ok(self.summaries_cache.get_mut(&name).unwrap()) + } + + pub fn query_inner( + &mut self, + dep: &Dependency, + load: &mut dyn RegistryData, + yanked_whitelist: &HashSet, + f: &mut dyn FnMut(Summary), + ) -> CargoResult<()> { + if self.config.offline() + && self.query_inner_with_online(dep, load, yanked_whitelist, f, false)? != 0 + { + return Ok(()); + // If offline, and there are no matches, try again with online. + // This is necessary for dependencies that are not used (such as + // target-cfg or optional), but are not downloaded. Normally the + // build should succeed if they are not downloaded and not used, + // but they still need to resolve. If they are actually needed + // then cargo will fail to download and an error message + // indicating that the required dependency is unavailable while + // offline will be displayed. + } + self.query_inner_with_online(dep, load, yanked_whitelist, f, true)?; + Ok(()) + } + + fn query_inner_with_online( + &mut self, + dep: &Dependency, + load: &mut dyn RegistryData, + yanked_whitelist: &HashSet, + f: &mut dyn FnMut(Summary), + online: bool, + ) -> CargoResult { + let source_id = self.source_id; + let summaries = self + .summaries(dep.package_name(), dep.version_req(), load)? + // First filter summaries for `--offline`. If we're online then + // everything is a candidate, otherwise if we're offline we're only + // going to consider candidates which are actually present on disk. + // + // Note: This particular logic can cause problems with + // optional dependencies when offline. If at least 1 version + // of an optional dependency is downloaded, but that version + // does not satisfy the requirements, then resolution will + // fail. Unfortunately, whether or not something is optional + // is not known here. + .filter(|s| (online || load.is_crate_downloaded(s.summary.package_id()))) + // Next filter out all yanked packages. Some yanked packages may + // leak throguh if they're in a whitelist (aka if they were + // previously in `Cargo.lock` + .filter(|s| !s.yanked || yanked_whitelist.contains(&s.summary.package_id())) + .map(|s| s.summary.clone()); + + // Handle `cargo update --precise` here. 
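        // (For example, after `cargo update -p foo --precise 1.2.4` while
        // `foo 1.2.3` is currently locked, the precise string is assumed to
        // take the shape `foo=1.2.3->1.2.4`, which is what the parsing below
        // expects.)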
If specified, our own source + // will have a precise version listed of the form + // `=o->` where `` is the name of a crate on + // this source, `` is the version installed and ` is the + // version requested (argument to `--precise`). + let name = dep.package_name().as_str(); + let summaries = summaries.filter(|s| match source_id.precise() { + Some(p) if p.starts_with(name) && p[name.len()..].starts_with('=') => { + let mut vers = p[name.len() + 1..].splitn(2, "->"); + if dep + .version_req() + .matches(&vers.next().unwrap().to_semver().unwrap()) + { + vers.next().unwrap() == s.version().to_string() + } else { + true + } + } + _ => true, + }); + + let mut count = 0; + for summary in summaries { + f(summary); + count += 1; + } + Ok(count) + } + + pub fn is_yanked(&mut self, pkg: PackageId, load: &mut dyn RegistryData) -> CargoResult { + let req = VersionReq::exact(pkg.version()); + let found = self + .summaries(pkg.name(), &req, load)? + .any(|summary| summary.yanked); + Ok(found) + } +} + +impl Summaries { + /// Parse out a `Summaries` instances from on-disk state. + /// + /// This will attempt to prefer parsing a previous cache file that already + /// exists from a previous invocation of Cargo (aka you're typing `cargo + /// build` again after typing it previously). If parsing fails or the cache + /// isn't found, then we take a slower path which loads the full descriptor + /// for `relative` from the underlying index (aka typically libgit2 with + /// crates.io) and then parse everything in there. + /// + /// * `index_version` - a version string to describe the current state of + /// the index which for remote registries is the current git sha and + /// for local registries is not available. + /// * `root` - this is the root argument passed to `load` + /// * `cache_root` - this is the root on the filesystem itself of where to + /// store cache files. + /// * `relative` - this is the file we're loading from cache or the index + /// data + /// * `source_id` - the registry's SourceId used when parsing JSON blobs to + /// create summaries. + /// * `load` - the actual index implementation which may be very slow to + /// call. We avoid this if we can. + pub fn parse( + index_version: Option<&str>, + root: &Path, + cache_root: &Path, + relative: &Path, + source_id: SourceId, + load: &mut dyn RegistryData, + config: &Config, + ) -> CargoResult> { + // First up, attempt to load the cache. This could fail for all manner + // of reasons, but consider all of them non-fatal and just log their + // occurrence in case anyone is debugging anything. + let cache_path = cache_root.join(relative); + let mut cache_contents = None; + if let Some(index_version) = index_version { + match fs::read(&cache_path) { + Ok(contents) => match Summaries::parse_cache(contents, index_version) { + Ok(s) => { + log::debug!("fast path for registry cache of {:?}", relative); + if cfg!(debug_assertions) { + cache_contents = Some(s.raw_data); + } else { + return Ok(Some(s)); + } + } + Err(e) => { + log::debug!("failed to parse {:?} cache: {}", relative, e); + } + }, + Err(e) => log::debug!("cache missing for {:?} error: {}", relative, e), + } + } + + // This is the fallback path where we actually talk to libgit2 to load + // information. 
Here we parse every single line in the index (as we need + // to find the versions) + log::debug!("slow path for {:?}", relative); + let mut ret = Summaries::default(); + let mut hit_closure = false; + let mut cache_bytes = None; + let err = load.load(root, relative, &mut |contents| { + ret.raw_data = contents.to_vec(); + let mut cache = SummariesCache::default(); + hit_closure = true; + for line in split(contents, b'\n') { + // Attempt forwards-compatibility on the index by ignoring + // everything that we ourselves don't understand, that should + // allow future cargo implementations to break the + // interpretation of each line here and older cargo will simply + // ignore the new lines. + let summary = match IndexSummary::parse(line, source_id) { + Ok(summary) => summary, + Err(e) => { + log::info!("failed to parse {:?} registry package: {}", relative, e); + continue; + } + }; + let version = summary.summary.package_id().version().clone(); + cache.versions.push((version.clone(), line)); + ret.versions.insert(version, summary.into()); + } + if let Some(index_version) = index_version { + cache_bytes = Some(cache.serialize(index_version)); + } + Ok(()) + }); + + // We ignore lookup failures as those are just crates which don't exist + // or we haven't updated the registry yet. If we actually ran the + // closure though then we care about those errors. + if !hit_closure { + debug_assert!(cache_contents.is_none()); + return Ok(None); + } + err?; + + // If we've got debug assertions enabled and the cache was previously + // present and considered fresh this is where the debug assertions + // actually happens to verify that our cache is indeed fresh and + // computes exactly the same value as before. + if cfg!(debug_assertions) && cache_contents.is_some() { + assert_eq!(cache_bytes, cache_contents); + } + + // Once we have our `cache_bytes` which represents the `Summaries` we're + // about to return, write that back out to disk so future Cargo + // invocations can use it. + // + // This is opportunistic so we ignore failure here but are sure to log + // something in case of error. + if let Some(cache_bytes) = cache_bytes { + if fs::create_dir_all(cache_path.parent().unwrap()).is_ok() { + let path = Filesystem::new(cache_path.clone()); + config.assert_package_cache_locked(&path); + if let Err(e) = fs::write(cache_path, cache_bytes) { + log::info!("failed to write cache: {}", e); + } + } + } + + Ok(Some(ret)) + } + + /// Parses an open `File` which represents information previously cached by + /// Cargo. + pub fn parse_cache(contents: Vec, last_index_update: &str) -> CargoResult { + let cache = SummariesCache::parse(&contents, last_index_update)?; + let mut ret = Summaries::default(); + for (version, summary) in cache.versions { + let (start, end) = subslice_bounds(&contents, summary); + ret.versions + .insert(version, MaybeIndexSummary::Unparsed { start, end }); + } + ret.raw_data = contents; + return Ok(ret); + + // Returns the start/end offsets of `inner` with `outer`. Asserts that + // `inner` is a subslice of `outer`. + fn subslice_bounds(outer: &[u8], inner: &[u8]) -> (usize, usize) { + let outer_start = outer.as_ptr() as usize; + let outer_end = outer_start + outer.len(); + let inner_start = inner.as_ptr() as usize; + let inner_end = inner_start + inner.len(); + assert!(inner_start >= outer_start); + assert!(inner_end <= outer_end); + (inner_start - outer_start, inner_end - outer_start) + } + } +} + +// Implementation of serializing/deserializing the cache of summaries on disk. 
+// Currently the format looks like: +// +// +--------------+-------------+---+ +// | version byte | git sha rev | 0 | +// +--------------+-------------+---+ +// +// followed by... +// +// +----------------+---+------------+---+ +// | semver version | 0 | JSON blob | 0 | ... +// +----------------+---+------------+---+ +// +// The idea is that this is a very easy file for Cargo to parse in future +// invocations. The read from disk should be quite fast and then afterwards all +// we need to know is what versions correspond to which JSON blob. +// +// The leading version byte is intended to ensure that there's some level of +// future compatibility against changes to this cache format so if different +// versions of Cargo share the same cache they don't get too confused. The git +// sha lets us know when the file needs to be regenerated (it needs regeneration +// whenever the index itself updates). + +const CURRENT_CACHE_VERSION: u8 = 1; + +impl<'a> SummariesCache<'a> { + fn parse(data: &'a [u8], last_index_update: &str) -> CargoResult> { + // NB: keep this method in sync with `serialize` below + let (first_byte, rest) = data + .split_first() + .ok_or_else(|| failure::format_err!("malformed cache"))?; + if *first_byte != CURRENT_CACHE_VERSION { + failure::bail!("looks like a different Cargo's cache, bailing out"); + } + let mut iter = split(rest, 0); + if let Some(update) = iter.next() { + if update != last_index_update.as_bytes() { + failure::bail!( + "cache out of date: current index ({}) != cache ({})", + last_index_update, + str::from_utf8(update)?, + ) + } + } else { + failure::bail!("malformed file"); + } + let mut ret = SummariesCache::default(); + while let Some(version) = iter.next() { + let version = str::from_utf8(version)?; + let version = Version::parse(version)?; + let summary = iter.next().unwrap(); + ret.versions.push((version, summary)); + } + Ok(ret) + } + + fn serialize(&self, index_version: &str) -> Vec { + // NB: keep this method in sync with `parse` above + let size = self + .versions + .iter() + .map(|(_version, data)| (10 + data.len())) + .sum(); + let mut contents = Vec::with_capacity(size); + contents.push(CURRENT_CACHE_VERSION); + contents.extend_from_slice(index_version.as_bytes()); + contents.push(0); + for (version, data) in self.versions.iter() { + contents.extend_from_slice(version.to_string().as_bytes()); + contents.push(0); + contents.extend_from_slice(data); + contents.push(0); + } + contents + } +} + +impl MaybeIndexSummary { + /// Parses this "maybe a summary" into a `Parsed` for sure variant. + /// + /// Does nothing if this is already `Parsed`, and otherwise the `raw_data` + /// passed in is sliced with the bounds in `Unparsed` and then actually + /// parsed. + fn parse(&mut self, raw_data: &[u8], source_id: SourceId) -> CargoResult<&IndexSummary> { + let (start, end) = match self { + MaybeIndexSummary::Unparsed { start, end } => (*start, *end), + MaybeIndexSummary::Parsed(summary) => return Ok(summary), + }; + let summary = IndexSummary::parse(&raw_data[start..end], source_id)?; + *self = MaybeIndexSummary::Parsed(summary); + match self { + MaybeIndexSummary::Unparsed { .. } => unreachable!(), + MaybeIndexSummary::Parsed(summary) => Ok(summary), + } + } +} + +impl From for MaybeIndexSummary { + fn from(summary: IndexSummary) -> MaybeIndexSummary { + MaybeIndexSummary::Parsed(summary) + } +} + +impl IndexSummary { + /// Parses a line from the registry's index file into an `IndexSummary` for + /// a package. 
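// [Illustrative sketch, not part of this patch] The cache layout documented
// above is `version byte | index sha | 0` followed by repeated
// `semver | 0 | json blob | 0` records, which is what `serialize` and `parse`
// implement. A standalone round-trip of that framing over plain strings:
fn encode(index_sha: &str, entries: &[(&str, &str)]) -> Vec<u8> {
    let mut out = vec![1u8]; // cache format version byte
    out.extend_from_slice(index_sha.as_bytes());
    out.push(0);
    for (version, blob) in entries {
        out.extend_from_slice(version.as_bytes());
        out.push(0);
        out.extend_from_slice(blob.as_bytes());
        out.push(0);
    }
    out
}

fn decode(data: &[u8]) -> Option<Vec<(String, String)>> {
    let (&version, rest) = data.split_first()?;
    if version != 1 {
        return None; // a different Cargo wrote this cache
    }
    let mut parts = rest.split(|&b| b == 0);
    let _index_sha = parts.next()?;
    let mut entries = Vec::new();
    while let (Some(v), Some(blob)) = (parts.next(), parts.next()) {
        if v.is_empty() {
            break; // trailing terminator
        }
        entries.push((
            String::from_utf8_lossy(v).into_owned(),
            String::from_utf8_lossy(blob).into_owned(),
        ));
    }
    Some(entries)
}
// e.g. `decode(&encode("deadbeef", &[("1.0.0", "{}")]))` yields the same
// version/blob pairs back.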
+ /// + /// The `line` provided is expected to be valid JSON. + fn parse(line: &[u8], source_id: SourceId) -> CargoResult { + let RegistryPackage { + name, + vers, + cksum, + deps, + features, + yanked, + links, + } = serde_json::from_slice(line)?; + log::trace!("json parsed registry {}/{}", name, vers); + let pkgid = PackageId::new(&name, &vers, source_id)?; + let deps = deps + .into_iter() + .map(|dep| dep.into_dep(source_id)) + .collect::>>()?; + let ftrs = features + .iter() + .map(|(k, v)| (k.clone(), (None, v.clone()))) + .collect(); + let mut summary = Summary::new(pkgid, deps, &ftrs, links, false)?; + summary.set_checksum(cksum.clone()); + Ok(IndexSummary { + summary, + yanked: yanked.unwrap_or(false), + hash: cksum, + }) + } +} + +fn split<'a>(haystack: &'a [u8], needle: u8) -> impl Iterator + 'a { + struct Split<'a> { + haystack: &'a [u8], + needle: u8, + } + + impl<'a> Iterator for Split<'a> { + type Item = &'a [u8]; + + fn next(&mut self) -> Option<&'a [u8]> { + if self.haystack.is_empty() { + return None; + } + let (ret, remaining) = match memchr::memchr(self.needle, self.haystack) { + Some(pos) => (&self.haystack[..pos], &self.haystack[pos + 1..]), + None => (self.haystack, &[][..]), + }; + self.haystack = remaining; + Some(ret) + } + } + + Split { haystack, needle } +} diff --git a/src/cargo/util/cfg.rs b/src/cargo/util/cfg.rs index 4c4ad232df4..48fb5fa431b 100644 --- a/src/cargo/util/cfg.rs +++ b/src/cargo/util/cfg.rs @@ -1,278 +1,331 @@ -use std::fmt; -use std::iter; -use std::str::{self, FromStr}; - -use crate::util::CargoResult; - -#[derive(Eq, PartialEq, Hash, Ord, PartialOrd, Clone, Debug)] -pub enum Cfg { - Name(String), - KeyPair(String, String), -} - -#[derive(Eq, PartialEq, Hash, Ord, PartialOrd, Clone, Debug)] -pub enum CfgExpr { - Not(Box), - All(Vec), - Any(Vec), - Value(Cfg), -} - -#[derive(PartialEq)] -enum Token<'a> { - LeftParen, - RightParen, - Ident(&'a str), - Comma, - Equals, - String(&'a str), -} - -struct Tokenizer<'a> { - s: iter::Peekable>, - orig: &'a str, -} - -struct Parser<'a> { - t: iter::Peekable>, -} - -impl FromStr for Cfg { - type Err = failure::Error; - - fn from_str(s: &str) -> CargoResult { - let mut p = Parser::new(s); - let e = p.cfg()?; - if p.t.next().is_some() { - failure::bail!("malformed cfg value or key/value pair: `{}`", s) - } - Ok(e) - } -} - -impl fmt::Display for Cfg { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - Cfg::Name(ref s) => s.fmt(f), - Cfg::KeyPair(ref k, ref v) => write!(f, "{} = \"{}\"", k, v), - } - } -} - -impl CfgExpr { - /// Utility function to check if the key, "cfg(..)" matches the `target_cfg` - pub fn matches_key(key: &str, target_cfg: &[Cfg]) -> bool { - if key.starts_with("cfg(") && key.ends_with(')') { - let cfg = &key[4..key.len() - 1]; - - CfgExpr::from_str(cfg) - .ok() - .map(|ce| ce.matches(target_cfg)) - .unwrap_or(false) - } else { - false - } - } - - pub fn matches(&self, cfg: &[Cfg]) -> bool { - match *self { - CfgExpr::Not(ref e) => !e.matches(cfg), - CfgExpr::All(ref e) => e.iter().all(|e| e.matches(cfg)), - CfgExpr::Any(ref e) => e.iter().any(|e| e.matches(cfg)), - CfgExpr::Value(ref e) => cfg.contains(e), - } - } -} - -impl FromStr for CfgExpr { - type Err = failure::Error; - - fn from_str(s: &str) -> CargoResult { - let mut p = Parser::new(s); - let e = p.expr()?; - if p.t.next().is_some() { - failure::bail!( - "can only have one cfg-expression, consider using all() or \ - any() explicitly" - ) - } - Ok(e) - } -} - -impl fmt::Display for CfgExpr { - fn 
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - CfgExpr::Not(ref e) => write!(f, "not({})", e), - CfgExpr::All(ref e) => write!(f, "all({})", CommaSep(e)), - CfgExpr::Any(ref e) => write!(f, "any({})", CommaSep(e)), - CfgExpr::Value(ref e) => write!(f, "{}", e), - } - } -} - -struct CommaSep<'a, T>(&'a [T]); - -impl<'a, T: fmt::Display> fmt::Display for CommaSep<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - for (i, v) in self.0.iter().enumerate() { - if i > 0 { - write!(f, ", ")?; - } - write!(f, "{}", v)?; - } - Ok(()) - } -} - -impl<'a> Parser<'a> { - fn new(s: &'a str) -> Parser<'a> { - Parser { - t: Tokenizer { - s: s.char_indices().peekable(), - orig: s, - } - .peekable(), - } - } - - fn expr(&mut self) -> CargoResult { - match self.t.peek() { - Some(&Ok(Token::Ident(op @ "all"))) | Some(&Ok(Token::Ident(op @ "any"))) => { - self.t.next(); - let mut e = Vec::new(); - self.eat(&Token::LeftParen)?; - while !self.r#try(&Token::RightParen) { - e.push(self.expr()?); - if !self.r#try(&Token::Comma) { - self.eat(&Token::RightParen)?; - break; - } - } - if op == "all" { - Ok(CfgExpr::All(e)) - } else { - Ok(CfgExpr::Any(e)) - } - } - Some(&Ok(Token::Ident("not"))) => { - self.t.next(); - self.eat(&Token::LeftParen)?; - let e = self.expr()?; - self.eat(&Token::RightParen)?; - Ok(CfgExpr::Not(Box::new(e))) - } - Some(&Ok(..)) => self.cfg().map(CfgExpr::Value), - Some(&Err(..)) => Err(self.t.next().unwrap().err().unwrap()), - None => failure::bail!( - "expected start of a cfg expression, \ - found nothing" - ), - } - } - - fn cfg(&mut self) -> CargoResult { - match self.t.next() { - Some(Ok(Token::Ident(name))) => { - let e = if self.r#try(&Token::Equals) { - let val = match self.t.next() { - Some(Ok(Token::String(s))) => s, - Some(Ok(t)) => failure::bail!("expected a string, found {}", t.classify()), - Some(Err(e)) => return Err(e), - None => failure::bail!("expected a string, found nothing"), - }; - Cfg::KeyPair(name.to_string(), val.to_string()) - } else { - Cfg::Name(name.to_string()) - }; - Ok(e) - } - Some(Ok(t)) => failure::bail!("expected identifier, found {}", t.classify()), - Some(Err(e)) => Err(e), - None => failure::bail!("expected identifier, found nothing"), - } - } - - fn r#try(&mut self, token: &Token<'a>) -> bool { - match self.t.peek() { - Some(&Ok(ref t)) if token == t => {} - _ => return false, - } - self.t.next(); - true - } - - fn eat(&mut self, token: &Token<'a>) -> CargoResult<()> { - match self.t.next() { - Some(Ok(ref t)) if token == t => Ok(()), - Some(Ok(t)) => failure::bail!("expected {}, found {}", token.classify(), t.classify()), - Some(Err(e)) => Err(e), - None => failure::bail!("expected {}, but cfg expr ended", token.classify()), - } - } -} - -impl<'a> Iterator for Tokenizer<'a> { - type Item = CargoResult>; - - fn next(&mut self) -> Option>> { - loop { - match self.s.next() { - Some((_, ' ')) => {} - Some((_, '(')) => return Some(Ok(Token::LeftParen)), - Some((_, ')')) => return Some(Ok(Token::RightParen)), - Some((_, ',')) => return Some(Ok(Token::Comma)), - Some((_, '=')) => return Some(Ok(Token::Equals)), - Some((start, '"')) => { - while let Some((end, ch)) = self.s.next() { - if ch == '"' { - return Some(Ok(Token::String(&self.orig[start + 1..end]))); - } - } - return Some(Err(failure::format_err!("unterminated string in cfg"))); - } - Some((start, ch)) if is_ident_start(ch) => { - while let Some(&(end, ch)) = self.s.peek() { - if !is_ident_rest(ch) { - return 
Some(Ok(Token::Ident(&self.orig[start..end]))); - } else { - self.s.next(); - } - } - return Some(Ok(Token::Ident(&self.orig[start..]))); - } - Some((_, ch)) => { - return Some(Err(failure::format_err!( - "unexpected character in \ - cfg `{}`, expected parens, \ - a comma, an identifier, or \ - a string", - ch - ))); - } - None => return None, - } - } - } -} - -fn is_ident_start(ch: char) -> bool { - ch == '_' || ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') -} - -fn is_ident_rest(ch: char) -> bool { - is_ident_start(ch) || ('0' <= ch && ch <= '9') -} - -impl<'a> Token<'a> { - fn classify(&self) -> &str { - match *self { - Token::LeftParen => "`(`", - Token::RightParen => "`)`", - Token::Ident(..) => "an identifier", - Token::Comma => "`,`", - Token::Equals => "`=`", - Token::String(..) => "a string", - } - } -} +use std::fmt; +use std::iter; + +use std::str::{self, FromStr}; + +use serde::ser; + +use crate::util::errors::CargoResultExt; +use crate::util::CargoResult; + +#[derive(Eq, PartialEq, Hash, Ord, PartialOrd, Clone, Debug)] +pub enum Cfg { + Name(String), + KeyPair(String, String), +} + +#[derive(Eq, PartialEq, Hash, Ord, PartialOrd, Clone, Debug)] +pub enum CfgExpr { + Not(Box), + All(Vec), + Any(Vec), + Value(Cfg), +} + +#[derive(PartialEq)] +enum Token<'a> { + LeftParen, + RightParen, + Ident(&'a str), + Comma, + Equals, + String(&'a str), +} + +#[derive(Eq, PartialEq, Hash, Ord, PartialOrd, Clone, Debug)] +pub enum Platform { + Name(String), + Cfg(CfgExpr), +} + +struct Tokenizer<'a> { + s: iter::Peekable>, + orig: &'a str, +} + +struct Parser<'a> { + t: iter::Peekable>, +} + +impl FromStr for Cfg { + type Err = failure::Error; + + fn from_str(s: &str) -> CargoResult { + let mut p = Parser::new(s); + let e = p.cfg()?; + if p.t.next().is_some() { + failure::bail!("malformed cfg value or key/value pair: `{}`", s) + } + Ok(e) + } +} + +impl fmt::Display for Cfg { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + Cfg::Name(ref s) => s.fmt(f), + Cfg::KeyPair(ref k, ref v) => write!(f, "{} = \"{}\"", k, v), + } + } +} + +impl CfgExpr { + /// Utility function to check if the key, "cfg(..)" matches the `target_cfg` + pub fn matches_key(key: &str, target_cfg: &[Cfg]) -> bool { + if key.starts_with("cfg(") && key.ends_with(')') { + let cfg = &key[4..key.len() - 1]; + + CfgExpr::from_str(cfg) + .ok() + .map(|ce| ce.matches(target_cfg)) + .unwrap_or(false) + } else { + false + } + } + + pub fn matches(&self, cfg: &[Cfg]) -> bool { + match *self { + CfgExpr::Not(ref e) => !e.matches(cfg), + CfgExpr::All(ref e) => e.iter().all(|e| e.matches(cfg)), + CfgExpr::Any(ref e) => e.iter().any(|e| e.matches(cfg)), + CfgExpr::Value(ref e) => cfg.contains(e), + } + } +} + +impl FromStr for CfgExpr { + type Err = failure::Error; + + fn from_str(s: &str) -> CargoResult { + let mut p = Parser::new(s); + let e = p.expr()?; + if p.t.next().is_some() { + failure::bail!( + "can only have one cfg-expression, consider using all() or \ + any() explicitly" + ) + } + Ok(e) + } +} + +impl fmt::Display for CfgExpr { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + CfgExpr::Not(ref e) => write!(f, "not({})", e), + CfgExpr::All(ref e) => write!(f, "all({})", CommaSep(e)), + CfgExpr::Any(ref e) => write!(f, "any({})", CommaSep(e)), + CfgExpr::Value(ref e) => write!(f, "{}", e), + } + } +} + +struct CommaSep<'a, T>(&'a [T]); + +impl<'a, T: fmt::Display> fmt::Display for CommaSep<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + for 
(i, v) in self.0.iter().enumerate() { + if i > 0 { + write!(f, ", ")?; + } + write!(f, "{}", v)?; + } + Ok(()) + } +} + +impl<'a> Parser<'a> { + fn new(s: &'a str) -> Parser<'a> { + Parser { + t: Tokenizer { + s: s.char_indices().peekable(), + orig: s, + } + .peekable(), + } + } + + fn expr(&mut self) -> CargoResult { + match self.t.peek() { + Some(&Ok(Token::Ident(op @ "all"))) | Some(&Ok(Token::Ident(op @ "any"))) => { + self.t.next(); + let mut e = Vec::new(); + self.eat(&Token::LeftParen)?; + while !self.r#try(&Token::RightParen) { + e.push(self.expr()?); + if !self.r#try(&Token::Comma) { + self.eat(&Token::RightParen)?; + break; + } + } + if op == "all" { + Ok(CfgExpr::All(e)) + } else { + Ok(CfgExpr::Any(e)) + } + } + Some(&Ok(Token::Ident("not"))) => { + self.t.next(); + self.eat(&Token::LeftParen)?; + let e = self.expr()?; + self.eat(&Token::RightParen)?; + Ok(CfgExpr::Not(Box::new(e))) + } + Some(&Ok(..)) => self.cfg().map(CfgExpr::Value), + Some(&Err(..)) => Err(self.t.next().unwrap().err().unwrap()), + None => failure::bail!( + "expected start of a cfg expression, \ + found nothing" + ), + } + } + + fn cfg(&mut self) -> CargoResult { + match self.t.next() { + Some(Ok(Token::Ident(name))) => { + let e = if self.r#try(&Token::Equals) { + let val = match self.t.next() { + Some(Ok(Token::String(s))) => s, + Some(Ok(t)) => failure::bail!("expected a string, found {}", t.classify()), + Some(Err(e)) => return Err(e), + None => failure::bail!("expected a string, found nothing"), + }; + Cfg::KeyPair(name.to_string(), val.to_string()) + } else { + Cfg::Name(name.to_string()) + }; + Ok(e) + } + Some(Ok(t)) => failure::bail!("expected identifier, found {}", t.classify()), + Some(Err(e)) => Err(e), + None => failure::bail!("expected identifier, found nothing"), + } + } + + fn r#try(&mut self, token: &Token<'a>) -> bool { + match self.t.peek() { + Some(&Ok(ref t)) if token == t => {} + _ => return false, + } + self.t.next(); + true + } + + fn eat(&mut self, token: &Token<'a>) -> CargoResult<()> { + match self.t.next() { + Some(Ok(ref t)) if token == t => Ok(()), + Some(Ok(t)) => failure::bail!("expected {}, found {}", token.classify(), t.classify()), + Some(Err(e)) => Err(e), + None => failure::bail!("expected {}, but cfg expr ended", token.classify()), + } + } +} + +impl<'a> Iterator for Tokenizer<'a> { + type Item = CargoResult>; + + fn next(&mut self) -> Option>> { + loop { + match self.s.next() { + Some((_, ' ')) => {} + Some((_, '(')) => return Some(Ok(Token::LeftParen)), + Some((_, ')')) => return Some(Ok(Token::RightParen)), + Some((_, ',')) => return Some(Ok(Token::Comma)), + Some((_, '=')) => return Some(Ok(Token::Equals)), + Some((start, '"')) => { + while let Some((end, ch)) = self.s.next() { + if ch == '"' { + return Some(Ok(Token::String(&self.orig[start + 1..end]))); + } + } + return Some(Err(failure::format_err!("unterminated string in cfg"))); + } + Some((start, ch)) if is_ident_start(ch) => { + while let Some(&(end, ch)) = self.s.peek() { + if !is_ident_rest(ch) { + return Some(Ok(Token::Ident(&self.orig[start..end]))); + } else { + self.s.next(); + } + } + return Some(Ok(Token::Ident(&self.orig[start..]))); + } + Some((_, ch)) => { + return Some(Err(failure::format_err!( + "unexpected character in \ + cfg `{}`, expected parens, \ + a comma, an identifier, or \ + a string", + ch + ))); + } + None => return None, + } + } + } +} + +fn is_ident_start(ch: char) -> bool { + ch == '_' || ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') +} + +fn is_ident_rest(ch: char) -> bool { 
+ is_ident_start(ch) || ('0' <= ch && ch <= '9') +} + +impl<'a> Token<'a> { + fn classify(&self) -> &str { + match *self { + Token::LeftParen => "`(`", + Token::RightParen => "`)`", + Token::Ident(..) => "an identifier", + Token::Comma => "`,`", + Token::Equals => "`=`", + Token::String(..) => "a string", + } + } +} + +impl Platform { + pub fn matches(&self, name: &str, cfg: &[Cfg]) -> bool { + match *self { + Platform::Name(ref p) => p == name, + Platform::Cfg(ref p) => p.matches(cfg), + } + } +} + +impl ser::Serialize for Platform { + fn serialize(&self, s: S) -> Result + where + S: ser::Serializer, + { + self.to_string().serialize(s) + } +} + +impl FromStr for Platform { + type Err = failure::Error; + + fn from_str(s: &str) -> CargoResult { + if s.starts_with("cfg(") && s.ends_with(')') { + let s = &s[4..s.len() - 1]; + let p = s.parse().map(Platform::Cfg).chain_err(|| { + failure::format_err!("failed to parse `{}` as a cfg expression", s) + })?; + Ok(p) + } else { + Ok(Platform::Name(s.to_string())) + } + } +} + +impl fmt::Display for Platform { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + Platform::Name(ref n) => n.fmt(f), + Platform::Cfg(ref e) => write!(f, "cfg({})", e), + } + } +} diff --git a/src/cargo/util/mod.rs b/src/cargo/util/mod.rs index 50fbd8c2b1b..c342f22747b 100644 --- a/src/cargo/util/mod.rs +++ b/src/cargo/util/mod.rs @@ -1,89 +1,89 @@ -use std::time::Duration; - -pub use self::cfg::{Cfg, CfgExpr}; -pub use self::config::{homedir, Config, ConfigValue}; -pub use self::dependency_queue::DependencyQueue; -pub use self::diagnostic_server::RustfixDiagnosticServer; -pub use self::errors::{internal, process_error}; -pub use self::errors::{CargoResult, CargoResultExt, CliResult, Test}; -pub use self::errors::{CargoTestError, CliError, ProcessError}; -pub use self::flock::{FileLock, Filesystem}; -pub use self::graph::Graph; -pub use self::hex::{hash_u64, short_hash, to_hex}; -pub use self::into_url::IntoUrl; -pub use self::into_url_with_base::IntoUrlWithBase; -pub use self::lev_distance::{closest, closest_msg, lev_distance}; -pub use self::lockserver::{LockServer, LockServerClient, LockServerStarted}; -pub use self::paths::{bytes2path, dylib_path, join_paths, path2bytes}; -pub use self::paths::{dylib_path_envvar, normalize_path}; -pub use self::process_builder::{process, ProcessBuilder}; -pub use self::progress::{Progress, ProgressStyle}; -pub use self::read2::read2; -pub use self::rustc::Rustc; -pub use self::sha256::Sha256; -pub use self::to_semver::ToSemver; -pub use self::vcs::{existing_vcs_repo, FossilRepo, GitRepo, HgRepo, PijulRepo}; -pub use self::workspace::{ - print_available_benches, print_available_binaries, print_available_examples, - print_available_tests, -}; - -mod cfg; -pub mod command_prelude; -pub mod config; -mod dependency_queue; -pub mod diagnostic_server; -pub mod errors; -mod flock; -pub mod graph; -pub mod hex; -pub mod important_paths; -pub mod into_url; -mod into_url_with_base; -pub mod job; -pub mod lev_distance; -mod lockserver; -pub mod machine_message; -pub mod network; -pub mod paths; -pub mod process_builder; -pub mod profile; -mod progress; -mod read2; -pub mod rustc; -mod sha256; -pub mod to_semver; -pub mod toml; -mod vcs; -mod workspace; - -pub fn elapsed(duration: Duration) -> String { - let secs = duration.as_secs(); - - if secs >= 60 { - format!("{}m {:02}s", secs / 60, secs % 60) - } else { - format!("{}.{:02}s", secs, duration.subsec_nanos() / 10_000_000) - } -} - -/// Check the base requirements for a 
package name. -/// -/// This can be used for other things than package names, to enforce some -/// level of sanity. Note that package names have other restrictions -/// elsewhere. `cargo new` has a few restrictions, such as checking for -/// reserved names. crates.io has even more restrictions. -pub fn validate_package_name(name: &str, what: &str, help: &str) -> CargoResult<()> { - if let Some(ch) = name - .chars() - .find(|ch| !ch.is_alphanumeric() && *ch != '_' && *ch != '-') - { - failure::bail!("Invalid character `{}` in {}: `{}`{}", ch, what, name, help); - } - Ok(()) -} - -/// Whether or not this running in a Continuous Integration environment. -pub fn is_ci() -> bool { - std::env::var("CI").is_ok() || std::env::var("TF_BUILD").is_ok() -} +use std::time::Duration; + +pub use self::cfg::{Cfg, CfgExpr, Platform}; +pub use self::config::{homedir, Config, ConfigValue}; +pub use self::dependency_queue::DependencyQueue; +pub use self::diagnostic_server::RustfixDiagnosticServer; +pub use self::errors::{internal, process_error}; +pub use self::errors::{CargoResult, CargoResultExt, CliResult, Test}; +pub use self::errors::{CargoTestError, CliError, ProcessError}; +pub use self::flock::{FileLock, Filesystem}; +pub use self::graph::Graph; +pub use self::hex::{hash_u64, short_hash, to_hex}; +pub use self::into_url::IntoUrl; +pub use self::into_url_with_base::IntoUrlWithBase; +pub use self::lev_distance::{closest, closest_msg, lev_distance}; +pub use self::lockserver::{LockServer, LockServerClient, LockServerStarted}; +pub use self::paths::{bytes2path, dylib_path, join_paths, path2bytes}; +pub use self::paths::{dylib_path_envvar, normalize_path}; +pub use self::process_builder::{process, ProcessBuilder}; +pub use self::progress::{Progress, ProgressStyle}; +pub use self::read2::read2; +pub use self::rustc::Rustc; +pub use self::sha256::Sha256; +pub use self::to_semver::ToSemver; +pub use self::vcs::{existing_vcs_repo, FossilRepo, GitRepo, HgRepo, PijulRepo}; +pub use self::workspace::{ + print_available_benches, print_available_binaries, print_available_examples, + print_available_tests, +}; + +mod cfg; +pub mod command_prelude; +pub mod config; +mod dependency_queue; +pub mod diagnostic_server; +pub mod errors; +mod flock; +pub mod graph; +pub mod hex; +pub mod important_paths; +pub mod into_url; +mod into_url_with_base; +pub mod job; +pub mod lev_distance; +mod lockserver; +pub mod machine_message; +pub mod network; +pub mod paths; +pub mod process_builder; +pub mod profile; +mod progress; +mod read2; +pub mod rustc; +mod sha256; +pub mod to_semver; +pub mod toml; +mod vcs; +mod workspace; + +pub fn elapsed(duration: Duration) -> String { + let secs = duration.as_secs(); + + if secs >= 60 { + format!("{}m {:02}s", secs / 60, secs % 60) + } else { + format!("{}.{:02}s", secs, duration.subsec_nanos() / 10_000_000) + } +} + +/// Check the base requirements for a package name. +/// +/// This can be used for other things than package names, to enforce some +/// level of sanity. Note that package names have other restrictions +/// elsewhere. `cargo new` has a few restrictions, such as checking for +/// reserved names. crates.io has even more restrictions. 
+pub fn validate_package_name(name: &str, what: &str, help: &str) -> CargoResult<()> { + if let Some(ch) = name + .chars() + .find(|ch| !ch.is_alphanumeric() && *ch != '_' && *ch != '-') + { + failure::bail!("Invalid character `{}` in {}: `{}`{}", ch, what, name, help); + } + Ok(()) +} + +/// Whether or not this running in a Continuous Integration environment. +pub fn is_ci() -> bool { + std::env::var("CI").is_ok() || std::env::var("TF_BUILD").is_ok() +} diff --git a/src/cargo/util/toml/mod.rs b/src/cargo/util/toml/mod.rs index e2232827885..68e958b1cdc 100644 --- a/src/cargo/util/toml/mod.rs +++ b/src/cargo/util/toml/mod.rs @@ -1,1591 +1,1604 @@ -use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; -use std::fmt; -use std::fs; -use std::path::{Path, PathBuf}; -use std::rc::Rc; -use std::str; - -use failure::bail; -use log::{debug, trace}; -use semver::{self, VersionReq}; -use serde::de; -use serde::ser; -use serde::{Deserialize, Serialize}; -use url::Url; - -use crate::core::dependency::{Kind, Platform}; -use crate::core::manifest::{LibKind, ManifestMetadata, TargetSourcePath, Warnings}; -use crate::core::profiles::Profiles; -use crate::core::{Dependency, Manifest, PackageId, Summary, Target}; -use crate::core::{Edition, EitherManifest, Feature, Features, VirtualManifest}; -use crate::core::{GitReference, PackageIdSpec, SourceId, WorkspaceConfig, WorkspaceRootConfig}; -use crate::sources::{CRATES_IO_INDEX, CRATES_IO_REGISTRY}; -use crate::util::errors::{CargoResult, CargoResultExt, ManifestError}; -use crate::util::{self, paths, validate_package_name, Config, IntoUrl}; - -mod targets; -use self::targets::targets; - -pub fn read_manifest( - path: &Path, - source_id: SourceId, - config: &Config, -) -> Result<(EitherManifest, Vec), ManifestError> { - trace!( - "read_manifest; path={}; source-id={}", - path.display(), - source_id - ); - let contents = paths::read(path).map_err(|err| ManifestError::new(err, path.into()))?; - - do_read_manifest(&contents, path, source_id, config) - .chain_err(|| format!("failed to parse manifest at `{}`", path.display())) - .map_err(|err| ManifestError::new(err, path.into())) -} - -fn do_read_manifest( - contents: &str, - manifest_file: &Path, - source_id: SourceId, - config: &Config, -) -> CargoResult<(EitherManifest, Vec)> { - let package_root = manifest_file.parent().unwrap(); - - let toml = { - let pretty_filename = manifest_file - .strip_prefix(config.cwd()) - .unwrap_or(manifest_file); - parse(contents, pretty_filename, config)? 
- }; - - let mut unused = BTreeSet::new(); - let manifest: TomlManifest = serde_ignored::deserialize(toml, |path| { - let mut key = String::new(); - stringify(&mut key, &path); - unused.insert(key); - })?; - let add_unused = |warnings: &mut Warnings| { - for key in unused { - warnings.add_warning(format!("unused manifest key: {}", key)); - if key == "profile.debug" || key == "profiles.debug" { - warnings.add_warning("use `[profile.dev]` to configure debug builds".to_string()); - } - } - }; - - let manifest = Rc::new(manifest); - return if manifest.project.is_some() || manifest.package.is_some() { - let (mut manifest, paths) = - TomlManifest::to_real_manifest(&manifest, source_id, package_root, config)?; - add_unused(manifest.warnings_mut()); - if !manifest.targets().iter().any(|t| !t.is_custom_build()) { - bail!( - "no targets specified in the manifest\n \ - either src/lib.rs, src/main.rs, a [lib] section, or \ - [[bin]] section must be present" - ) - } - Ok((EitherManifest::Real(manifest), paths)) - } else { - let (mut m, paths) = - TomlManifest::to_virtual_manifest(&manifest, source_id, package_root, config)?; - add_unused(m.warnings_mut()); - Ok((EitherManifest::Virtual(m), paths)) - }; - - fn stringify(dst: &mut String, path: &serde_ignored::Path<'_>) { - use serde_ignored::Path; - - match *path { - Path::Root => {} - Path::Seq { parent, index } => { - stringify(dst, parent); - if !dst.is_empty() { - dst.push('.'); - } - dst.push_str(&index.to_string()); - } - Path::Map { parent, ref key } => { - stringify(dst, parent); - if !dst.is_empty() { - dst.push('.'); - } - dst.push_str(key); - } - Path::Some { parent } - | Path::NewtypeVariant { parent } - | Path::NewtypeStruct { parent } => stringify(dst, parent), - } - } -} - -pub fn parse(toml: &str, file: &Path, config: &Config) -> CargoResult { - let first_error = match toml.parse() { - Ok(ret) => return Ok(ret), - Err(e) => e, - }; - - let mut second_parser = toml::de::Deserializer::new(toml); - second_parser.set_require_newline_after_table(false); - if let Ok(ret) = toml::Value::deserialize(&mut second_parser) { - let msg = format!( - "\ -TOML file found which contains invalid syntax and will soon not parse -at `{}`. - -The TOML spec requires newlines after table definitions (e.g., `[a] b = 1` is -invalid), but this file has a table header which does not have a newline after -it. A newline needs to be added and this warning will soon become a hard error -in the future.", - file.display() - ); - config.shell().warn(&msg)?; - return Ok(ret); - } - - let mut third_parser = toml::de::Deserializer::new(toml); - third_parser.set_allow_duplicate_after_longer_table(true); - if let Ok(ret) = toml::Value::deserialize(&mut third_parser) { - let msg = format!( - "\ -TOML file found which contains invalid syntax and will soon not parse -at `{}`. - -The TOML spec requires that each table header is defined at most once, but -historical versions of Cargo have erroneously accepted this file. 
The table -definitions will need to be merged together with one table header to proceed, -and this will become a hard error in the future.", - file.display() - ); - config.shell().warn(&msg)?; - return Ok(ret); - } - - let first_error = failure::Error::from(first_error); - Err(first_error.context("could not parse input as TOML").into()) -} - -type TomlLibTarget = TomlTarget; -type TomlBinTarget = TomlTarget; -type TomlExampleTarget = TomlTarget; -type TomlTestTarget = TomlTarget; -type TomlBenchTarget = TomlTarget; - -#[derive(Clone, Debug, Serialize)] -#[serde(untagged)] -pub enum TomlDependency { - Simple(String), - Detailed(DetailedTomlDependency), -} - -impl<'de> de::Deserialize<'de> for TomlDependency { - fn deserialize(deserializer: D) -> Result - where - D: de::Deserializer<'de>, - { - struct TomlDependencyVisitor; - - impl<'de> de::Visitor<'de> for TomlDependencyVisitor { - type Value = TomlDependency; - - fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter.write_str( - "a version string like \"0.9.8\" or a \ - detailed dependency like { version = \"0.9.8\" }", - ) - } - - fn visit_str(self, s: &str) -> Result - where - E: de::Error, - { - Ok(TomlDependency::Simple(s.to_owned())) - } - - fn visit_map(self, map: V) -> Result - where - V: de::MapAccess<'de>, - { - let mvd = de::value::MapAccessDeserializer::new(map); - DetailedTomlDependency::deserialize(mvd).map(TomlDependency::Detailed) - } - } - - deserializer.deserialize_any(TomlDependencyVisitor) - } -} - -#[derive(Deserialize, Serialize, Clone, Debug, Default)] -#[serde(rename_all = "kebab-case")] -pub struct DetailedTomlDependency { - version: Option, - registry: Option, - /// The URL of the `registry` field. - /// This is an internal implementation detail. When Cargo creates a - /// package, it replaces `registry` with `registry-index` so that the - /// manifest contains the correct URL. All users won't have the same - /// registry names configured, so Cargo can't rely on just the name for - /// crates published by other users. 
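// [Illustrative sketch, not part of this patch] A dependency can be written
// either as a bare version string or as a detailed table:
//     serde = "1.0"
//     serde = { version = "1.0", default-features = false }
// The hand-written visitor above accepts both shapes (and can report a
// tailored error message). The same idea expressed with serde's untagged
// representation, using stand-in field names:
use serde::Deserialize;

#[derive(Deserialize)]
#[serde(untagged)]
enum Dep {
    Simple(String),
    Detailed { version: Option<String>, path: Option<String> },
}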
- registry_index: Option, - path: Option, - git: Option, - branch: Option, - tag: Option, - rev: Option, - features: Option>, - optional: Option, - default_features: Option, - #[serde(rename = "default_features")] - default_features2: Option, - package: Option, - public: Option, -} - -#[derive(Debug, Deserialize, Serialize)] -#[serde(rename_all = "kebab-case")] -pub struct TomlManifest { - cargo_features: Option>, - package: Option>, - project: Option>, - profile: Option, - lib: Option, - bin: Option>, - example: Option>, - test: Option>, - bench: Option>, - dependencies: Option>, - dev_dependencies: Option>, - #[serde(rename = "dev_dependencies")] - dev_dependencies2: Option>, - build_dependencies: Option>, - #[serde(rename = "build_dependencies")] - build_dependencies2: Option>, - features: Option>>, - target: Option>, - replace: Option>, - patch: Option>>, - workspace: Option, - badges: Option>>, -} - -#[derive(Deserialize, Serialize, Clone, Debug, Default)] -pub struct TomlProfiles { - pub test: Option, - pub doc: Option, - pub bench: Option, - pub dev: Option, - pub release: Option, -} - -impl TomlProfiles { - pub fn validate(&self, features: &Features, warnings: &mut Vec) -> CargoResult<()> { - if let Some(ref test) = self.test { - test.validate("test", features, warnings)?; - } - if let Some(ref doc) = self.doc { - doc.validate("doc", features, warnings)?; - } - if let Some(ref bench) = self.bench { - bench.validate("bench", features, warnings)?; - } - if let Some(ref dev) = self.dev { - dev.validate("dev", features, warnings)?; - } - if let Some(ref release) = self.release { - release.validate("release", features, warnings)?; - } - Ok(()) - } -} - -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct TomlOptLevel(pub String); - -impl<'de> de::Deserialize<'de> for TomlOptLevel { - fn deserialize(d: D) -> Result - where - D: de::Deserializer<'de>, - { - struct Visitor; - - impl<'de> de::Visitor<'de> for Visitor { - type Value = TomlOptLevel; - - fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter.write_str("an optimization level") - } - - fn visit_i64(self, value: i64) -> Result - where - E: de::Error, - { - Ok(TomlOptLevel(value.to_string())) - } - - fn visit_str(self, value: &str) -> Result - where - E: de::Error, - { - if value == "s" || value == "z" { - Ok(TomlOptLevel(value.to_string())) - } else { - Err(E::custom(format!( - "must be an integer, `z`, or `s`, \ - but found: {}", - value - ))) - } - } - } - - d.deserialize_any(Visitor) - } -} - -impl ser::Serialize for TomlOptLevel { - fn serialize(&self, serializer: S) -> Result - where - S: ser::Serializer, - { - match self.0.parse::() { - Ok(n) => n.serialize(serializer), - Err(_) => self.0.serialize(serializer), - } - } -} - -#[derive(Clone, Debug, Serialize, Eq, PartialEq)] -#[serde(untagged)] -pub enum U32OrBool { - U32(u32), - Bool(bool), -} - -impl<'de> de::Deserialize<'de> for U32OrBool { - fn deserialize(deserializer: D) -> Result - where - D: de::Deserializer<'de>, - { - struct Visitor; - - impl<'de> de::Visitor<'de> for Visitor { - type Value = U32OrBool; - - fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter.write_str("a boolean or an integer") - } - - fn visit_bool(self, b: bool) -> Result - where - E: de::Error, - { - Ok(U32OrBool::Bool(b)) - } - - fn visit_i64(self, u: i64) -> Result - where - E: de::Error, - { - Ok(U32OrBool::U32(u as u32)) - } - - fn visit_u64(self, u: u64) -> Result - where - E: de::Error, - { - Ok(U32OrBool::U32(u as u32)) - } - } 
- - deserializer.deserialize_any(Visitor) - } -} - -#[derive(Deserialize, Serialize, Clone, Debug, Default, Eq, PartialEq)] -#[serde(rename_all = "kebab-case")] -pub struct TomlProfile { - pub opt_level: Option, - pub lto: Option, - pub codegen_units: Option, - pub debug: Option, - pub debug_assertions: Option, - pub rpath: Option, - pub panic: Option, - pub overflow_checks: Option, - pub incremental: Option, - pub overrides: Option>, - pub build_override: Option>, -} - -#[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Hash)] -pub enum ProfilePackageSpec { - Spec(PackageIdSpec), - All, -} - -impl ser::Serialize for ProfilePackageSpec { - fn serialize(&self, s: S) -> Result - where - S: ser::Serializer, - { - match *self { - ProfilePackageSpec::Spec(ref spec) => spec.serialize(s), - ProfilePackageSpec::All => "*".serialize(s), - } - } -} - -impl<'de> de::Deserialize<'de> for ProfilePackageSpec { - fn deserialize(d: D) -> Result - where - D: de::Deserializer<'de>, - { - let string = String::deserialize(d)?; - if string == "*" { - Ok(ProfilePackageSpec::All) - } else { - PackageIdSpec::parse(&string) - .map_err(de::Error::custom) - .map(ProfilePackageSpec::Spec) - } - } -} - -impl TomlProfile { - pub fn validate( - &self, - name: &str, - features: &Features, - warnings: &mut Vec, - ) -> CargoResult<()> { - if let Some(ref profile) = self.build_override { - features.require(Feature::profile_overrides())?; - profile.validate_override()?; - } - if let Some(ref override_map) = self.overrides { - features.require(Feature::profile_overrides())?; - for profile in override_map.values() { - profile.validate_override()?; - } - } - - match name { - "dev" | "release" => {} - _ => { - if self.overrides.is_some() || self.build_override.is_some() { - bail!( - "Profile overrides may only be specified for \ - `dev` or `release` profile, not `{}`.", - name - ); - } - } - } - - match name { - "doc" => { - warnings.push("profile `doc` is deprecated and has no effect".to_string()); - } - "test" | "bench" => { - if self.panic.is_some() { - warnings.push(format!("`panic` setting is ignored for `{}` profile", name)) - } - } - _ => {} - } - - if let Some(panic) = &self.panic { - if panic != "unwind" && panic != "abort" { - bail!( - "`panic` setting of `{}` is not a valid setting,\ - must be `unwind` or `abort`", - panic - ); - } - } - Ok(()) - } - - fn validate_override(&self) -> CargoResult<()> { - if self.overrides.is_some() || self.build_override.is_some() { - bail!("Profile overrides cannot be nested."); - } - if self.panic.is_some() { - bail!("`panic` may not be specified in a profile override.") - } - if self.lto.is_some() { - bail!("`lto` may not be specified in a profile override.") - } - if self.rpath.is_some() { - bail!("`rpath` may not be specified in a profile override.") - } - Ok(()) - } -} - -#[derive(Clone, Debug, Serialize, Eq, PartialEq)] -pub struct StringOrVec(Vec); - -impl<'de> de::Deserialize<'de> for StringOrVec { - fn deserialize(deserializer: D) -> Result - where - D: de::Deserializer<'de>, - { - struct Visitor; - - impl<'de> de::Visitor<'de> for Visitor { - type Value = StringOrVec; - - fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter.write_str("string or list of strings") - } - - fn visit_str(self, s: &str) -> Result - where - E: de::Error, - { - Ok(StringOrVec(vec![s.to_string()])) - } - - fn visit_seq(self, v: V) -> Result - where - V: de::SeqAccess<'de>, - { - let seq = de::value::SeqAccessDeserializer::new(v); - 
Vec::deserialize(seq).map(StringOrVec) - } - } - - deserializer.deserialize_any(Visitor) - } -} - -#[derive(Clone, Debug, Serialize, Eq, PartialEq)] -#[serde(untagged)] -pub enum StringOrBool { - String(String), - Bool(bool), -} - -impl<'de> de::Deserialize<'de> for StringOrBool { - fn deserialize(deserializer: D) -> Result - where - D: de::Deserializer<'de>, - { - struct Visitor; - - impl<'de> de::Visitor<'de> for Visitor { - type Value = StringOrBool; - - fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter.write_str("a boolean or a string") - } - - fn visit_bool(self, b: bool) -> Result - where - E: de::Error, - { - Ok(StringOrBool::Bool(b)) - } - - fn visit_str(self, s: &str) -> Result - where - E: de::Error, - { - Ok(StringOrBool::String(s.to_string())) - } - } - - deserializer.deserialize_any(Visitor) - } -} - -#[derive(Clone, Debug, Serialize)] -#[serde(untagged)] -pub enum VecStringOrBool { - VecString(Vec), - Bool(bool), -} - -impl<'de> de::Deserialize<'de> for VecStringOrBool { - fn deserialize(deserializer: D) -> Result - where - D: de::Deserializer<'de>, - { - struct Visitor; - - impl<'de> de::Visitor<'de> for Visitor { - type Value = VecStringOrBool; - - fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter.write_str("a boolean or vector of strings") - } - - fn visit_seq(self, v: V) -> Result - where - V: de::SeqAccess<'de>, - { - let seq = de::value::SeqAccessDeserializer::new(v); - Vec::deserialize(seq).map(VecStringOrBool::VecString) - } - - fn visit_bool(self, b: bool) -> Result - where - E: de::Error, - { - Ok(VecStringOrBool::Bool(b)) - } - } - - deserializer.deserialize_any(Visitor) - } -} - -/// Represents the `package`/`project` sections of a `Cargo.toml`. -/// -/// Note that the order of the fields matters, since this is the order they -/// are serialized to a TOML file. For example, you cannot have values after -/// the field `metadata`, since it is a table and values cannot appear after -/// tables. -#[derive(Deserialize, Serialize, Clone, Debug)] -pub struct TomlProject { - edition: Option, - name: String, - version: semver::Version, - authors: Option>, - build: Option, - metabuild: Option, - links: Option, - exclude: Option>, - include: Option>, - publish: Option, - #[serde(rename = "publish-lockfile")] - publish_lockfile: Option, - workspace: Option, - #[serde(rename = "im-a-teapot")] - im_a_teapot: Option, - autobins: Option, - autoexamples: Option, - autotests: Option, - autobenches: Option, - #[serde(rename = "namespaced-features")] - namespaced_features: Option, - #[serde(rename = "default-run")] - default_run: Option, - - // Package metadata. 
- description: Option, - homepage: Option, - documentation: Option, - readme: Option, - keywords: Option>, - categories: Option>, - license: Option, - #[serde(rename = "license-file")] - license_file: Option, - repository: Option, - metadata: Option, -} - -#[derive(Debug, Deserialize, Serialize)] -pub struct TomlWorkspace { - members: Option>, - #[serde(rename = "default-members")] - default_members: Option>, - exclude: Option>, -} - -impl TomlProject { - pub fn to_package_id(&self, source_id: SourceId) -> CargoResult { - PackageId::new(&self.name, self.version.clone(), source_id) - } -} - -struct Context<'a, 'b> { - pkgid: Option, - deps: &'a mut Vec, - source_id: SourceId, - nested_paths: &'a mut Vec, - config: &'b Config, - warnings: &'a mut Vec, - platform: Option, - root: &'a Path, - features: &'a Features, -} - -impl TomlManifest { - pub fn prepare_for_publish(&self, config: &Config) -> CargoResult { - let mut package = self - .package - .as_ref() - .or_else(|| self.project.as_ref()) - .unwrap() - .clone(); - package.workspace = None; - return Ok(TomlManifest { - package: Some(package), - project: None, - profile: self.profile.clone(), - lib: self.lib.clone(), - bin: self.bin.clone(), - example: self.example.clone(), - test: self.test.clone(), - bench: self.bench.clone(), - dependencies: map_deps(config, self.dependencies.as_ref())?, - dev_dependencies: map_deps( - config, - self.dev_dependencies - .as_ref() - .or_else(|| self.dev_dependencies2.as_ref()), - )?, - dev_dependencies2: None, - build_dependencies: map_deps( - config, - self.build_dependencies - .as_ref() - .or_else(|| self.build_dependencies2.as_ref()), - )?, - build_dependencies2: None, - features: self.features.clone(), - target: match self.target.as_ref().map(|target_map| { - target_map - .iter() - .map(|(k, v)| { - Ok(( - k.clone(), - TomlPlatform { - dependencies: map_deps(config, v.dependencies.as_ref())?, - dev_dependencies: map_deps( - config, - v.dev_dependencies - .as_ref() - .or_else(|| v.dev_dependencies2.as_ref()), - )?, - dev_dependencies2: None, - build_dependencies: map_deps( - config, - v.build_dependencies - .as_ref() - .or_else(|| v.build_dependencies2.as_ref()), - )?, - build_dependencies2: None, - }, - )) - }) - .collect() - }) { - Some(Ok(v)) => Some(v), - Some(Err(e)) => return Err(e), - None => None, - }, - replace: None, - patch: None, - workspace: None, - badges: self.badges.clone(), - cargo_features: self.cargo_features.clone(), - }); - - fn map_deps( - config: &Config, - deps: Option<&BTreeMap>, - ) -> CargoResult>> { - let deps = match deps { - Some(deps) => deps, - None => return Ok(None), - }; - let deps = deps - .iter() - .map(|(k, v)| Ok((k.clone(), map_dependency(config, v)?))) - .collect::>>()?; - Ok(Some(deps)) - } - - fn map_dependency(config: &Config, dep: &TomlDependency) -> CargoResult { - match *dep { - TomlDependency::Detailed(ref d) => { - let mut d = d.clone(); - d.path.take(); // path dependencies become crates.io deps - // registry specifications are elaborated to the index URL - if let Some(registry) = d.registry.take() { - let src = SourceId::alt_registry(config, ®istry)?; - d.registry_index = Some(src.url().to_string()); - } - Ok(TomlDependency::Detailed(d)) - } - TomlDependency::Simple(ref s) => { - Ok(TomlDependency::Detailed(DetailedTomlDependency { - version: Some(s.clone()), - ..Default::default() - })) - } - } - } - } - - pub fn to_real_manifest( - me: &Rc, - source_id: SourceId, - package_root: &Path, - config: &Config, - ) -> CargoResult<(Manifest, Vec)> { - let mut 
nested_paths = vec![]; - let mut warnings = vec![]; - let mut errors = vec![]; - - // Parse features first so they will be available when parsing other parts of the TOML. - let empty = Vec::new(); - let cargo_features = me.cargo_features.as_ref().unwrap_or(&empty); - let features = Features::new(cargo_features, &mut warnings)?; - - let project = me.project.as_ref().or_else(|| me.package.as_ref()); - let project = project.ok_or_else(|| failure::format_err!("no `package` section found"))?; - - let package_name = project.name.trim(); - if package_name.is_empty() { - bail!("package name cannot be an empty string") - } - - validate_package_name(package_name, "package name", "")?; - - let pkgid = project.to_package_id(source_id)?; - - let edition = if let Some(ref edition) = project.edition { - features - .require(Feature::edition()) - .chain_err(|| "editions are unstable")?; - edition - .parse() - .chain_err(|| "failed to parse the `edition` key")? - } else { - Edition::Edition2015 - }; - - if project.metabuild.is_some() { - features.require(Feature::metabuild())?; - } - - // If we have no lib at all, use the inferred lib, if available. - // If we have a lib with a path, we're done. - // If we have a lib with no path, use the inferred lib or else the package name. - let targets = targets( - &features, - me, - package_name, - package_root, - edition, - &project.build, - &project.metabuild, - &mut warnings, - &mut errors, - )?; - - if targets.is_empty() { - debug!("manifest has no build targets"); - } - - if let Err(e) = unique_build_targets(&targets, package_root) { - warnings.push(format!( - "file found to be present in multiple \ - build targets: {}", - e - )); - } - - let mut deps = Vec::new(); - let replace; - let patch; - - { - let mut cx = Context { - pkgid: Some(pkgid), - deps: &mut deps, - source_id, - nested_paths: &mut nested_paths, - config, - warnings: &mut warnings, - features: &features, - platform: None, - root: package_root, - }; - - fn process_dependencies( - cx: &mut Context<'_, '_>, - new_deps: Option<&BTreeMap>, - kind: Option, - ) -> CargoResult<()> { - let dependencies = match new_deps { - Some(dependencies) => dependencies, - None => return Ok(()), - }; - for (n, v) in dependencies.iter() { - let dep = v.to_dependency(n, cx, kind)?; - cx.deps.push(dep); - } - - Ok(()) - } - - // Collect the dependencies. 
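// [Illustrative sketch, not part of this patch] `process_dependencies` above is
// called once per dependency table; `None` marks a normal dependency while the
// dev and build tables pass an explicit kind, as the calls below show. Roughly,
// with a stand-in for cargo's internal `Kind` enum:
enum DepKind { Development, Build }

fn kind_for_table(table: &str) -> Option<DepKind> {
    match table {
        "dev-dependencies" | "dev_dependencies" => Some(DepKind::Development),
        "build-dependencies" | "build_dependencies" => Some(DepKind::Build),
        _ => None, // `[dependencies]` and anything else: a normal dependency
    }
}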
- process_dependencies(&mut cx, me.dependencies.as_ref(), None)?; - let dev_deps = me - .dev_dependencies - .as_ref() - .or_else(|| me.dev_dependencies2.as_ref()); - process_dependencies(&mut cx, dev_deps, Some(Kind::Development))?; - let build_deps = me - .build_dependencies - .as_ref() - .or_else(|| me.build_dependencies2.as_ref()); - process_dependencies(&mut cx, build_deps, Some(Kind::Build))?; - - for (name, platform) in me.target.iter().flat_map(|t| t) { - cx.platform = Some(name.parse()?); - process_dependencies(&mut cx, platform.dependencies.as_ref(), None)?; - let build_deps = platform - .build_dependencies - .as_ref() - .or_else(|| platform.build_dependencies2.as_ref()); - process_dependencies(&mut cx, build_deps, Some(Kind::Build))?; - let dev_deps = platform - .dev_dependencies - .as_ref() - .or_else(|| platform.dev_dependencies2.as_ref()); - process_dependencies(&mut cx, dev_deps, Some(Kind::Development))?; - } - - replace = me.replace(&mut cx)?; - patch = me.patch(&mut cx)?; - } - - { - let mut names_sources = BTreeMap::new(); - for dep in &deps { - let name = dep.name_in_toml(); - let prev = names_sources.insert(name.to_string(), dep.source_id()); - if prev.is_some() && prev != Some(dep.source_id()) { - bail!( - "Dependency '{}' has different source paths depending on the build \ - target. Each dependency must have a single canonical source path \ - irrespective of build target.", - name - ); - } - } - } - - let exclude = project.exclude.clone().unwrap_or_default(); - let include = project.include.clone().unwrap_or_default(); - if project.namespaced_features.is_some() { - features.require(Feature::namespaced_features())?; - } - - let summary = Summary::new( - pkgid, - deps, - &me.features - .as_ref() - .map(|x| { - x.iter() - .map(|(k, v)| (k.as_str(), v.iter().collect())) - .collect() - }) - .unwrap_or_else(BTreeMap::new), - project.links.as_ref().map(|x| x.as_str()), - project.namespaced_features.unwrap_or(false), - )?; - let metadata = ManifestMetadata { - description: project.description.clone(), - homepage: project.homepage.clone(), - documentation: project.documentation.clone(), - readme: project.readme.clone(), - authors: project.authors.clone().unwrap_or_default(), - license: project.license.clone(), - license_file: project.license_file.clone(), - repository: project.repository.clone(), - keywords: project.keywords.clone().unwrap_or_default(), - categories: project.categories.clone().unwrap_or_default(), - badges: me.badges.clone().unwrap_or_default(), - links: project.links.clone(), - }; - - let workspace_config = match (me.workspace.as_ref(), project.workspace.as_ref()) { - (Some(config), None) => WorkspaceConfig::Root(WorkspaceRootConfig::new( - package_root, - &config.members, - &config.default_members, - &config.exclude, - )), - (None, root) => WorkspaceConfig::Member { - root: root.cloned(), - }, - (Some(..), Some(..)) => bail!( - "cannot configure both `package.workspace` and \ - `[workspace]`, only one can be specified" - ), - }; - let profiles = Profiles::new(me.profile.as_ref(), config, &features, &mut warnings)?; - let publish = match project.publish { - Some(VecStringOrBool::VecString(ref vecstring)) => Some(vecstring.clone()), - Some(VecStringOrBool::Bool(false)) => Some(vec![]), - None | Some(VecStringOrBool::Bool(true)) => None, - }; - - let publish_lockfile = match project.publish_lockfile { - Some(b) => { - features.require(Feature::publish_lockfile())?; - warnings.push( - "The `publish-lockfile` feature is deprecated and currently \ - has no effect. 
It may be removed in a future version." - .to_string(), - ); - b - } - None => features.is_enabled(Feature::publish_lockfile()), - }; - - if summary.features().contains_key("default-features") { - warnings.push( - "`default-features = [\"..\"]` was found in [features]. \ - Did you mean to use `default = [\"..\"]`?" - .to_string(), - ) - } - - if let Some(run) = &project.default_run { - if !targets - .iter() - .filter(|t| t.is_bin()) - .any(|t| t.name() == run) - { - let suggestion = - util::closest_msg(run, targets.iter().filter(|t| t.is_bin()), |t| t.name()); - bail!("default-run target `{}` not found{}", run, suggestion); - } - } - - let custom_metadata = project.metadata.clone(); - let mut manifest = Manifest::new( - summary, - targets, - exclude, - include, - project.links.clone(), - metadata, - custom_metadata, - profiles, - publish, - publish_lockfile, - replace, - patch, - workspace_config, - features, - edition, - project.im_a_teapot, - project.default_run.clone(), - Rc::clone(me), - project.metabuild.clone().map(|sov| sov.0), - ); - if project.license_file.is_some() && project.license.is_some() { - manifest.warnings_mut().add_warning( - "only one of `license` or \ - `license-file` is necessary" - .to_string(), - ); - } - for warning in warnings { - manifest.warnings_mut().add_warning(warning); - } - for error in errors { - manifest.warnings_mut().add_critical_warning(error); - } - - manifest.feature_gate()?; - - Ok((manifest, nested_paths)) - } - - fn to_virtual_manifest( - me: &Rc, - source_id: SourceId, - root: &Path, - config: &Config, - ) -> CargoResult<(VirtualManifest, Vec)> { - if me.project.is_some() { - bail!("virtual manifests do not define [project]"); - } - if me.package.is_some() { - bail!("virtual manifests do not define [package]"); - } - if me.lib.is_some() { - bail!("virtual manifests do not specify [lib]"); - } - if me.bin.is_some() { - bail!("virtual manifests do not specify [[bin]]"); - } - if me.example.is_some() { - bail!("virtual manifests do not specify [[example]]"); - } - if me.test.is_some() { - bail!("virtual manifests do not specify [[test]]"); - } - if me.bench.is_some() { - bail!("virtual manifests do not specify [[bench]]"); - } - if me.dependencies.is_some() { - bail!("virtual manifests do not specify [dependencies]"); - } - if me.dev_dependencies.is_some() || me.dev_dependencies2.is_some() { - bail!("virtual manifests do not specify [dev-dependencies]"); - } - if me.build_dependencies.is_some() || me.build_dependencies2.is_some() { - bail!("virtual manifests do not specify [build-dependencies]"); - } - if me.features.is_some() { - bail!("virtual manifests do not specify [features]"); - } - if me.target.is_some() { - bail!("virtual manifests do not specify [target]"); - } - if me.badges.is_some() { - bail!("virtual manifests do not specify [badges]"); - } - - let mut nested_paths = Vec::new(); - let mut warnings = Vec::new(); - let mut deps = Vec::new(); - let empty = Vec::new(); - let cargo_features = me.cargo_features.as_ref().unwrap_or(&empty); - let features = Features::new(cargo_features, &mut warnings)?; - - let (replace, patch) = { - let mut cx = Context { - pkgid: None, - deps: &mut deps, - source_id, - nested_paths: &mut nested_paths, - config, - warnings: &mut warnings, - platform: None, - features: &features, - root, - }; - (me.replace(&mut cx)?, me.patch(&mut cx)?) 
- }; - let profiles = Profiles::new(me.profile.as_ref(), config, &features, &mut warnings)?; - let workspace_config = match me.workspace { - Some(ref config) => WorkspaceConfig::Root(WorkspaceRootConfig::new( - root, - &config.members, - &config.default_members, - &config.exclude, - )), - None => { - bail!("virtual manifests must be configured with [workspace]"); - } - }; - Ok(( - VirtualManifest::new(replace, patch, workspace_config, profiles, features), - nested_paths, - )) - } - - fn replace(&self, cx: &mut Context<'_, '_>) -> CargoResult> { - if self.patch.is_some() && self.replace.is_some() { - bail!("cannot specify both [replace] and [patch]"); - } - let mut replace = Vec::new(); - for (spec, replacement) in self.replace.iter().flat_map(|x| x) { - let mut spec = PackageIdSpec::parse(spec).chain_err(|| { - format!( - "replacements must specify a valid semver \ - version to replace, but `{}` does not", - spec - ) - })?; - if spec.url().is_none() { - spec.set_url(CRATES_IO_INDEX.parse().unwrap()); - } - - let version_specified = match *replacement { - TomlDependency::Detailed(ref d) => d.version.is_some(), - TomlDependency::Simple(..) => true, - }; - if version_specified { - bail!( - "replacements cannot specify a version \ - requirement, but found one for `{}`", - spec - ); - } - - let mut dep = replacement.to_dependency(spec.name().as_str(), cx, None)?; - { - let version = spec.version().ok_or_else(|| { - failure::format_err!( - "replacements must specify a version \ - to replace, but `{}` does not", - spec - ) - })?; - dep.set_version_req(VersionReq::exact(version)); - } - replace.push((spec, dep)); - } - Ok(replace) - } - - fn patch(&self, cx: &mut Context<'_, '_>) -> CargoResult>> { - let mut patch = HashMap::new(); - for (url, deps) in self.patch.iter().flat_map(|x| x) { - let url = match &url[..] { - CRATES_IO_REGISTRY => CRATES_IO_INDEX.parse().unwrap(), - _ => cx - .config - .get_registry_index(url) - .or_else(|_| url.into_url()) - .chain_err(|| { - format!("[patch] entry `{}` should be a URL or registry name", url) - })?, - }; - patch.insert( - url, - deps.iter() - .map(|(name, dep)| dep.to_dependency(name, cx, None)) - .collect::>>()?, - ); - } - Ok(patch) - } - - fn maybe_custom_build( - &self, - build: &Option, - package_root: &Path, - ) -> Option { - let build_rs = package_root.join("build.rs"); - match *build { - // Explicitly no build script. - Some(StringOrBool::Bool(false)) => None, - Some(StringOrBool::Bool(true)) => Some(build_rs), - Some(StringOrBool::String(ref s)) => Some(PathBuf::from(s)), - None => { - match fs::metadata(&build_rs) { - // If there is a `build.rs` file next to the `Cargo.toml`, assume it is - // a build script. - Ok(ref e) if e.is_file() => Some(build_rs), - Ok(_) | Err(_) => None, - } - } - } - } - - pub fn has_profiles(&self) -> bool { - self.profile.is_some() - } -} - -/// Checks a list of build targets, and ensures the target names are unique within a vector. -/// If not, the name of the offending build target is returned. 
-fn unique_build_targets(targets: &[Target], package_root: &Path) -> Result<(), String> { - let mut seen = HashSet::new(); - for target in targets { - if let TargetSourcePath::Path(path) = target.src_path() { - let full = package_root.join(path); - if !seen.insert(full.clone()) { - return Err(full.display().to_string()); - } - } - } - Ok(()) -} - -impl TomlDependency { - fn to_dependency( - &self, - name: &str, - cx: &mut Context<'_, '_>, - kind: Option, - ) -> CargoResult { - match *self { - TomlDependency::Simple(ref version) => DetailedTomlDependency { - version: Some(version.clone()), - ..Default::default() - } - .to_dependency(name, cx, kind), - TomlDependency::Detailed(ref details) => details.to_dependency(name, cx, kind), - } - } -} - -impl DetailedTomlDependency { - fn to_dependency( - &self, - name_in_toml: &str, - cx: &mut Context<'_, '_>, - kind: Option, - ) -> CargoResult { - if self.version.is_none() && self.path.is_none() && self.git.is_none() { - let msg = format!( - "dependency ({}) specified without \ - providing a local path, Git repository, or \ - version to use. This will be considered an \ - error in future versions", - name_in_toml - ); - cx.warnings.push(msg); - } - - if let Some(version) = &self.version { - if version.contains('+') { - cx.warnings.push(format!( - "version requirement `{}` for dependency `{}` \ - includes semver metadata which will be ignored, removing the \ - metadata is recommended to avoid confusion", - version, name_in_toml - )); - } - } - - if self.git.is_none() { - let git_only_keys = [ - (&self.branch, "branch"), - (&self.tag, "tag"), - (&self.rev, "rev"), - ]; - - for &(key, key_name) in &git_only_keys { - if key.is_some() { - let msg = format!( - "key `{}` is ignored for dependency ({}). \ - This will be considered an error in future versions", - key_name, name_in_toml - ); - cx.warnings.push(msg) - } - } - } - - let new_source_id = match ( - self.git.as_ref(), - self.path.as_ref(), - self.registry.as_ref(), - self.registry_index.as_ref(), - ) { - (Some(_), _, Some(_), _) | (Some(_), _, _, Some(_)) => bail!( - "dependency ({}) specification is ambiguous. \ - Only one of `git` or `registry` is allowed.", - name_in_toml - ), - (_, _, Some(_), Some(_)) => bail!( - "dependency ({}) specification is ambiguous. \ - Only one of `registry` or `registry-index` is allowed.", - name_in_toml - ), - (Some(git), maybe_path, _, _) => { - if maybe_path.is_some() { - let msg = format!( - "dependency ({}) specification is ambiguous. \ - Only one of `git` or `path` is allowed. \ - This will be considered an error in future versions", - name_in_toml - ); - cx.warnings.push(msg) - } - - let n_details = [&self.branch, &self.tag, &self.rev] - .iter() - .filter(|d| d.is_some()) - .count(); - - if n_details > 1 { - let msg = format!( - "dependency ({}) specification is ambiguous. \ - Only one of `branch`, `tag` or `rev` is allowed. \ - This will be considered an error in future versions", - name_in_toml - ); - cx.warnings.push(msg) - } - - let reference = self - .branch - .clone() - .map(GitReference::Branch) - .or_else(|| self.tag.clone().map(GitReference::Tag)) - .or_else(|| self.rev.clone().map(GitReference::Rev)) - .unwrap_or_else(|| GitReference::Branch("master".to_string())); - let loc = git.into_url()?; - SourceId::for_git(&loc, reference)? 
- } - (None, Some(path), _, _) => { - cx.nested_paths.push(PathBuf::from(path)); - // If the source ID for the package we're parsing is a path - // source, then we normalize the path here to get rid of - // components like `..`. - // - // The purpose of this is to get a canonical ID for the package - // that we're depending on to ensure that builds of this package - // always end up hashing to the same value no matter where it's - // built from. - if cx.source_id.is_path() { - let path = cx.root.join(path); - let path = util::normalize_path(&path); - SourceId::for_path(&path)? - } else { - cx.source_id - } - } - (None, None, Some(registry), None) => SourceId::alt_registry(cx.config, registry)?, - (None, None, None, Some(registry_index)) => { - let url = registry_index.into_url()?; - SourceId::for_registry(&url)? - } - (None, None, None, None) => SourceId::crates_io(cx.config)?, - }; - - let (pkg_name, explicit_name_in_toml) = match self.package { - Some(ref s) => (&s[..], Some(name_in_toml)), - None => (name_in_toml, None), - }; - - let version = self.version.as_ref().map(|v| &v[..]); - let mut dep = match cx.pkgid { - Some(id) => Dependency::parse(pkg_name, version, new_source_id, id, cx.config)?, - None => Dependency::parse_no_deprecated(pkg_name, version, new_source_id)?, - }; - dep.set_features(self.features.iter().flat_map(|x| x)) - .set_default_features( - self.default_features - .or(self.default_features2) - .unwrap_or(true), - ) - .set_optional(self.optional.unwrap_or(false)) - .set_platform(cx.platform.clone()); - if let Some(registry) = &self.registry { - let registry_id = SourceId::alt_registry(cx.config, registry)?; - dep.set_registry_id(registry_id); - } - if let Some(registry_index) = &self.registry_index { - let url = registry_index.into_url()?; - let registry_id = SourceId::for_registry(&url)?; - dep.set_registry_id(registry_id); - } - - if let Some(kind) = kind { - dep.set_kind(kind); - } - if let Some(name_in_toml) = explicit_name_in_toml { - cx.features.require(Feature::rename_dependency())?; - dep.set_explicit_name_in_toml(name_in_toml); - } - - if let Some(p) = self.public { - cx.features.require(Feature::public_dependency())?; - - if dep.kind() != Kind::Normal { - bail!("'public' specifier can only be used on regular dependencies, not {:?} dependencies", dep.kind()); - } - - dep.set_public(p); - } - Ok(dep) - } -} - -#[derive(Default, Serialize, Deserialize, Debug, Clone)] -struct TomlTarget { - name: Option, - - // The intention was to only accept `crate-type` here but historical - // versions of Cargo also accepted `crate_type`, so look for both. - #[serde(rename = "crate-type")] - crate_type: Option>, - #[serde(rename = "crate_type")] - crate_type2: Option>, - - path: Option, - test: Option, - doctest: Option, - bench: Option, - doc: Option, - plugin: Option, - #[serde(rename = "proc-macro")] - proc_macro: Option, - #[serde(rename = "proc_macro")] - proc_macro2: Option, - harness: Option, - #[serde(rename = "required-features")] - required_features: Option>, - edition: Option, -} - -#[derive(Clone)] -struct PathValue(PathBuf); - -impl<'de> de::Deserialize<'de> for PathValue { - fn deserialize(deserializer: D) -> Result - where - D: de::Deserializer<'de>, - { - Ok(PathValue(String::deserialize(deserializer)?.into())) - } -} - -impl ser::Serialize for PathValue { - fn serialize(&self, serializer: S) -> Result - where - S: ser::Serializer, - { - self.0.serialize(serializer) - } -} - -/// Corresponds to a `target` entry, but `TomlTarget` is already used. 
-#[derive(Serialize, Deserialize, Debug)] -struct TomlPlatform { - dependencies: Option>, - #[serde(rename = "build-dependencies")] - build_dependencies: Option>, - #[serde(rename = "build_dependencies")] - build_dependencies2: Option>, - #[serde(rename = "dev-dependencies")] - dev_dependencies: Option>, - #[serde(rename = "dev_dependencies")] - dev_dependencies2: Option>, -} - -impl TomlTarget { - fn new() -> TomlTarget { - TomlTarget::default() - } - - fn name(&self) -> String { - match self.name { - Some(ref name) => name.clone(), - None => panic!("target name is required"), - } - } - - fn proc_macro(&self) -> Option { - self.proc_macro.or(self.proc_macro2).or_else(|| { - if let Some(types) = self.crate_types() { - if types.contains(&"proc-macro".to_string()) { - return Some(true); - } - } - None - }) - } - - fn crate_types(&self) -> Option<&Vec> { - self.crate_type - .as_ref() - .or_else(|| self.crate_type2.as_ref()) - } -} - -impl fmt::Debug for PathValue { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0.fmt(f) - } -} +use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; +use std::fmt; +use std::fs; +use std::path::{Path, PathBuf}; +use std::rc::Rc; +use std::str; + +use failure::bail; +use log::{debug, trace}; +use semver::{self, VersionReq}; +use serde::de; +use serde::ser; +use serde::{Deserialize, Serialize}; +use url::Url; + +use crate::core::dependency::Kind; +use crate::core::manifest::{LibKind, ManifestMetadata, TargetSourcePath, Warnings}; +use crate::core::profiles::Profiles; +use crate::core::{Dependency, Manifest, PackageId, Summary, Target}; +use crate::core::{Edition, EitherManifest, Feature, Features, VirtualManifest}; +use crate::core::{GitReference, PackageIdSpec, SourceId, WorkspaceConfig, WorkspaceRootConfig}; +use crate::sources::{CRATES_IO_INDEX, CRATES_IO_REGISTRY}; +use crate::util::errors::{CargoResult, CargoResultExt, ManifestError}; +use crate::util::{self, paths, validate_package_name, Config, IntoUrl, Platform}; + +mod targets; +use self::targets::targets; + +pub fn read_manifest( + path: &Path, + source_id: SourceId, + config: &Config, +) -> Result<(EitherManifest, Vec), ManifestError> { + trace!( + "read_manifest; path={}; source-id={}", + path.display(), + source_id + ); + let contents = paths::read(path).map_err(|err| ManifestError::new(err, path.into()))?; + + do_read_manifest(&contents, path, source_id, config) + .chain_err(|| format!("failed to parse manifest at `{}`", path.display())) + .map_err(|err| ManifestError::new(err, path.into())) +} + +fn do_read_manifest( + contents: &str, + manifest_file: &Path, + source_id: SourceId, + config: &Config, +) -> CargoResult<(EitherManifest, Vec)> { + let package_root = manifest_file.parent().unwrap(); + + let toml = { + let pretty_filename = manifest_file + .strip_prefix(config.cwd()) + .unwrap_or(manifest_file); + parse(contents, pretty_filename, config)? 
+ }; + + let mut unused = BTreeSet::new(); + let manifest: TomlManifest = serde_ignored::deserialize(toml, |path| { + let mut key = String::new(); + stringify(&mut key, &path); + unused.insert(key); + })?; + let add_unused = |warnings: &mut Warnings| { + for key in unused { + warnings.add_warning(format!("unused manifest key: {}", key)); + if key == "profile.debug" || key == "profiles.debug" { + warnings.add_warning("use `[profile.dev]` to configure debug builds".to_string()); + } + } + }; + + let manifest = Rc::new(manifest); + return if manifest.project.is_some() || manifest.package.is_some() { + let (mut manifest, paths) = + TomlManifest::to_real_manifest(&manifest, source_id, package_root, config)?; + add_unused(manifest.warnings_mut()); + if !manifest.targets().iter().any(|t| !t.is_custom_build()) { + bail!( + "no targets specified in the manifest\n \ + either src/lib.rs, src/main.rs, a [lib] section, or \ + [[bin]] section must be present" + ) + } + Ok((EitherManifest::Real(manifest), paths)) + } else { + let (mut m, paths) = + TomlManifest::to_virtual_manifest(&manifest, source_id, package_root, config)?; + add_unused(m.warnings_mut()); + Ok((EitherManifest::Virtual(m), paths)) + }; + + fn stringify(dst: &mut String, path: &serde_ignored::Path<'_>) { + use serde_ignored::Path; + + match *path { + Path::Root => {} + Path::Seq { parent, index } => { + stringify(dst, parent); + if !dst.is_empty() { + dst.push('.'); + } + dst.push_str(&index.to_string()); + } + Path::Map { parent, ref key } => { + stringify(dst, parent); + if !dst.is_empty() { + dst.push('.'); + } + dst.push_str(key); + } + Path::Some { parent } + | Path::NewtypeVariant { parent } + | Path::NewtypeStruct { parent } => stringify(dst, parent), + } + } +} + +pub fn parse(toml: &str, file: &Path, config: &Config) -> CargoResult { + let first_error = match toml.parse() { + Ok(ret) => return Ok(ret), + Err(e) => e, + }; + + let mut second_parser = toml::de::Deserializer::new(toml); + second_parser.set_require_newline_after_table(false); + if let Ok(ret) = toml::Value::deserialize(&mut second_parser) { + let msg = format!( + "\ +TOML file found which contains invalid syntax and will soon not parse +at `{}`. + +The TOML spec requires newlines after table definitions (e.g., `[a] b = 1` is +invalid), but this file has a table header which does not have a newline after +it. A newline needs to be added and this warning will soon become a hard error +in the future.", + file.display() + ); + config.shell().warn(&msg)?; + return Ok(ret); + } + + let mut third_parser = toml::de::Deserializer::new(toml); + third_parser.set_allow_duplicate_after_longer_table(true); + if let Ok(ret) = toml::Value::deserialize(&mut third_parser) { + let msg = format!( + "\ +TOML file found which contains invalid syntax and will soon not parse +at `{}`. + +The TOML spec requires that each table header is defined at most once, but +historical versions of Cargo have erroneously accepted this file. 
The table +definitions will need to be merged together with one table header to proceed, +and this will become a hard error in the future.", + file.display() + ); + config.shell().warn(&msg)?; + return Ok(ret); + } + + let first_error = failure::Error::from(first_error); + Err(first_error.context("could not parse input as TOML").into()) +} + +type TomlLibTarget = TomlTarget; +type TomlBinTarget = TomlTarget; +type TomlExampleTarget = TomlTarget; +type TomlTestTarget = TomlTarget; +type TomlBenchTarget = TomlTarget; + +#[derive(Clone, Debug, Serialize)] +#[serde(untagged)] +pub enum TomlDependency { + Simple(String), + Detailed(DetailedTomlDependency), +} + +impl<'de> de::Deserialize<'de> for TomlDependency { + fn deserialize(deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + struct TomlDependencyVisitor; + + impl<'de> de::Visitor<'de> for TomlDependencyVisitor { + type Value = TomlDependency; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str( + "a version string like \"0.9.8\" or a \ + detailed dependency like { version = \"0.9.8\" }", + ) + } + + fn visit_str(self, s: &str) -> Result + where + E: de::Error, + { + Ok(TomlDependency::Simple(s.to_owned())) + } + + fn visit_map(self, map: V) -> Result + where + V: de::MapAccess<'de>, + { + let mvd = de::value::MapAccessDeserializer::new(map); + DetailedTomlDependency::deserialize(mvd).map(TomlDependency::Detailed) + } + } + + deserializer.deserialize_any(TomlDependencyVisitor) + } +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default)] +#[serde(rename_all = "kebab-case")] +pub struct DetailedTomlDependency { + version: Option, + registry: Option, + /// The URL of the `registry` field. + /// This is an internal implementation detail. When Cargo creates a + /// package, it replaces `registry` with `registry-index` so that the + /// manifest contains the correct URL. All users won't have the same + /// registry names configured, so Cargo can't rely on just the name for + /// crates published by other users. 
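+    /// For example (illustrative values), a dependency written as
+    /// `foo = { version = "1.0", registry = "my-registry" }` is packaged as
+    /// `foo = { version = "1.0", registry-index = "<resolved index URL>" }`,
+    /// with the URL taken from the user's configuration for `my-registry`.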
+ registry_index: Option, + path: Option, + git: Option, + branch: Option, + tag: Option, + rev: Option, + features: Option>, + optional: Option, + default_features: Option, + #[serde(rename = "default_features")] + default_features2: Option, + package: Option, + public: Option, +} + +#[derive(Debug, Deserialize, Serialize)] +#[serde(rename_all = "kebab-case")] +pub struct TomlManifest { + cargo_features: Option>, + package: Option>, + project: Option>, + profile: Option, + lib: Option, + bin: Option>, + example: Option>, + test: Option>, + bench: Option>, + dependencies: Option>, + dev_dependencies: Option>, + #[serde(rename = "dev_dependencies")] + dev_dependencies2: Option>, + build_dependencies: Option>, + #[serde(rename = "build_dependencies")] + build_dependencies2: Option>, + features: Option>>, + target: Option>, + replace: Option>, + patch: Option>>, + workspace: Option, + badges: Option>>, +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default)] +pub struct TomlProfiles { + pub test: Option, + pub doc: Option, + pub bench: Option, + pub dev: Option, + pub release: Option, +} + +impl TomlProfiles { + pub fn validate(&self, features: &Features, warnings: &mut Vec) -> CargoResult<()> { + if let Some(ref test) = self.test { + test.validate("test", features, warnings)?; + } + if let Some(ref doc) = self.doc { + doc.validate("doc", features, warnings)?; + } + if let Some(ref bench) = self.bench { + bench.validate("bench", features, warnings)?; + } + if let Some(ref dev) = self.dev { + dev.validate("dev", features, warnings)?; + } + if let Some(ref release) = self.release { + release.validate("release", features, warnings)?; + } + Ok(()) + } +} + +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct TomlOptLevel(pub String); + +impl<'de> de::Deserialize<'de> for TomlOptLevel { + fn deserialize(d: D) -> Result + where + D: de::Deserializer<'de>, + { + struct Visitor; + + impl<'de> de::Visitor<'de> for Visitor { + type Value = TomlOptLevel; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("an optimization level") + } + + fn visit_i64(self, value: i64) -> Result + where + E: de::Error, + { + Ok(TomlOptLevel(value.to_string())) + } + + fn visit_str(self, value: &str) -> Result + where + E: de::Error, + { + if value == "s" || value == "z" { + Ok(TomlOptLevel(value.to_string())) + } else { + Err(E::custom(format!( + "must be an integer, `z`, or `s`, \ + but found: {}", + value + ))) + } + } + } + + d.deserialize_any(Visitor) + } +} + +impl ser::Serialize for TomlOptLevel { + fn serialize(&self, serializer: S) -> Result + where + S: ser::Serializer, + { + match self.0.parse::() { + Ok(n) => n.serialize(serializer), + Err(_) => self.0.serialize(serializer), + } + } +} + +#[derive(Clone, Debug, Serialize, Eq, PartialEq)] +#[serde(untagged)] +pub enum U32OrBool { + U32(u32), + Bool(bool), +} + +impl<'de> de::Deserialize<'de> for U32OrBool { + fn deserialize(deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + struct Visitor; + + impl<'de> de::Visitor<'de> for Visitor { + type Value = U32OrBool; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("a boolean or an integer") + } + + fn visit_bool(self, b: bool) -> Result + where + E: de::Error, + { + Ok(U32OrBool::Bool(b)) + } + + fn visit_i64(self, u: i64) -> Result + where + E: de::Error, + { + Ok(U32OrBool::U32(u as u32)) + } + + fn visit_u64(self, u: u64) -> Result + where + E: de::Error, + { + Ok(U32OrBool::U32(u as u32)) + } + } 
+ + deserializer.deserialize_any(Visitor) + } +} + +#[derive(Deserialize, Serialize, Clone, Debug, Default, Eq, PartialEq)] +#[serde(rename_all = "kebab-case")] +pub struct TomlProfile { + pub opt_level: Option, + pub lto: Option, + pub codegen_units: Option, + pub debug: Option, + pub debug_assertions: Option, + pub rpath: Option, + pub panic: Option, + pub overflow_checks: Option, + pub incremental: Option, + pub overrides: Option>, + pub build_override: Option>, +} + +#[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Hash)] +pub enum ProfilePackageSpec { + Spec(PackageIdSpec), + All, +} + +impl ser::Serialize for ProfilePackageSpec { + fn serialize(&self, s: S) -> Result + where + S: ser::Serializer, + { + match *self { + ProfilePackageSpec::Spec(ref spec) => spec.serialize(s), + ProfilePackageSpec::All => "*".serialize(s), + } + } +} + +impl<'de> de::Deserialize<'de> for ProfilePackageSpec { + fn deserialize(d: D) -> Result + where + D: de::Deserializer<'de>, + { + let string = String::deserialize(d)?; + if string == "*" { + Ok(ProfilePackageSpec::All) + } else { + PackageIdSpec::parse(&string) + .map_err(de::Error::custom) + .map(ProfilePackageSpec::Spec) + } + } +} + +impl TomlProfile { + pub fn validate( + &self, + name: &str, + features: &Features, + warnings: &mut Vec, + ) -> CargoResult<()> { + if let Some(ref profile) = self.build_override { + features.require(Feature::profile_overrides())?; + profile.validate_override()?; + } + if let Some(ref override_map) = self.overrides { + features.require(Feature::profile_overrides())?; + for profile in override_map.values() { + profile.validate_override()?; + } + } + + match name { + "dev" | "release" => {} + _ => { + if self.overrides.is_some() || self.build_override.is_some() { + bail!( + "Profile overrides may only be specified for \ + `dev` or `release` profile, not `{}`.", + name + ); + } + } + } + + match name { + "doc" => { + warnings.push("profile `doc` is deprecated and has no effect".to_string()); + } + "test" | "bench" => { + if self.panic.is_some() { + warnings.push(format!("`panic` setting is ignored for `{}` profile", name)) + } + } + _ => {} + } + + if let Some(panic) = &self.panic { + if panic != "unwind" && panic != "abort" { + bail!( + "`panic` setting of `{}` is not a valid setting,\ + must be `unwind` or `abort`", + panic + ); + } + } + Ok(()) + } + + fn validate_override(&self) -> CargoResult<()> { + if self.overrides.is_some() || self.build_override.is_some() { + bail!("Profile overrides cannot be nested."); + } + if self.panic.is_some() { + bail!("`panic` may not be specified in a profile override.") + } + if self.lto.is_some() { + bail!("`lto` may not be specified in a profile override.") + } + if self.rpath.is_some() { + bail!("`rpath` may not be specified in a profile override.") + } + Ok(()) + } +} + +#[derive(Clone, Debug, Serialize, Eq, PartialEq)] +pub struct StringOrVec(Vec); + +impl<'de> de::Deserialize<'de> for StringOrVec { + fn deserialize(deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + struct Visitor; + + impl<'de> de::Visitor<'de> for Visitor { + type Value = StringOrVec; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("string or list of strings") + } + + fn visit_str(self, s: &str) -> Result + where + E: de::Error, + { + Ok(StringOrVec(vec![s.to_string()])) + } + + fn visit_seq(self, v: V) -> Result + where + V: de::SeqAccess<'de>, + { + let seq = de::value::SeqAccessDeserializer::new(v); + 
Vec::deserialize(seq).map(StringOrVec) + } + } + + deserializer.deserialize_any(Visitor) + } +} + +#[derive(Clone, Debug, Serialize, Eq, PartialEq)] +#[serde(untagged)] +pub enum StringOrBool { + String(String), + Bool(bool), +} + +impl<'de> de::Deserialize<'de> for StringOrBool { + fn deserialize(deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + struct Visitor; + + impl<'de> de::Visitor<'de> for Visitor { + type Value = StringOrBool; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("a boolean or a string") + } + + fn visit_bool(self, b: bool) -> Result + where + E: de::Error, + { + Ok(StringOrBool::Bool(b)) + } + + fn visit_str(self, s: &str) -> Result + where + E: de::Error, + { + Ok(StringOrBool::String(s.to_string())) + } + } + + deserializer.deserialize_any(Visitor) + } +} + +#[derive(Clone, Debug, Serialize)] +#[serde(untagged)] +pub enum VecStringOrBool { + VecString(Vec), + Bool(bool), +} + +impl<'de> de::Deserialize<'de> for VecStringOrBool { + fn deserialize(deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + struct Visitor; + + impl<'de> de::Visitor<'de> for Visitor { + type Value = VecStringOrBool; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("a boolean or vector of strings") + } + + fn visit_seq(self, v: V) -> Result + where + V: de::SeqAccess<'de>, + { + let seq = de::value::SeqAccessDeserializer::new(v); + Vec::deserialize(seq).map(VecStringOrBool::VecString) + } + + fn visit_bool(self, b: bool) -> Result + where + E: de::Error, + { + Ok(VecStringOrBool::Bool(b)) + } + } + + deserializer.deserialize_any(Visitor) + } +} + +/// Represents the `package`/`project` sections of a `Cargo.toml`. +/// +/// Note that the order of the fields matters, since this is the order they +/// are serialized to a TOML file. For example, you cannot have values after +/// the field `metadata`, since it is a table and values cannot appear after +/// tables. +#[derive(Deserialize, Serialize, Clone, Debug)] +pub struct TomlProject { + edition: Option, + name: String, + version: semver::Version, + authors: Option>, + build: Option, + metabuild: Option, + links: Option, + exclude: Option>, + include: Option>, + publish: Option, + #[serde(rename = "publish-lockfile")] + publish_lockfile: Option, + workspace: Option, + #[serde(rename = "im-a-teapot")] + im_a_teapot: Option, + autobins: Option, + autoexamples: Option, + autotests: Option, + autobenches: Option, + #[serde(rename = "namespaced-features")] + namespaced_features: Option, + #[serde(rename = "default-run")] + default_run: Option, + + // Package metadata. 
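+    // These fields are copied into `ManifestMetadata` in `to_real_manifest`
+    // below, e.g. `description = "..."` or `license = "MIT"` (illustrative values).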
+ description: Option, + homepage: Option, + documentation: Option, + readme: Option, + keywords: Option>, + categories: Option>, + license: Option, + #[serde(rename = "license-file")] + license_file: Option, + repository: Option, + metadata: Option, +} + +#[derive(Debug, Deserialize, Serialize)] +pub struct TomlWorkspace { + members: Option>, + #[serde(rename = "default-members")] + default_members: Option>, + exclude: Option>, +} + +impl TomlProject { + pub fn to_package_id(&self, source_id: SourceId) -> CargoResult { + PackageId::new(&self.name, self.version.clone(), source_id) + } +} + +struct Context<'a, 'b> { + pkgid: Option, + deps: &'a mut Vec, + source_id: SourceId, + nested_paths: &'a mut Vec, + config: &'b Config, + warnings: &'a mut Vec, + platform: Option, + root: &'a Path, + features: &'a Features, +} + +impl TomlManifest { + pub fn prepare_for_publish(&self, config: &Config) -> CargoResult { + let mut package = self + .package + .as_ref() + .or_else(|| self.project.as_ref()) + .unwrap() + .clone(); + package.workspace = None; + return Ok(TomlManifest { + package: Some(package), + project: None, + profile: self.profile.clone(), + lib: self.lib.clone(), + bin: self.bin.clone(), + example: self.example.clone(), + test: self.test.clone(), + bench: self.bench.clone(), + dependencies: map_deps(config, self.dependencies.as_ref())?, + dev_dependencies: map_deps( + config, + self.dev_dependencies + .as_ref() + .or_else(|| self.dev_dependencies2.as_ref()), + )?, + dev_dependencies2: None, + build_dependencies: map_deps( + config, + self.build_dependencies + .as_ref() + .or_else(|| self.build_dependencies2.as_ref()), + )?, + build_dependencies2: None, + features: self.features.clone(), + target: match self.target.as_ref().map(|target_map| { + target_map + .iter() + .map(|(k, v)| { + Ok(( + k.clone(), + TomlPlatform { + features: v.features.clone(), + dependencies: map_deps(config, v.dependencies.as_ref())?, + dev_dependencies: map_deps( + config, + v.dev_dependencies + .as_ref() + .or_else(|| v.dev_dependencies2.as_ref()), + )?, + dev_dependencies2: None, + build_dependencies: map_deps( + config, + v.build_dependencies + .as_ref() + .or_else(|| v.build_dependencies2.as_ref()), + )?, + build_dependencies2: None, + }, + )) + }) + .collect() + }) { + Some(Ok(v)) => Some(v), + Some(Err(e)) => return Err(e), + None => None, + }, + replace: None, + patch: None, + workspace: None, + badges: self.badges.clone(), + cargo_features: self.cargo_features.clone(), + }); + + fn map_deps( + config: &Config, + deps: Option<&BTreeMap>, + ) -> CargoResult>> { + let deps = match deps { + Some(deps) => deps, + None => return Ok(None), + }; + let deps = deps + .iter() + .map(|(k, v)| Ok((k.clone(), map_dependency(config, v)?))) + .collect::>>()?; + Ok(Some(deps)) + } + + fn map_dependency(config: &Config, dep: &TomlDependency) -> CargoResult { + match *dep { + TomlDependency::Detailed(ref d) => { + let mut d = d.clone(); + d.path.take(); // path dependencies become crates.io deps + // registry specifications are elaborated to the index URL + if let Some(registry) = d.registry.take() { + let src = SourceId::alt_registry(config, ®istry)?; + d.registry_index = Some(src.url().to_string()); + } + Ok(TomlDependency::Detailed(d)) + } + TomlDependency::Simple(ref s) => { + Ok(TomlDependency::Detailed(DetailedTomlDependency { + version: Some(s.clone()), + ..Default::default() + })) + } + } + } + } + + pub fn to_real_manifest( + me: &Rc, + source_id: SourceId, + package_root: &Path, + config: &Config, + ) -> 
CargoResult<(Manifest, Vec)> { + let mut nested_paths = vec![]; + let mut warnings = vec![]; + let mut errors = vec![]; + + // Parse features first so they will be available when parsing other parts of the TOML. + let empty = Vec::new(); + let cargo_features = me.cargo_features.as_ref().unwrap_or(&empty); + let features = Features::new(cargo_features, &mut warnings)?; + + let project = me.project.as_ref().or_else(|| me.package.as_ref()); + let project = project.ok_or_else(|| failure::format_err!("no `package` section found"))?; + + let package_name = project.name.trim(); + if package_name.is_empty() { + bail!("package name cannot be an empty string") + } + + validate_package_name(package_name, "package name", "")?; + + let pkgid = project.to_package_id(source_id)?; + + let edition = if let Some(ref edition) = project.edition { + features + .require(Feature::edition()) + .chain_err(|| "editions are unstable")?; + edition + .parse() + .chain_err(|| "failed to parse the `edition` key")? + } else { + Edition::Edition2015 + }; + + if project.metabuild.is_some() { + features.require(Feature::metabuild())?; + } + + // If we have no lib at all, use the inferred lib, if available. + // If we have a lib with a path, we're done. + // If we have a lib with no path, use the inferred lib or else the package name. + let targets = targets( + &features, + me, + package_name, + package_root, + edition, + &project.build, + &project.metabuild, + &mut warnings, + &mut errors, + )?; + + if targets.is_empty() { + debug!("manifest has no build targets"); + } + + if let Err(e) = unique_build_targets(&targets, package_root) { + warnings.push(format!( + "file found to be present in multiple \ + build targets: {}", + e + )); + } + + let mut deps = Vec::new(); + let mut ftrs = BTreeMap::new(); + let replace; + let patch; + + { + let mut cx = Context { + pkgid: Some(pkgid), + deps: &mut deps, + source_id, + nested_paths: &mut nested_paths, + config, + warnings: &mut warnings, + features: &features, + platform: None, + root: package_root, + }; + + fn process_dependencies( + cx: &mut Context<'_, '_>, + new_deps: Option<&BTreeMap>, + kind: Option, + ) -> CargoResult<()> { + let dependencies = match new_deps { + Some(dependencies) => dependencies, + None => return Ok(()), + }; + for (n, v) in dependencies.iter() { + let dep = v.to_dependency(n, cx, kind)?; + cx.deps.push(dep); + } + + Ok(()) + } + fn process_features( + ftrs: &mut BTreeMap, Vec)>, + new_ftrs: Option<&BTreeMap>>, + platform: Option<&Platform>, + ) -> CargoResult<()> { + let features = match new_ftrs { + Some(features) => features, + None => return Ok(()), + }; + for (n, v) in features.iter() { + ftrs.insert(n.clone(), (platform.cloned(), v.clone())); + } + + Ok(()) + } + + // Collect the dependencies. 
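+            // Both the top-level tables and the platform-specific
+            // `[target.'cfg(...)']` tables are walked here; the latter may
+            // also declare features, e.g. (illustrative, as in the
+            // cfg_features tests below):
+            //
+            //     [target.'cfg(unix)'.features]
+            //     b = []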
+ process_dependencies(&mut cx, me.dependencies.as_ref(), None)?; + let dev_deps = me + .dev_dependencies + .as_ref() + .or_else(|| me.dev_dependencies2.as_ref()); + process_dependencies(&mut cx, dev_deps, Some(Kind::Development))?; + let build_deps = me + .build_dependencies + .as_ref() + .or_else(|| me.build_dependencies2.as_ref()); + process_dependencies(&mut cx, build_deps, Some(Kind::Build))?; + process_features(&mut ftrs, me.features.as_ref(), None)?; + + for (name, platform) in me.target.iter().flat_map(|t| t) { + cx.platform = Some(name.parse()?); + process_dependencies(&mut cx, platform.dependencies.as_ref(), None)?; + let build_deps = platform + .build_dependencies + .as_ref() + .or_else(|| platform.build_dependencies2.as_ref()); + process_dependencies(&mut cx, build_deps, Some(Kind::Build))?; + let dev_deps = platform + .dev_dependencies + .as_ref() + .or_else(|| platform.dev_dependencies2.as_ref()); + process_dependencies(&mut cx, dev_deps, Some(Kind::Development))?; + process_features(&mut ftrs, platform.features.as_ref(), cx.platform.as_ref())?; + } + + replace = me.replace(&mut cx)?; + patch = me.patch(&mut cx)?; + } + + { + let mut names_sources = BTreeMap::new(); + for dep in &deps { + let name = dep.name_in_toml(); + let prev = names_sources.insert(name.to_string(), dep.source_id()); + if prev.is_some() && prev != Some(dep.source_id()) { + bail!( + "Dependency '{}' has different source paths depending on the build \ + target. Each dependency must have a single canonical source path \ + irrespective of build target.", + name + ); + } + } + } + + let exclude = project.exclude.clone().unwrap_or_default(); + let include = project.include.clone().unwrap_or_default(); + if project.namespaced_features.is_some() { + features.require(Feature::namespaced_features())?; + } + + let summary = Summary::new( + pkgid, + deps, + &ftrs, + project.links.as_ref().map(|x| x.as_str()), + project.namespaced_features.unwrap_or(false), + )?; + let metadata = ManifestMetadata { + description: project.description.clone(), + homepage: project.homepage.clone(), + documentation: project.documentation.clone(), + readme: project.readme.clone(), + authors: project.authors.clone().unwrap_or_default(), + license: project.license.clone(), + license_file: project.license_file.clone(), + repository: project.repository.clone(), + keywords: project.keywords.clone().unwrap_or_default(), + categories: project.categories.clone().unwrap_or_default(), + badges: me.badges.clone().unwrap_or_default(), + links: project.links.clone(), + }; + + let workspace_config = match (me.workspace.as_ref(), project.workspace.as_ref()) { + (Some(config), None) => WorkspaceConfig::Root(WorkspaceRootConfig::new( + package_root, + &config.members, + &config.default_members, + &config.exclude, + )), + (None, root) => WorkspaceConfig::Member { + root: root.cloned(), + }, + (Some(..), Some(..)) => bail!( + "cannot configure both `package.workspace` and \ + `[workspace]`, only one can be specified" + ), + }; + let profiles = Profiles::new(me.profile.as_ref(), config, &features, &mut warnings)?; + let publish = match project.publish { + Some(VecStringOrBool::VecString(ref vecstring)) => Some(vecstring.clone()), + Some(VecStringOrBool::Bool(false)) => Some(vec![]), + None | Some(VecStringOrBool::Bool(true)) => None, + }; + + let publish_lockfile = match project.publish_lockfile { + Some(b) => { + features.require(Feature::publish_lockfile())?; + warnings.push( + "The `publish-lockfile` feature is deprecated and currently \ + has no effect. 
It may be removed in a future version." + .to_string(), + ); + b + } + None => features.is_enabled(Feature::publish_lockfile()), + }; + + if summary.features().contains_key("default-features") { + warnings.push( + "`default-features = [\"..\"]` was found in [features]. \ + Did you mean to use `default = [\"..\"]`?" + .to_string(), + ) + } + + if let Some(run) = &project.default_run { + if !targets + .iter() + .filter(|t| t.is_bin()) + .any(|t| t.name() == run) + { + let suggestion = + util::closest_msg(run, targets.iter().filter(|t| t.is_bin()), |t| t.name()); + bail!("default-run target `{}` not found{}", run, suggestion); + } + } + + let custom_metadata = project.metadata.clone(); + let mut manifest = Manifest::new( + summary, + targets, + exclude, + include, + project.links.clone(), + metadata, + custom_metadata, + profiles, + publish, + publish_lockfile, + replace, + patch, + workspace_config, + features, + edition, + project.im_a_teapot, + project.default_run.clone(), + Rc::clone(me), + project.metabuild.clone().map(|sov| sov.0), + ); + if project.license_file.is_some() && project.license.is_some() { + manifest.warnings_mut().add_warning( + "only one of `license` or \ + `license-file` is necessary" + .to_string(), + ); + } + for warning in warnings { + manifest.warnings_mut().add_warning(warning); + } + for error in errors { + manifest.warnings_mut().add_critical_warning(error); + } + + manifest.feature_gate()?; + + Ok((manifest, nested_paths)) + } + + fn to_virtual_manifest( + me: &Rc, + source_id: SourceId, + root: &Path, + config: &Config, + ) -> CargoResult<(VirtualManifest, Vec)> { + if me.project.is_some() { + bail!("virtual manifests do not define [project]"); + } + if me.package.is_some() { + bail!("virtual manifests do not define [package]"); + } + if me.lib.is_some() { + bail!("virtual manifests do not specify [lib]"); + } + if me.bin.is_some() { + bail!("virtual manifests do not specify [[bin]]"); + } + if me.example.is_some() { + bail!("virtual manifests do not specify [[example]]"); + } + if me.test.is_some() { + bail!("virtual manifests do not specify [[test]]"); + } + if me.bench.is_some() { + bail!("virtual manifests do not specify [[bench]]"); + } + if me.dependencies.is_some() { + bail!("virtual manifests do not specify [dependencies]"); + } + if me.dev_dependencies.is_some() || me.dev_dependencies2.is_some() { + bail!("virtual manifests do not specify [dev-dependencies]"); + } + if me.build_dependencies.is_some() || me.build_dependencies2.is_some() { + bail!("virtual manifests do not specify [build-dependencies]"); + } + if me.features.is_some() { + bail!("virtual manifests do not specify [features]"); + } + if me.target.is_some() { + bail!("virtual manifests do not specify [target]"); + } + if me.badges.is_some() { + bail!("virtual manifests do not specify [badges]"); + } + + let mut nested_paths = Vec::new(); + let mut warnings = Vec::new(); + let mut deps = Vec::new(); + let empty = Vec::new(); + let cargo_features = me.cargo_features.as_ref().unwrap_or(&empty); + let features = Features::new(cargo_features, &mut warnings)?; + + let (replace, patch) = { + let mut cx = Context { + pkgid: None, + deps: &mut deps, + source_id, + nested_paths: &mut nested_paths, + config, + warnings: &mut warnings, + platform: None, + features: &features, + root, + }; + (me.replace(&mut cx)?, me.patch(&mut cx)?) 
+ }; + let profiles = Profiles::new(me.profile.as_ref(), config, &features, &mut warnings)?; + let workspace_config = match me.workspace { + Some(ref config) => WorkspaceConfig::Root(WorkspaceRootConfig::new( + root, + &config.members, + &config.default_members, + &config.exclude, + )), + None => { + bail!("virtual manifests must be configured with [workspace]"); + } + }; + Ok(( + VirtualManifest::new(replace, patch, workspace_config, profiles, features), + nested_paths, + )) + } + + fn replace(&self, cx: &mut Context<'_, '_>) -> CargoResult> { + if self.patch.is_some() && self.replace.is_some() { + bail!("cannot specify both [replace] and [patch]"); + } + let mut replace = Vec::new(); + for (spec, replacement) in self.replace.iter().flat_map(|x| x) { + let mut spec = PackageIdSpec::parse(spec).chain_err(|| { + format!( + "replacements must specify a valid semver \ + version to replace, but `{}` does not", + spec + ) + })?; + if spec.url().is_none() { + spec.set_url(CRATES_IO_INDEX.parse().unwrap()); + } + + let version_specified = match *replacement { + TomlDependency::Detailed(ref d) => d.version.is_some(), + TomlDependency::Simple(..) => true, + }; + if version_specified { + bail!( + "replacements cannot specify a version \ + requirement, but found one for `{}`", + spec + ); + } + + let mut dep = replacement.to_dependency(spec.name().as_str(), cx, None)?; + { + let version = spec.version().ok_or_else(|| { + failure::format_err!( + "replacements must specify a version \ + to replace, but `{}` does not", + spec + ) + })?; + dep.set_version_req(VersionReq::exact(version)); + } + replace.push((spec, dep)); + } + Ok(replace) + } + + fn patch(&self, cx: &mut Context<'_, '_>) -> CargoResult>> { + let mut patch = HashMap::new(); + for (url, deps) in self.patch.iter().flat_map(|x| x) { + let url = match &url[..] { + CRATES_IO_REGISTRY => CRATES_IO_INDEX.parse().unwrap(), + _ => cx + .config + .get_registry_index(url) + .or_else(|_| url.into_url()) + .chain_err(|| { + format!("[patch] entry `{}` should be a URL or registry name", url) + })?, + }; + patch.insert( + url, + deps.iter() + .map(|(name, dep)| dep.to_dependency(name, cx, None)) + .collect::>>()?, + ); + } + Ok(patch) + } + + fn maybe_custom_build( + &self, + build: &Option, + package_root: &Path, + ) -> Option { + let build_rs = package_root.join("build.rs"); + match *build { + // Explicitly no build script. + Some(StringOrBool::Bool(false)) => None, + Some(StringOrBool::Bool(true)) => Some(build_rs), + Some(StringOrBool::String(ref s)) => Some(PathBuf::from(s)), + None => { + match fs::metadata(&build_rs) { + // If there is a `build.rs` file next to the `Cargo.toml`, assume it is + // a build script. + Ok(ref e) if e.is_file() => Some(build_rs), + Ok(_) | Err(_) => None, + } + } + } + } + + pub fn has_profiles(&self) -> bool { + self.profile.is_some() + } +} + +/// Checks a list of build targets, and ensures the target names are unique within a vector. +/// If not, the name of the offending build target is returned. 
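+/// (The returned `String` is the colliding source path, rendered with
+/// `Path::display`; it is what the "file found to be present in multiple
+/// build targets" warning reports.)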
+fn unique_build_targets(targets: &[Target], package_root: &Path) -> Result<(), String> { + let mut seen = HashSet::new(); + for target in targets { + if let TargetSourcePath::Path(path) = target.src_path() { + let full = package_root.join(path); + if !seen.insert(full.clone()) { + return Err(full.display().to_string()); + } + } + } + Ok(()) +} + +impl TomlDependency { + fn to_dependency( + &self, + name: &str, + cx: &mut Context<'_, '_>, + kind: Option, + ) -> CargoResult { + match *self { + TomlDependency::Simple(ref version) => DetailedTomlDependency { + version: Some(version.clone()), + ..Default::default() + } + .to_dependency(name, cx, kind), + TomlDependency::Detailed(ref details) => details.to_dependency(name, cx, kind), + } + } +} + +impl DetailedTomlDependency { + fn to_dependency( + &self, + name_in_toml: &str, + cx: &mut Context<'_, '_>, + kind: Option, + ) -> CargoResult { + if self.version.is_none() && self.path.is_none() && self.git.is_none() { + let msg = format!( + "dependency ({}) specified without \ + providing a local path, Git repository, or \ + version to use. This will be considered an \ + error in future versions", + name_in_toml + ); + cx.warnings.push(msg); + } + + if let Some(version) = &self.version { + if version.contains('+') { + cx.warnings.push(format!( + "version requirement `{}` for dependency `{}` \ + includes semver metadata which will be ignored, removing the \ + metadata is recommended to avoid confusion", + version, name_in_toml + )); + } + } + + if self.git.is_none() { + let git_only_keys = [ + (&self.branch, "branch"), + (&self.tag, "tag"), + (&self.rev, "rev"), + ]; + + for &(key, key_name) in &git_only_keys { + if key.is_some() { + let msg = format!( + "key `{}` is ignored for dependency ({}). \ + This will be considered an error in future versions", + key_name, name_in_toml + ); + cx.warnings.push(msg) + } + } + } + + let new_source_id = match ( + self.git.as_ref(), + self.path.as_ref(), + self.registry.as_ref(), + self.registry_index.as_ref(), + ) { + (Some(_), _, Some(_), _) | (Some(_), _, _, Some(_)) => bail!( + "dependency ({}) specification is ambiguous. \ + Only one of `git` or `registry` is allowed.", + name_in_toml + ), + (_, _, Some(_), Some(_)) => bail!( + "dependency ({}) specification is ambiguous. \ + Only one of `registry` or `registry-index` is allowed.", + name_in_toml + ), + (Some(git), maybe_path, _, _) => { + if maybe_path.is_some() { + let msg = format!( + "dependency ({}) specification is ambiguous. \ + Only one of `git` or `path` is allowed. \ + This will be considered an error in future versions", + name_in_toml + ); + cx.warnings.push(msg) + } + + let n_details = [&self.branch, &self.tag, &self.rev] + .iter() + .filter(|d| d.is_some()) + .count(); + + if n_details > 1 { + let msg = format!( + "dependency ({}) specification is ambiguous. \ + Only one of `branch`, `tag` or `rev` is allowed. \ + This will be considered an error in future versions", + name_in_toml + ); + cx.warnings.push(msg) + } + + let reference = self + .branch + .clone() + .map(GitReference::Branch) + .or_else(|| self.tag.clone().map(GitReference::Tag)) + .or_else(|| self.rev.clone().map(GitReference::Rev)) + .unwrap_or_else(|| GitReference::Branch("master".to_string())); + let loc = git.into_url()?; + SourceId::for_git(&loc, reference)? 
+ } + (None, Some(path), _, _) => { + cx.nested_paths.push(PathBuf::from(path)); + // If the source ID for the package we're parsing is a path + // source, then we normalize the path here to get rid of + // components like `..`. + // + // The purpose of this is to get a canonical ID for the package + // that we're depending on to ensure that builds of this package + // always end up hashing to the same value no matter where it's + // built from. + if cx.source_id.is_path() { + let path = cx.root.join(path); + let path = util::normalize_path(&path); + SourceId::for_path(&path)? + } else { + cx.source_id + } + } + (None, None, Some(registry), None) => SourceId::alt_registry(cx.config, registry)?, + (None, None, None, Some(registry_index)) => { + let url = registry_index.into_url()?; + SourceId::for_registry(&url)? + } + (None, None, None, None) => SourceId::crates_io(cx.config)?, + }; + + let (pkg_name, explicit_name_in_toml) = match self.package { + Some(ref s) => (&s[..], Some(name_in_toml)), + None => (name_in_toml, None), + }; + + let version = self.version.as_ref().map(|v| &v[..]); + let mut dep = match cx.pkgid { + Some(id) => Dependency::parse(pkg_name, version, new_source_id, id, cx.config)?, + None => Dependency::parse_no_deprecated(pkg_name, version, new_source_id)?, + }; + dep.set_features(self.features.iter().flat_map(|x| x)) + .set_default_features( + self.default_features + .or(self.default_features2) + .unwrap_or(true), + ) + .set_optional(self.optional.unwrap_or(false)) + .set_platform(cx.platform.clone()); + if let Some(registry) = &self.registry { + let registry_id = SourceId::alt_registry(cx.config, registry)?; + dep.set_registry_id(registry_id); + } + if let Some(registry_index) = &self.registry_index { + let url = registry_index.into_url()?; + let registry_id = SourceId::for_registry(&url)?; + dep.set_registry_id(registry_id); + } + + if let Some(kind) = kind { + dep.set_kind(kind); + } + if let Some(name_in_toml) = explicit_name_in_toml { + cx.features.require(Feature::rename_dependency())?; + dep.set_explicit_name_in_toml(name_in_toml); + } + + if let Some(p) = self.public { + cx.features.require(Feature::public_dependency())?; + + if dep.kind() != Kind::Normal { + bail!("'public' specifier can only be used on regular dependencies, not {:?} dependencies", dep.kind()); + } + + dep.set_public(p); + } + Ok(dep) + } +} + +#[derive(Default, Serialize, Deserialize, Debug, Clone)] +struct TomlTarget { + name: Option, + + // The intention was to only accept `crate-type` here but historical + // versions of Cargo also accepted `crate_type`, so look for both. + #[serde(rename = "crate-type")] + crate_type: Option>, + #[serde(rename = "crate_type")] + crate_type2: Option>, + + path: Option, + test: Option, + doctest: Option, + bench: Option, + doc: Option, + plugin: Option, + #[serde(rename = "proc-macro")] + proc_macro: Option, + #[serde(rename = "proc_macro")] + proc_macro2: Option, + harness: Option, + #[serde(rename = "required-features")] + required_features: Option>, + edition: Option, +} + +#[derive(Clone)] +struct PathValue(PathBuf); + +impl<'de> de::Deserialize<'de> for PathValue { + fn deserialize(deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + Ok(PathValue(String::deserialize(deserializer)?.into())) + } +} + +impl ser::Serialize for PathValue { + fn serialize(&self, serializer: S) -> Result + where + S: ser::Serializer, + { + self.0.serialize(serializer) + } +} + +/// Corresponds to a `target` entry, but `TomlTarget` is already used. 
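+/// A platform table may hold `dependencies`, `build-dependencies`,
+/// `dev-dependencies`, and a `features` table, e.g. (illustrative; the
+/// `features` form mirrors the cfg_features tests):
+///
+///     [target.'cfg(unix)'.dependencies]
+///     libc = "0.2"
+///
+///     [target.'cfg(unix)'.features]
+///     b = []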
+#[derive(Serialize, Deserialize, Debug)] +struct TomlPlatform { + features: Option>>, + dependencies: Option>, + #[serde(rename = "build-dependencies")] + build_dependencies: Option>, + #[serde(rename = "build_dependencies")] + build_dependencies2: Option>, + #[serde(rename = "dev-dependencies")] + dev_dependencies: Option>, + #[serde(rename = "dev_dependencies")] + dev_dependencies2: Option>, +} + +impl TomlTarget { + fn new() -> TomlTarget { + TomlTarget::default() + } + + fn name(&self) -> String { + match self.name { + Some(ref name) => name.clone(), + None => panic!("target name is required"), + } + } + + fn proc_macro(&self) -> Option { + self.proc_macro.or(self.proc_macro2).or_else(|| { + if let Some(types) = self.crate_types() { + if types.contains(&"proc-macro".to_string()) { + return Some(true); + } + } + None + }) + } + + fn crate_types(&self) -> Option<&Vec> { + self.crate_type + .as_ref() + .or_else(|| self.crate_type2.as_ref()) + } +} + +impl fmt::Debug for PathValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} diff --git a/tests/testsuite/cfg_features.rs b/tests/testsuite/cfg_features.rs new file mode 100644 index 00000000000..99d0742e642 --- /dev/null +++ b/tests/testsuite/cfg_features.rs @@ -0,0 +1,224 @@ +use crate::support::project; + +#[cargo_test] +fn syntax() { + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "a" + version = "0.0.1" + authors = [] + + [target.'cfg(unix)'.features] + b = [] + [target.'cfg(windows)'.features] + b = [] + "#, + ) + .file( + "src/lib.rs", + r#" + pub fn bb() {} + "#, + ) + .build(); + p.cargo("build") + .with_stderr( + "\ +[COMPILING] a v0.0.1 ([CWD]) +[FINISHED] dev [unoptimized + debuginfo] target(s) in [..] +", + ) + .run(); +} + +#[cargo_test] +fn include_by_param() { + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "a" + version = "0.0.1" + authors = [] + + [target.'cfg(unix)'.features] + b = [] + [target.'cfg(windows)'.features] + c = [] + "#, + ) + .file( + "src/lib.rs", + r#" + #[cfg(feature = "b")] + pub const BB: usize = 0; + #[cfg(feature = "c")] + pub const BB: usize = 1; + + pub fn bb() -> Result<(), ()> { if BB > 0 { Ok(()) } else { Err(()) } } + "#, + ) + .build(); + p.cargo(format!("build --features {}", if cfg!(unix) { "b" } else { "c" }).as_str()) + .with_stderr( + "\ +[COMPILING] a v0.0.1 ([CWD]) +[FINISHED] dev [unoptimized + debuginfo] target(s) in [..] 
+", + ) + .run(); +} + +#[cargo_test] +fn dont_include_by_platform() { + let other_family = if cfg!(unix) { "windows" } else { "unix" }; + let p = project() + .file( + "Cargo.toml", + &format!( + r#" + [package] + name = "a" + version = "0.0.1" + authors = [] + + [target.'cfg({})'.features] + b = [] + "#, + other_family + ), + ) + .file( + "src/lib.rs", + r#" + #[cfg(feature = "b")] + pub const BB: usize = 0; + + pub fn bb() { let _ = BB; } + "#, + ) + .build(); + p.cargo("build --features b -vv") + .with_status(101) + .with_stderr_contains( + "\ + error[E0425]: cannot find value `BB` in this scope", + ) + .run(); +} + +#[cargo_test] +fn dont_include_by_param() { + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "a" + version = "0.0.1" + authors = [] + + [target.'cfg(unix)'.features] + b = [] + [target.'cfg(windows)'.features] + c = [] + "#, + ) + .file( + "src/lib.rs", + r#" + #[cfg(feature = "b")] + pub const BB: usize = 0; + #[cfg(feature = "c")] + pub const BB: usize = 1; + + pub fn bb() -> Result<(), ()> { if BB > 0 { Ok(()) } else { Err(()) } } + "#, + ) + .build(); + p.cargo("build -v") + .with_status(101) + .with_stderr_contains( + "\ + error[E0425]: cannot find value `BB` in this scope", + ) + .run(); +} + +#[cargo_test] +fn dont_include_default() { + let other_family = if cfg!(unix) { "windows" } else { "unix" }; + let p = project() + .file( + "Cargo.toml", + &format!( + r#" + [package] + name = "a" + version = "0.0.1" + authors = [] + + [target.'cfg({})'.features] + b = [] + + [features] + default = ["b"] + "#, + other_family + ), + ) + .file( + "src/lib.rs", + r#" + #[cfg(feature = "b")] + pub const BB: usize = 0; + + pub fn bb() { let _ = BB; } + "#, + ) + .build(); + p.cargo("build -v") + .with_status(101) + .with_stderr_contains( + "\ + error[E0425]: cannot find value `BB` in this scope", + ) + .run(); +} + +// https://github.com/rust-lang/cargo/issues/5313 +#[cargo_test] +#[cfg(all(target_arch = "x86_64", target_os = "linux", target_env = "gnu"))] +fn cfg_looks_at_rustflags_for_target() { + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "a" + version = "0.0.1" + authors = [] + + [target.'cfg(with_b)'.features] + b = [] + "#, + ) + .file( + "src/main.rs", + r#" + #[cfg(with_b)] + pub const BB: usize = 0; + + fn main() { let _ = BB; } + "#, + ) + .build(); + + p.cargo("build --target x86_64-unknown-linux-gnu") + .env("RUSTFLAGS", "--cfg with_b") + .run(); +} diff --git a/tests/testsuite/main.rs b/tests/testsuite/main.rs index 618c92ceb23..9d4a3d7e736 100644 --- a/tests/testsuite/main.rs +++ b/tests/testsuite/main.rs @@ -1,106 +1,107 @@ -#![warn(rust_2018_idioms)] // while we're getting used to 2018 -#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![allow(clippy::blacklisted_name)] -#![allow(clippy::explicit_iter_loop)] -#![allow(clippy::redundant_closure)] -#![warn(clippy::needless_borrow)] -#![warn(clippy::redundant_clone)] - -#[macro_use] -extern crate cargo_test_macro; - -#[macro_use] -mod support; - -mod alt_registry; -mod bad_config; -mod bad_manifest_path; -mod bench; -mod build; -mod build_auth; -mod build_lib; -mod build_plan; -mod build_script; -mod build_script_env; -mod cache_messages; -mod cargo_alias_config; -mod cargo_command; -mod cargo_features; -mod cfg; -mod check; -mod clean; -mod clippy; -mod collisions; -mod concurrent; -mod config; -mod corrupt_git; -mod cross_compile; -mod cross_publish; -mod custom_target; -mod death; -mod dep_info; -mod directory; -mod doc; -mod edition; -mod features; -mod fetch; 
-mod fix; -mod freshness; -mod generate_lockfile; -mod git; -mod init; -mod install; -mod install_upgrade; -mod jobserver; -mod list_targets; -mod local_registry; -mod lockfile_compat; -mod login; -mod member_errors; -mod metabuild; -mod metadata; -mod net_config; -mod new; -mod offline; -mod out_dir; -mod overrides; -mod package; -mod patch; -mod path; -mod plugins; -mod proc_macro; -mod profile_config; -mod profile_overrides; -mod profile_targets; -mod profiles; -mod pub_priv; -mod publish; -mod publish_lockfile; -mod read_manifest; -mod registry; -mod rename_deps; -mod required_features; -mod resolve; -mod run; -mod rustc; -mod rustc_info_cache; -mod rustdoc; -mod rustdocflags; -mod rustflags; -mod search; -mod shell_quoting; -mod small_fd_limits; -mod test; -mod tool_paths; -mod update; -mod vendor; -mod verify_project; -mod version; -mod warn_on_failure; -mod workspaces; - -#[cargo_test] -fn aaa_trigger_cross_compile_disabled_check() { - // This triggers the cross compile disabled check to run ASAP, see #5141 - support::cross_compile::disabled(); -} +#![warn(rust_2018_idioms)] // while we're getting used to 2018 +#![cfg_attr(feature = "deny-warnings", deny(warnings))] +#![allow(clippy::blacklisted_name)] +#![allow(clippy::explicit_iter_loop)] +#![allow(clippy::redundant_closure)] +#![warn(clippy::needless_borrow)] +#![warn(clippy::redundant_clone)] + +#[macro_use] +extern crate cargo_test_macro; + +#[macro_use] +mod support; + +mod alt_registry; +mod bad_config; +mod bad_manifest_path; +mod bench; +mod build; +mod build_auth; +mod build_lib; +mod build_plan; +mod build_script; +mod build_script_env; +mod cache_messages; +mod cargo_alias_config; +mod cargo_command; +mod cargo_features; +mod cfg; +mod cfg_features; +mod check; +mod clean; +mod clippy; +mod collisions; +mod concurrent; +mod config; +mod corrupt_git; +mod cross_compile; +mod cross_publish; +mod custom_target; +mod death; +mod dep_info; +mod directory; +mod doc; +mod edition; +mod features; +mod fetch; +mod fix; +mod freshness; +mod generate_lockfile; +mod git; +mod init; +mod install; +mod install_upgrade; +mod jobserver; +mod list_targets; +mod local_registry; +mod lockfile_compat; +mod login; +mod member_errors; +mod metabuild; +mod metadata; +mod net_config; +mod new; +mod offline; +mod out_dir; +mod overrides; +mod package; +mod patch; +mod path; +mod plugins; +mod proc_macro; +mod profile_config; +mod profile_overrides; +mod profile_targets; +mod profiles; +mod pub_priv; +mod publish; +mod publish_lockfile; +mod read_manifest; +mod registry; +mod rename_deps; +mod required_features; +mod resolve; +mod run; +mod rustc; +mod rustc_info_cache; +mod rustdoc; +mod rustdocflags; +mod rustflags; +mod search; +mod shell_quoting; +mod small_fd_limits; +mod test; +mod tool_paths; +mod update; +mod vendor; +mod verify_project; +mod version; +mod warn_on_failure; +mod workspaces; + +#[cargo_test] +fn aaa_trigger_cross_compile_disabled_check() { + // This triggers the cross compile disabled check to run ASAP, see #5141 + support::cross_compile::disabled(); +} diff --git a/tests/testsuite/metadata.rs b/tests/testsuite/metadata.rs index 10f5636c67d..f6b1dbf6455 100644 --- a/tests/testsuite/metadata.rs +++ b/tests/testsuite/metadata.rs @@ -1,1717 +1,1726 @@ -use crate::support::registry::Package; -use crate::support::{basic_bin_manifest, basic_lib_manifest, main_file, project}; - -#[cargo_test] -fn cargo_metadata_simple() { - let p = project() - .file("src/foo.rs", "") - .file("Cargo.toml", &basic_bin_manifest("foo")) - .build(); - 
- p.cargo("metadata") - .with_json( - r#" - { - "packages": [ - { - "authors": [ - "wycats@example.com" - ], - "categories": [], - "name": "foo", - "version": "0.5.0", - "id": "foo[..]", - "keywords": [], - "source": null, - "dependencies": [], - "edition": "2015", - "license": null, - "license_file": null, - "links": null, - "description": null, - "readme": null, - "repository": null, - "targets": [ - { - "kind": [ - "bin" - ], - "crate_types": [ - "bin" - ], - "doctest": false, - "edition": "2015", - "name": "foo", - "src_path": "[..]/foo/src/foo.rs" - } - ], - "features": {}, - "manifest_path": "[..]Cargo.toml", - "metadata": null - } - ], - "workspace_members": ["foo 0.5.0 (path+file:[..]foo)"], - "resolve": { - "nodes": [ - { - "dependencies": [], - "deps": [], - "features": [], - "id": "foo 0.5.0 (path+file:[..]foo)" - } - ], - "root": "foo 0.5.0 (path+file:[..]foo)" - }, - "target_directory": "[..]foo/target", - "version": 1, - "workspace_root": "[..]/foo" - }"#, - ) - .run(); -} - -#[cargo_test] -fn cargo_metadata_warns_on_implicit_version() { - let p = project() - .file("src/foo.rs", "") - .file("Cargo.toml", &basic_bin_manifest("foo")) - .build(); - - p.cargo("metadata").with_stderr("[WARNING] please specify `--format-version` flag explicitly to avoid compatibility problems").run(); - - p.cargo("metadata --format-version 1").with_stderr("").run(); -} - -#[cargo_test] -fn library_with_several_crate_types() { - let p = project() - .file("src/lib.rs", "") - .file( - "Cargo.toml", - r#" -[package] -name = "foo" -version = "0.5.0" - -[lib] -crate-type = ["lib", "staticlib"] - "#, - ) - .build(); - - p.cargo("metadata") - .with_json( - r#" - { - "packages": [ - { - "authors": [], - "categories": [], - "name": "foo", - "readme": null, - "repository": null, - "version": "0.5.0", - "id": "foo[..]", - "keywords": [], - "source": null, - "dependencies": [], - "edition": "2015", - "license": null, - "license_file": null, - "links": null, - "description": null, - "targets": [ - { - "kind": [ - "lib", - "staticlib" - ], - "crate_types": [ - "lib", - "staticlib" - ], - "doctest": true, - "edition": "2015", - "name": "foo", - "src_path": "[..]/foo/src/lib.rs" - } - ], - "features": {}, - "manifest_path": "[..]Cargo.toml", - "metadata": null - } - ], - "workspace_members": ["foo 0.5.0 (path+file:[..]foo)"], - "resolve": { - "nodes": [ - { - "dependencies": [], - "deps": [], - "features": [], - "id": "foo 0.5.0 (path+file:[..]foo)" - } - ], - "root": "foo 0.5.0 (path+file:[..]foo)" - }, - "target_directory": "[..]foo/target", - "version": 1, - "workspace_root": "[..]/foo" - }"#, - ) - .run(); -} - -#[cargo_test] -fn library_with_features() { - let p = project() - .file("src/lib.rs", "") - .file( - "Cargo.toml", - r#" -[package] -name = "foo" -version = "0.5.0" - -[features] -default = ["default_feat"] -default_feat = [] -optional_feat = [] - "#, - ) - .build(); - - p.cargo("metadata") - .with_json( - r#" - { - "packages": [ - { - "authors": [], - "categories": [], - "name": "foo", - "readme": null, - "repository": null, - "version": "0.5.0", - "id": "foo[..]", - "keywords": [], - "source": null, - "dependencies": [], - "edition": "2015", - "license": null, - "license_file": null, - "links": null, - "description": null, - "targets": [ - { - "kind": [ - "lib" - ], - "crate_types": [ - "lib" - ], - "doctest": true, - "edition": "2015", - "name": "foo", - "src_path": "[..]/foo/src/lib.rs" - } - ], - "features": { - "default": [ - "default_feat" - ], - "default_feat": [], - "optional_feat": [] - }, - 
"manifest_path": "[..]Cargo.toml", - "metadata": null - } - ], - "workspace_members": ["foo 0.5.0 (path+file:[..]foo)"], - "resolve": { - "nodes": [ - { - "dependencies": [], - "deps": [], - "features": [ - "default", - "default_feat" - ], - "id": "foo 0.5.0 (path+file:[..]foo)" - } - ], - "root": "foo 0.5.0 (path+file:[..]foo)" - }, - "target_directory": "[..]foo/target", - "version": 1, - "workspace_root": "[..]/foo" - }"#, - ) - .run(); -} - -#[cargo_test] -fn cargo_metadata_with_deps_and_version() { - let p = project() - .file("src/foo.rs", "") - .file( - "Cargo.toml", - r#" - [project] - name = "foo" - version = "0.5.0" - authors = [] - license = "MIT" - description = "foo" - - [[bin]] - name = "foo" - - [dependencies] - bar = "*" - [dev-dependencies] - foobar = "*" - "#, - ) - .build(); - Package::new("baz", "0.0.1").publish(); - Package::new("foobar", "0.0.1").publish(); - Package::new("bar", "0.0.1").dep("baz", "0.0.1").publish(); - - p.cargo("metadata -q --format-version 1") - .with_json( - r#" - { - "packages": [ - { - "authors": [], - "categories": [], - "dependencies": [], - "description": null, - "edition": "2015", - "features": {}, - "id": "baz 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "keywords": [], - "license": null, - "license_file": null, - "links": null, - "manifest_path": "[..]Cargo.toml", - "metadata": null, - "name": "baz", - "readme": null, - "repository": null, - "source": "registry+https://github.com/rust-lang/crates.io-index", - "targets": [ - { - "crate_types": [ - "lib" - ], - "doctest": true, - "edition": "2015", - "kind": [ - "lib" - ], - "name": "baz", - "src_path": "[..]src/lib.rs" - } - ], - "version": "0.0.1" - }, - { - "authors": [], - "categories": [], - "dependencies": [ - { - "features": [], - "kind": null, - "name": "bar", - "optional": false, - "registry": null, - "rename": null, - "req": "*", - "source": "registry+https://github.com/rust-lang/crates.io-index", - "target": null, - "uses_default_features": true - }, - { - "features": [], - "kind": "dev", - "name": "foobar", - "optional": false, - "registry": null, - "rename": null, - "req": "*", - "source": "registry+https://github.com/rust-lang/crates.io-index", - "target": null, - "uses_default_features": true - } - ], - "description": "foo", - "edition": "2015", - "features": {}, - "id": "foo 0.5.0 (path+file:[..]foo)", - "keywords": [], - "license": "MIT", - "license_file": null, - "links": null, - "manifest_path": "[..]Cargo.toml", - "metadata": null, - "name": "foo", - "readme": null, - "repository": null, - "source": null, - "targets": [ - { - "crate_types": [ - "bin" - ], - "doctest": false, - "edition": "2015", - "kind": [ - "bin" - ], - "name": "foo", - "src_path": "[..]src/foo.rs" - } - ], - "version": "0.5.0" - }, - { - "authors": [], - "categories": [], - "dependencies": [], - "description": null, - "edition": "2015", - "features": {}, - "id": "foobar 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "keywords": [], - "license": null, - "license_file": null, - "links": null, - "manifest_path": "[..]Cargo.toml", - "metadata": null, - "name": "foobar", - "readme": null, - "repository": null, - "source": "registry+https://github.com/rust-lang/crates.io-index", - "targets": [ - { - "crate_types": [ - "lib" - ], - "doctest": true, - "edition": "2015", - "kind": [ - "lib" - ], - "name": "foobar", - "src_path": "[..]src/lib.rs" - } - ], - "version": "0.0.1" - }, - { - "authors": [], - "categories": [], - "dependencies": [ - { - "features": [], - "kind": 
null, - "name": "baz", - "optional": false, - "registry": null, - "rename": null, - "req": "^0.0.1", - "source": "registry+https://github.com/rust-lang/crates.io-index", - "target": null, - "uses_default_features": true - } - ], - "description": null, - "edition": "2015", - "features": {}, - "id": "bar 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "keywords": [], - "license": null, - "license_file": null, - "links": null, - "manifest_path": "[..]Cargo.toml", - "metadata": null, - "name": "bar", - "readme": null, - "repository": null, - "source": "registry+https://github.com/rust-lang/crates.io-index", - "targets": [ - { - "crate_types": [ - "lib" - ], - "doctest": true, - "edition": "2015", - "kind": [ - "lib" - ], - "name": "bar", - "src_path": "[..]src/lib.rs" - } - ], - "version": "0.0.1" - } - ], - "resolve": { - "nodes": [ - { - "dependencies": [], - "deps": [], - "features": [], - "id": "baz 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" - }, - { - "dependencies": [], - "deps": [], - "features": [], - "id": "foobar 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" - }, - { - "dependencies": [ - "bar 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "foobar 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" - ], - "deps": [ - { - "name": "bar", - "pkg": "bar 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" - }, - { - "name": "foobar", - "pkg": "foobar 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" - } - ], - "features": [], - "id": "foo 0.5.0 (path+file:[..]foo)" - }, - { - "dependencies": [ - "baz 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" - ], - "deps": [ - { - "name": "baz", - "pkg": "baz 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" - } - ], - "features": [], - "id": "bar 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" - } - ], - "root": "foo 0.5.0 (path+file:[..]foo)" - }, - "target_directory": "[..]foo/target", - "version": 1, - "workspace_members": [ - "foo 0.5.0 (path+file:[..]foo)" - ], - "workspace_root": "[..]/foo" - }"#, - ) - .run(); -} - -#[cargo_test] -fn example() { - let p = project() - .file("src/lib.rs", "") - .file("examples/ex.rs", "") - .file( - "Cargo.toml", - r#" -[package] -name = "foo" -version = "0.1.0" - -[[example]] -name = "ex" - "#, - ) - .build(); - - p.cargo("metadata") - .with_json( - r#" - { - "packages": [ - { - "authors": [], - "categories": [], - "name": "foo", - "readme": null, - "repository": null, - "version": "0.1.0", - "id": "foo[..]", - "keywords": [], - "license": null, - "license_file": null, - "links": null, - "description": null, - "edition": "2015", - "source": null, - "dependencies": [], - "targets": [ - { - "kind": [ "lib" ], - "crate_types": [ "lib" ], - "doctest": true, - "edition": "2015", - "name": "foo", - "src_path": "[..]/foo/src/lib.rs" - }, - { - "kind": [ "example" ], - "crate_types": [ "bin" ], - "doctest": false, - "edition": "2015", - "name": "ex", - "src_path": "[..]/foo/examples/ex.rs" - } - ], - "features": {}, - "manifest_path": "[..]Cargo.toml", - "metadata": null - } - ], - "workspace_members": [ - "foo 0.1.0 (path+file:[..]foo)" - ], - "resolve": { - "root": "foo 0.1.0 (path+file://[..]foo)", - "nodes": [ - { - "id": "foo 0.1.0 (path+file:[..]foo)", - "features": [], - "dependencies": [], - "deps": [] - } - ] - }, - "target_directory": "[..]foo/target", - "version": 1, - "workspace_root": "[..]/foo" - }"#, - ) - .run(); -} - -#[cargo_test] -fn example_lib() { 
- let p = project() - .file("src/lib.rs", "") - .file("examples/ex.rs", "") - .file( - "Cargo.toml", - r#" -[package] -name = "foo" -version = "0.1.0" - -[[example]] -name = "ex" -crate-type = ["rlib", "dylib"] - "#, - ) - .build(); - - p.cargo("metadata") - .with_json( - r#" - { - "packages": [ - { - "authors": [], - "categories": [], - "name": "foo", - "readme": null, - "repository": null, - "version": "0.1.0", - "id": "foo[..]", - "keywords": [], - "license": null, - "license_file": null, - "links": null, - "description": null, - "edition": "2015", - "source": null, - "dependencies": [], - "targets": [ - { - "kind": [ "lib" ], - "crate_types": [ "lib" ], - "doctest": true, - "edition": "2015", - "name": "foo", - "src_path": "[..]/foo/src/lib.rs" - }, - { - "kind": [ "example" ], - "crate_types": [ "rlib", "dylib" ], - "doctest": false, - "edition": "2015", - "name": "ex", - "src_path": "[..]/foo/examples/ex.rs" - } - ], - "features": {}, - "manifest_path": "[..]Cargo.toml", - "metadata": null - } - ], - "workspace_members": [ - "foo 0.1.0 (path+file:[..]foo)" - ], - "resolve": { - "root": "foo 0.1.0 (path+file://[..]foo)", - "nodes": [ - { - "id": "foo 0.1.0 (path+file:[..]foo)", - "features": [], - "dependencies": [], - "deps": [] - } - ] - }, - "target_directory": "[..]foo/target", - "version": 1, - "workspace_root": "[..]/foo" - }"#, - ) - .run(); -} - -#[cargo_test] -fn workspace_metadata() { - let p = project() - .file( - "Cargo.toml", - r#" - [workspace] - members = ["bar", "baz"] - "#, - ) - .file("bar/Cargo.toml", &basic_lib_manifest("bar")) - .file("bar/src/lib.rs", "") - .file("baz/Cargo.toml", &basic_lib_manifest("baz")) - .file("baz/src/lib.rs", "") - .build(); - - p.cargo("metadata") - .with_json( - r#" - { - "packages": [ - { - "authors": [ - "wycats@example.com" - ], - "categories": [], - "name": "bar", - "version": "0.5.0", - "id": "bar[..]", - "readme": null, - "repository": null, - "keywords": [], - "source": null, - "dependencies": [], - "license": null, - "license_file": null, - "links": null, - "description": null, - "edition": "2015", - "targets": [ - { - "kind": [ "lib" ], - "crate_types": [ "lib" ], - "doctest": true, - "edition": "2015", - "name": "bar", - "src_path": "[..]bar/src/lib.rs" - } - ], - "features": {}, - "manifest_path": "[..]bar/Cargo.toml", - "metadata": null - }, - { - "authors": [ - "wycats@example.com" - ], - "categories": [], - "name": "baz", - "readme": null, - "repository": null, - "version": "0.5.0", - "id": "baz[..]", - "keywords": [], - "source": null, - "dependencies": [], - "license": null, - "license_file": null, - "links": null, - "description": null, - "edition": "2015", - "targets": [ - { - "kind": [ "lib" ], - "crate_types": [ "lib" ], - "doctest": true, - "edition": "2015", - "name": "baz", - "src_path": "[..]baz/src/lib.rs" - } - ], - "features": {}, - "manifest_path": "[..]baz/Cargo.toml", - "metadata": null - } - ], - "workspace_members": ["baz 0.5.0 (path+file:[..]baz)", "bar 0.5.0 (path+file:[..]bar)"], - "resolve": { - "nodes": [ - { - "dependencies": [], - "deps": [], - "features": [], - "id": "baz 0.5.0 (path+file:[..]baz)" - }, - { - "dependencies": [], - "deps": [], - "features": [], - "id": "bar 0.5.0 (path+file:[..]bar)" - } - ], - "root": null - }, - "target_directory": "[..]foo/target", - "version": 1, - "workspace_root": "[..]/foo" - }"#, - ) - .run(); -} - -#[cargo_test] -fn workspace_metadata_no_deps() { - let p = project() - .file( - "Cargo.toml", - r#" - [workspace] - members = ["bar", "baz"] - "#, - ) - 
.file("bar/Cargo.toml", &basic_lib_manifest("bar")) - .file("bar/src/lib.rs", "") - .file("baz/Cargo.toml", &basic_lib_manifest("baz")) - .file("baz/src/lib.rs", "") - .build(); - - p.cargo("metadata --no-deps") - .with_json( - r#" - { - "packages": [ - { - "authors": [ - "wycats@example.com" - ], - "categories": [], - "name": "bar", - "readme": null, - "repository": null, - "version": "0.5.0", - "id": "bar[..]", - "keywords": [], - "source": null, - "dependencies": [], - "license": null, - "license_file": null, - "links": null, - "description": null, - "edition": "2015", - "targets": [ - { - "kind": [ "lib" ], - "crate_types": [ "lib" ], - "doctest": true, - "edition": "2015", - "name": "bar", - "src_path": "[..]bar/src/lib.rs" - } - ], - "features": {}, - "manifest_path": "[..]bar/Cargo.toml", - "metadata": null - }, - { - "authors": [ - "wycats@example.com" - ], - "categories": [], - "name": "baz", - "readme": null, - "repository": null, - "version": "0.5.0", - "id": "baz[..]", - "keywords": [], - "source": null, - "dependencies": [], - "license": null, - "license_file": null, - "links": null, - "description": null, - "edition": "2015", - "targets": [ - { - "kind": [ "lib" ], - "crate_types": ["lib"], - "doctest": true, - "edition": "2015", - "name": "baz", - "src_path": "[..]baz/src/lib.rs" - } - ], - "features": {}, - "manifest_path": "[..]baz/Cargo.toml", - "metadata": null - } - ], - "workspace_members": ["baz 0.5.0 (path+file:[..]baz)", "bar 0.5.0 (path+file:[..]bar)"], - "resolve": null, - "target_directory": "[..]foo/target", - "version": 1, - "workspace_root": "[..]/foo" - }"#, - ) - .run(); -} - -#[cargo_test] -fn cargo_metadata_with_invalid_manifest() { - let p = project().file("Cargo.toml", "").build(); - - p.cargo("metadata --format-version 1") - .with_status(101) - .with_stderr( - "\ -[ERROR] failed to parse manifest at `[..]` - -Caused by: - virtual manifests must be configured with [workspace]", - ) - .run(); -} - -const MANIFEST_OUTPUT: &str = r#" -{ - "packages": [{ - "authors": [ - "wycats@example.com" - ], - "categories": [], - "name":"foo", - "version":"0.5.0", - "id":"foo[..]0.5.0[..](path+file://[..]/foo)", - "source":null, - "dependencies":[], - "keywords": [], - "license": null, - "license_file": null, - "links": null, - "description": null, - "edition": "2015", - "targets":[{ - "kind":["bin"], - "crate_types":["bin"], - "doctest": false, - "edition": "2015", - "name":"foo", - "src_path":"[..]/foo/src/foo.rs" - }], - "features":{}, - "manifest_path":"[..]Cargo.toml", - "metadata": null, - "readme": null, - "repository": null - }], - "workspace_members": [ "foo 0.5.0 (path+file:[..]foo)" ], - "resolve": null, - "target_directory": "[..]foo/target", - "version": 1, - "workspace_root": "[..]/foo" -}"#; - -#[cargo_test] -fn cargo_metadata_no_deps_path_to_cargo_toml_relative() { - let p = project() - .file("Cargo.toml", &basic_bin_manifest("foo")) - .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) - .build(); - - p.cargo("metadata --no-deps --manifest-path foo/Cargo.toml") - .cwd(p.root().parent().unwrap()) - .with_json(MANIFEST_OUTPUT) - .run(); -} - -#[cargo_test] -fn cargo_metadata_no_deps_path_to_cargo_toml_absolute() { - let p = project() - .file("Cargo.toml", &basic_bin_manifest("foo")) - .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) - .build(); - - p.cargo("metadata --no-deps --manifest-path") - .arg(p.root().join("Cargo.toml")) - .cwd(p.root().parent().unwrap()) - .with_json(MANIFEST_OUTPUT) - .run(); -} - -#[cargo_test] -fn 
cargo_metadata_no_deps_path_to_cargo_toml_parent_relative() { - let p = project() - .file("Cargo.toml", &basic_bin_manifest("foo")) - .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) - .build(); - - p.cargo("metadata --no-deps --manifest-path foo") - .cwd(p.root().parent().unwrap()) - .with_status(101) - .with_stderr( - "[ERROR] the manifest-path must be \ - a path to a Cargo.toml file", - ) - .run(); -} - -#[cargo_test] -fn cargo_metadata_no_deps_path_to_cargo_toml_parent_absolute() { - let p = project() - .file("Cargo.toml", &basic_bin_manifest("foo")) - .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) - .build(); - - p.cargo("metadata --no-deps --manifest-path") - .arg(p.root()) - .cwd(p.root().parent().unwrap()) - .with_status(101) - .with_stderr( - "[ERROR] the manifest-path must be \ - a path to a Cargo.toml file", - ) - .run(); -} - -#[cargo_test] -fn cargo_metadata_no_deps_cwd() { - let p = project() - .file("Cargo.toml", &basic_bin_manifest("foo")) - .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) - .build(); - - p.cargo("metadata --no-deps") - .with_json(MANIFEST_OUTPUT) - .run(); -} - -#[cargo_test] -fn cargo_metadata_bad_version() { - let p = project() - .file("Cargo.toml", &basic_bin_manifest("foo")) - .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) - .build(); - - p.cargo("metadata --no-deps --format-version 2") - .with_status(1) - .with_stderr_contains( - "\ -error: '2' isn't a valid value for '--format-version ' -[possible values: 1] -", - ) - .run(); -} - -#[cargo_test] -fn multiple_features() { - let p = project() - .file( - "Cargo.toml", - r#" - [package] - name = "foo" - version = "0.1.0" - authors = [] - - [features] - a = [] - b = [] - "#, - ) - .file("src/lib.rs", "") - .build(); - - p.cargo("metadata --features").arg("a b").run(); -} - -#[cargo_test] -fn package_metadata() { - let p = project() - .file( - "Cargo.toml", - r#" - [package] - name = "foo" - version = "0.1.0" - authors = ["wycats@example.com"] - categories = ["database"] - keywords = ["database"] - readme = "README.md" - repository = "https://github.com/rust-lang/cargo" - - [package.metadata.bar] - baz = "quux" - "#, - ) - .file("src/lib.rs", "") - .build(); - - p.cargo("metadata --no-deps") - .with_json( - r#" - { - "packages": [ - { - "authors": ["wycats@example.com"], - "categories": ["database"], - "name": "foo", - "readme": "README.md", - "repository": "https://github.com/rust-lang/cargo", - "version": "0.1.0", - "id": "foo[..]", - "keywords": ["database"], - "source": null, - "dependencies": [], - "edition": "2015", - "license": null, - "license_file": null, - "links": null, - "description": null, - "targets": [ - { - "kind": [ "lib" ], - "crate_types": [ "lib" ], - "doctest": true, - "edition": "2015", - "name": "foo", - "src_path": "[..]foo/src/lib.rs" - } - ], - "features": {}, - "manifest_path": "[..]foo/Cargo.toml", - "metadata": { - "bar": { - "baz": "quux" - } - } - } - ], - "workspace_members": ["foo[..]"], - "resolve": null, - "target_directory": "[..]foo/target", - "version": 1, - "workspace_root": "[..]/foo" - }"#, - ) - .run(); -} - -#[cargo_test] -fn cargo_metadata_path_to_cargo_toml_project() { - let p = project() - .file( - "Cargo.toml", - r#" - [workspace] - members = ["bar"] - "#, - ) - .file("bar/Cargo.toml", &basic_lib_manifest("bar")) - .file("bar/src/lib.rs", "") - .build(); - - p.cargo("package --manifest-path") - .arg(p.root().join("bar/Cargo.toml")) - .cwd(p.root().parent().unwrap()) - .run(); - - p.cargo("metadata --manifest-path") - 
.arg(p.root().join("target/package/bar-0.5.0/Cargo.toml")) - .with_json( - r#" - { - "packages": [ - { - "authors": [ - "wycats@example.com" - ], - "categories": [], - "dependencies": [], - "description": null, - "edition": "2015", - "features": {}, - "id": "bar 0.5.0 ([..])", - "keywords": [], - "license": null, - "license_file": null, - "links": null, - "manifest_path": "[..]Cargo.toml", - "metadata": null, - "name": "bar", - "readme": null, - "repository": null, - "source": null, - "targets": [ - { - "crate_types": [ - "lib" - ], - "doctest": true, - "edition": "2015", - "kind": [ - "lib" - ], - "name": "bar", - "src_path": "[..]src/lib.rs" - } - ], - "version": "0.5.0" - } - ], - "resolve": { - "nodes": [ - { - "dependencies": [], - "deps": [], - "features": [], - "id": "bar 0.5.0 ([..])" - } - ], - "root": "bar 0.5.0 (path+file:[..])" - }, - "target_directory": "[..]", - "version": 1, - "workspace_members": [ - "bar 0.5.0 (path+file:[..])" - ], - "workspace_root": "[..]" - } -"#, - ) - .run(); -} - -#[cargo_test] -fn package_edition_2018() { - let p = project() - .file("src/lib.rs", "") - .file( - "Cargo.toml", - r#" - [package] - name = "foo" - version = "0.1.0" - authors = ["wycats@example.com"] - edition = "2018" - "#, - ) - .build(); - p.cargo("metadata") - .with_json( - r#" - { - "packages": [ - { - "authors": [ - "wycats@example.com" - ], - "categories": [], - "dependencies": [], - "description": null, - "edition": "2018", - "features": {}, - "id": "foo 0.1.0 (path+file:[..])", - "keywords": [], - "license": null, - "license_file": null, - "links": null, - "manifest_path": "[..]Cargo.toml", - "metadata": null, - "name": "foo", - "readme": null, - "repository": null, - "source": null, - "targets": [ - { - "crate_types": [ - "lib" - ], - "doctest": true, - "edition": "2018", - "kind": [ - "lib" - ], - "name": "foo", - "src_path": "[..]src/lib.rs" - } - ], - "version": "0.1.0" - } - ], - "resolve": { - "nodes": [ - { - "dependencies": [], - "deps": [], - "features": [], - "id": "foo 0.1.0 (path+file:[..])" - } - ], - "root": "foo 0.1.0 (path+file:[..])" - }, - "target_directory": "[..]", - "version": 1, - "workspace_members": [ - "foo 0.1.0 (path+file:[..])" - ], - "workspace_root": "[..]" - } - "#, - ) - .run(); -} - -#[cargo_test] -fn target_edition_2018() { - let p = project() - .file("src/lib.rs", "") - .file("src/main.rs", "") - .file( - "Cargo.toml", - r#" - [package] - name = "foo" - version = "0.1.0" - authors = ["wycats@example.com"] - edition = "2015" - - [lib] - edition = "2018" - "#, - ) - .build(); - p.cargo("metadata") - .with_json( - r#" - { - "packages": [ - { - "authors": [ - "wycats@example.com" - ], - "categories": [], - "dependencies": [], - "description": null, - "edition": "2015", - "features": {}, - "id": "foo 0.1.0 (path+file:[..])", - "keywords": [], - "license": null, - "license_file": null, - "links": null, - "manifest_path": "[..]Cargo.toml", - "metadata": null, - "name": "foo", - "readme": null, - "repository": null, - "source": null, - "targets": [ - { - "crate_types": [ - "lib" - ], - "doctest": true, - "edition": "2018", - "kind": [ - "lib" - ], - "name": "foo", - "src_path": "[..]src/lib.rs" - }, - { - "crate_types": [ - "bin" - ], - "doctest": false, - "edition": "2015", - "kind": [ - "bin" - ], - "name": "foo", - "src_path": "[..]src/main.rs" - } - ], - "version": "0.1.0" - } - ], - "resolve": { - "nodes": [ - { - "dependencies": [], - "deps": [], - "features": [], - "id": "foo 0.1.0 (path+file:[..])" - } - ], - "root": "foo 0.1.0 
(path+file:[..])" - }, - "target_directory": "[..]", - "version": 1, - "workspace_members": [ - "foo 0.1.0 (path+file:[..])" - ], - "workspace_root": "[..]" - } - "#, - ) - .run(); -} - -#[cargo_test] -fn rename_dependency() { - Package::new("bar", "0.1.0").publish(); - Package::new("bar", "0.2.0").publish(); - - let p = project() - .file( - "Cargo.toml", - r#" - [project] - name = "foo" - version = "0.0.1" - authors = [] - - [dependencies] - bar = { version = "0.1.0" } - baz = { version = "0.2.0", package = "bar" } - "#, - ) - .file("src/lib.rs", "extern crate bar; extern crate baz;") - .build(); - - p.cargo("metadata") - .with_json( - r#" -{ - "packages": [ - { - "authors": [], - "categories": [], - "dependencies": [ - { - "features": [], - "kind": null, - "name": "bar", - "optional": false, - "rename": null, - "registry": null, - "req": "^0.1.0", - "source": "registry+https://github.com/rust-lang/crates.io-index", - "target": null, - "uses_default_features": true - }, - { - "features": [], - "kind": null, - "name": "bar", - "optional": false, - "rename": "baz", - "registry": null, - "req": "^0.2.0", - "source": "registry+https://github.com/rust-lang/crates.io-index", - "target": null, - "uses_default_features": true - } - ], - "description": null, - "edition": "2015", - "features": {}, - "id": "foo 0.0.1[..]", - "keywords": [], - "license": null, - "license_file": null, - "links": null, - "manifest_path": "[..]", - "metadata": null, - "name": "foo", - "readme": null, - "repository": null, - "source": null, - "targets": [ - { - "crate_types": [ - "lib" - ], - "doctest": true, - "edition": "2015", - "kind": [ - "lib" - ], - "name": "foo", - "src_path": "[..]" - } - ], - "version": "0.0.1" - }, - { - "authors": [], - "categories": [], - "dependencies": [], - "description": null, - "edition": "2015", - "features": {}, - "id": "bar 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "keywords": [], - "license": null, - "license_file": null, - "links": null, - "manifest_path": "[..]", - "metadata": null, - "name": "bar", - "readme": null, - "repository": null, - "source": "registry+https://github.com/rust-lang/crates.io-index", - "targets": [ - { - "crate_types": [ - "lib" - ], - "doctest": true, - "edition": "2015", - "kind": [ - "lib" - ], - "name": "bar", - "src_path": "[..]" - } - ], - "version": "0.1.0" - }, - { - "authors": [], - "categories": [], - "dependencies": [], - "description": null, - "edition": "2015", - "features": {}, - "id": "bar 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "keywords": [], - "license": null, - "license_file": null, - "links": null, - "manifest_path": "[..]", - "metadata": null, - "name": "bar", - "readme": null, - "repository": null, - "source": "registry+https://github.com/rust-lang/crates.io-index", - "targets": [ - { - "crate_types": [ - "lib" - ], - "doctest": true, - "edition": "2015", - "kind": [ - "lib" - ], - "name": "bar", - "src_path": "[..]" - } - ], - "version": "0.2.0" - } - ], - "resolve": { - "nodes": [ - { - "dependencies": [], - "deps": [], - "features": [], - "id": "bar 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" - }, - { - "dependencies": [], - "deps": [], - "features": [], - "id": "bar 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" - }, - { - "dependencies": [ - "bar 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "bar 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" - ], - "deps": [ - { - "name": "bar", - "pkg": "bar 0.1.0 
(registry+https://github.com/rust-lang/crates.io-index)" - }, - { - "name": "baz", - "pkg": "bar 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" - } - ], - "features": [], - "id": "foo 0.0.1[..]" - } - ], - "root": "foo 0.0.1[..]" - }, - "target_directory": "[..]", - "version": 1, - "workspace_members": [ - "foo 0.0.1[..]" - ], - "workspace_root": "[..]" -}"#, - ) - .run(); -} - -#[cargo_test] -fn metadata_links() { - let p = project() - .file( - "Cargo.toml", - r#" - [project] - name = "foo" - version = "0.5.0" - links = "a" - "#, - ) - .file("src/lib.rs", "") - .file("build.rs", "fn main() {}") - .build(); - - p.cargo("metadata") - .with_json( - r#" -{ - "packages": [ - { - "authors": [], - "categories": [], - "dependencies": [], - "description": null, - "edition": "2015", - "features": {}, - "id": "foo 0.5.0 [..]", - "keywords": [], - "license": null, - "license_file": null, - "links": "a", - "manifest_path": "[..]/foo/Cargo.toml", - "metadata": null, - "name": "foo", - "readme": null, - "repository": null, - "source": null, - "targets": [ - { - "crate_types": [ - "lib" - ], - "doctest": true, - "edition": "2015", - "kind": [ - "lib" - ], - "name": "foo", - "src_path": "[..]/foo/src/lib.rs" - }, - { - "crate_types": [ - "bin" - ], - "doctest": false, - "edition": "2015", - "kind": [ - "custom-build" - ], - "name": "build-script-build", - "src_path": "[..]/foo/build.rs" - } - ], - "version": "0.5.0" - } - ], - "resolve": { - "nodes": [ - { - "dependencies": [], - "deps": [], - "features": [], - "id": "foo 0.5.0 [..]" - } - ], - "root": "foo 0.5.0 [..]" - }, - "target_directory": "[..]/foo/target", - "version": 1, - "workspace_members": [ - "foo 0.5.0 [..]" - ], - "workspace_root": "[..]/foo" -} -"#, - ) - .run() -} - -#[cargo_test] -fn deps_with_bin_only() { - let p = project() - .file( - "Cargo.toml", - r#" - [package] - name = "foo" - version = "0.1.0" - [dependencies] - bdep = { path = "bdep" } - "#, - ) - .file("src/lib.rs", "") - .file("bdep/Cargo.toml", &basic_bin_manifest("bdep")) - .file("bdep/src/main.rs", "fn main() {}") - .build(); - - let output = p - .cargo("metadata") - .exec_with_output() - .expect("cargo metadata failed"); - let stdout = std::str::from_utf8(&output.stdout).unwrap(); - let meta: serde_json::Value = serde_json::from_str(stdout).expect("failed to parse json"); - let nodes = &meta["resolve"]["nodes"]; - assert!(nodes[0]["deps"].as_array().unwrap().is_empty()); - assert!(nodes[1]["deps"].as_array().unwrap().is_empty()); -} +use crate::support::registry::Package; +use crate::support::{basic_bin_manifest, basic_lib_manifest, main_file, project}; + +#[cargo_test] +fn cargo_metadata_simple() { + let p = project() + .file("src/foo.rs", "") + .file("Cargo.toml", &basic_bin_manifest("foo")) + .build(); + + p.cargo("metadata") + .with_json( + r#" + { + "packages": [ + { + "authors": [ + "wycats@example.com" + ], + "categories": [], + "name": "foo", + "version": "0.5.0", + "id": "foo[..]", + "keywords": [], + "source": null, + "dependencies": [], + "edition": "2015", + "license": null, + "license_file": null, + "links": null, + "description": null, + "readme": null, + "repository": null, + "targets": [ + { + "kind": [ + "bin" + ], + "crate_types": [ + "bin" + ], + "doctest": false, + "edition": "2015", + "name": "foo", + "src_path": "[..]/foo/src/foo.rs" + } + ], + "features": {}, + "manifest_path": "[..]Cargo.toml", + "metadata": null + } + ], + "workspace_members": ["foo 0.5.0 (path+file:[..]foo)"], + "resolve": { + "nodes": [ + { + "dependencies": [], 
+ "deps": [], + "features": [], + "id": "foo 0.5.0 (path+file:[..]foo)" + } + ], + "root": "foo 0.5.0 (path+file:[..]foo)" + }, + "target_directory": "[..]foo/target", + "version": 1, + "workspace_root": "[..]/foo" + }"#, + ) + .run(); +} + +#[cargo_test] +fn cargo_metadata_warns_on_implicit_version() { + let p = project() + .file("src/foo.rs", "") + .file("Cargo.toml", &basic_bin_manifest("foo")) + .build(); + + p.cargo("metadata").with_stderr("[WARNING] please specify `--format-version` flag explicitly to avoid compatibility problems").run(); + + p.cargo("metadata --format-version 1").with_stderr("").run(); +} + +#[cargo_test] +fn library_with_several_crate_types() { + let p = project() + .file("src/lib.rs", "") + .file( + "Cargo.toml", + r#" +[package] +name = "foo" +version = "0.5.0" + +[lib] +crate-type = ["lib", "staticlib"] + "#, + ) + .build(); + + p.cargo("metadata") + .with_json( + r#" + { + "packages": [ + { + "authors": [], + "categories": [], + "name": "foo", + "readme": null, + "repository": null, + "version": "0.5.0", + "id": "foo[..]", + "keywords": [], + "source": null, + "dependencies": [], + "edition": "2015", + "license": null, + "license_file": null, + "links": null, + "description": null, + "targets": [ + { + "kind": [ + "lib", + "staticlib" + ], + "crate_types": [ + "lib", + "staticlib" + ], + "doctest": true, + "edition": "2015", + "name": "foo", + "src_path": "[..]/foo/src/lib.rs" + } + ], + "features": {}, + "manifest_path": "[..]Cargo.toml", + "metadata": null + } + ], + "workspace_members": ["foo 0.5.0 (path+file:[..]foo)"], + "resolve": { + "nodes": [ + { + "dependencies": [], + "deps": [], + "features": [], + "id": "foo 0.5.0 (path+file:[..]foo)" + } + ], + "root": "foo 0.5.0 (path+file:[..]foo)" + }, + "target_directory": "[..]foo/target", + "version": 1, + "workspace_root": "[..]/foo" + }"#, + ) + .run(); +} + +#[cargo_test] +fn library_with_features() { + let p = project() + .file("src/lib.rs", "") + .file( + "Cargo.toml", + r#" +[package] +name = "foo" +version = "0.5.0" + +[features] +default = ["default_feat"] +default_feat = [] +optional_feat = [] + "#, + ) + .build(); + + p.cargo("metadata") + .with_json( + r#" + { + "packages": [ + { + "authors": [], + "categories": [], + "name": "foo", + "readme": null, + "repository": null, + "version": "0.5.0", + "id": "foo[..]", + "keywords": [], + "source": null, + "dependencies": [], + "edition": "2015", + "license": null, + "license_file": null, + "links": null, + "description": null, + "targets": [ + { + "kind": [ + "lib" + ], + "crate_types": [ + "lib" + ], + "doctest": true, + "edition": "2015", + "name": "foo", + "src_path": "[..]/foo/src/lib.rs" + } + ], + "features": { + "default": [ + null, + [ + "default_feat" + ] + ], + "default_feat": [ + null, + [] + ], + "optional_feat": [ + null, + [] + ] + }, + "manifest_path": "[..]Cargo.toml", + "metadata": null + } + ], + "workspace_members": ["foo 0.5.0 (path+file:[..]foo)"], + "resolve": { + "nodes": [ + { + "dependencies": [], + "deps": [], + "features": [ + "default", + "default_feat" + ], + "id": "foo 0.5.0 (path+file:[..]foo)" + } + ], + "root": "foo 0.5.0 (path+file:[..]foo)" + }, + "target_directory": "[..]foo/target", + "version": 1, + "workspace_root": "[..]/foo" + }"#, + ) + .run(); +} + +#[cargo_test] +fn cargo_metadata_with_deps_and_version() { + let p = project() + .file("src/foo.rs", "") + .file( + "Cargo.toml", + r#" + [project] + name = "foo" + version = "0.5.0" + authors = [] + license = "MIT" + description = "foo" + + [[bin]] + name = "foo" + 
+ [dependencies] + bar = "*" + [dev-dependencies] + foobar = "*" + "#, + ) + .build(); + Package::new("baz", "0.0.1").publish(); + Package::new("foobar", "0.0.1").publish(); + Package::new("bar", "0.0.1").dep("baz", "0.0.1").publish(); + + p.cargo("metadata -q --format-version 1") + .with_json( + r#" + { + "packages": [ + { + "authors": [], + "categories": [], + "dependencies": [], + "description": null, + "edition": "2015", + "features": {}, + "id": "baz 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "keywords": [], + "license": null, + "license_file": null, + "links": null, + "manifest_path": "[..]Cargo.toml", + "metadata": null, + "name": "baz", + "readme": null, + "repository": null, + "source": "registry+https://github.com/rust-lang/crates.io-index", + "targets": [ + { + "crate_types": [ + "lib" + ], + "doctest": true, + "edition": "2015", + "kind": [ + "lib" + ], + "name": "baz", + "src_path": "[..]src/lib.rs" + } + ], + "version": "0.0.1" + }, + { + "authors": [], + "categories": [], + "dependencies": [ + { + "features": [], + "kind": null, + "name": "bar", + "optional": false, + "registry": null, + "rename": null, + "req": "*", + "source": "registry+https://github.com/rust-lang/crates.io-index", + "target": null, + "uses_default_features": true + }, + { + "features": [], + "kind": "dev", + "name": "foobar", + "optional": false, + "registry": null, + "rename": null, + "req": "*", + "source": "registry+https://github.com/rust-lang/crates.io-index", + "target": null, + "uses_default_features": true + } + ], + "description": "foo", + "edition": "2015", + "features": {}, + "id": "foo 0.5.0 (path+file:[..]foo)", + "keywords": [], + "license": "MIT", + "license_file": null, + "links": null, + "manifest_path": "[..]Cargo.toml", + "metadata": null, + "name": "foo", + "readme": null, + "repository": null, + "source": null, + "targets": [ + { + "crate_types": [ + "bin" + ], + "doctest": false, + "edition": "2015", + "kind": [ + "bin" + ], + "name": "foo", + "src_path": "[..]src/foo.rs" + } + ], + "version": "0.5.0" + }, + { + "authors": [], + "categories": [], + "dependencies": [], + "description": null, + "edition": "2015", + "features": {}, + "id": "foobar 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "keywords": [], + "license": null, + "license_file": null, + "links": null, + "manifest_path": "[..]Cargo.toml", + "metadata": null, + "name": "foobar", + "readme": null, + "repository": null, + "source": "registry+https://github.com/rust-lang/crates.io-index", + "targets": [ + { + "crate_types": [ + "lib" + ], + "doctest": true, + "edition": "2015", + "kind": [ + "lib" + ], + "name": "foobar", + "src_path": "[..]src/lib.rs" + } + ], + "version": "0.0.1" + }, + { + "authors": [], + "categories": [], + "dependencies": [ + { + "features": [], + "kind": null, + "name": "baz", + "optional": false, + "registry": null, + "rename": null, + "req": "^0.0.1", + "source": "registry+https://github.com/rust-lang/crates.io-index", + "target": null, + "uses_default_features": true + } + ], + "description": null, + "edition": "2015", + "features": {}, + "id": "bar 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "keywords": [], + "license": null, + "license_file": null, + "links": null, + "manifest_path": "[..]Cargo.toml", + "metadata": null, + "name": "bar", + "readme": null, + "repository": null, + "source": "registry+https://github.com/rust-lang/crates.io-index", + "targets": [ + { + "crate_types": [ + "lib" + ], + "doctest": true, + "edition": 
"2015", + "kind": [ + "lib" + ], + "name": "bar", + "src_path": "[..]src/lib.rs" + } + ], + "version": "0.0.1" + } + ], + "resolve": { + "nodes": [ + { + "dependencies": [], + "deps": [], + "features": [], + "id": "baz 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" + }, + { + "dependencies": [], + "deps": [], + "features": [], + "id": "foobar 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" + }, + { + "dependencies": [ + "bar 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "foobar 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" + ], + "deps": [ + { + "name": "bar", + "pkg": "bar 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" + }, + { + "name": "foobar", + "pkg": "foobar 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" + } + ], + "features": [], + "id": "foo 0.5.0 (path+file:[..]foo)" + }, + { + "dependencies": [ + "baz 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" + ], + "deps": [ + { + "name": "baz", + "pkg": "baz 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" + } + ], + "features": [], + "id": "bar 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" + } + ], + "root": "foo 0.5.0 (path+file:[..]foo)" + }, + "target_directory": "[..]foo/target", + "version": 1, + "workspace_members": [ + "foo 0.5.0 (path+file:[..]foo)" + ], + "workspace_root": "[..]/foo" + }"#, + ) + .run(); +} + +#[cargo_test] +fn example() { + let p = project() + .file("src/lib.rs", "") + .file("examples/ex.rs", "") + .file( + "Cargo.toml", + r#" +[package] +name = "foo" +version = "0.1.0" + +[[example]] +name = "ex" + "#, + ) + .build(); + + p.cargo("metadata") + .with_json( + r#" + { + "packages": [ + { + "authors": [], + "categories": [], + "name": "foo", + "readme": null, + "repository": null, + "version": "0.1.0", + "id": "foo[..]", + "keywords": [], + "license": null, + "license_file": null, + "links": null, + "description": null, + "edition": "2015", + "source": null, + "dependencies": [], + "targets": [ + { + "kind": [ "lib" ], + "crate_types": [ "lib" ], + "doctest": true, + "edition": "2015", + "name": "foo", + "src_path": "[..]/foo/src/lib.rs" + }, + { + "kind": [ "example" ], + "crate_types": [ "bin" ], + "doctest": false, + "edition": "2015", + "name": "ex", + "src_path": "[..]/foo/examples/ex.rs" + } + ], + "features": {}, + "manifest_path": "[..]Cargo.toml", + "metadata": null + } + ], + "workspace_members": [ + "foo 0.1.0 (path+file:[..]foo)" + ], + "resolve": { + "root": "foo 0.1.0 (path+file://[..]foo)", + "nodes": [ + { + "id": "foo 0.1.0 (path+file:[..]foo)", + "features": [], + "dependencies": [], + "deps": [] + } + ] + }, + "target_directory": "[..]foo/target", + "version": 1, + "workspace_root": "[..]/foo" + }"#, + ) + .run(); +} + +#[cargo_test] +fn example_lib() { + let p = project() + .file("src/lib.rs", "") + .file("examples/ex.rs", "") + .file( + "Cargo.toml", + r#" +[package] +name = "foo" +version = "0.1.0" + +[[example]] +name = "ex" +crate-type = ["rlib", "dylib"] + "#, + ) + .build(); + + p.cargo("metadata") + .with_json( + r#" + { + "packages": [ + { + "authors": [], + "categories": [], + "name": "foo", + "readme": null, + "repository": null, + "version": "0.1.0", + "id": "foo[..]", + "keywords": [], + "license": null, + "license_file": null, + "links": null, + "description": null, + "edition": "2015", + "source": null, + "dependencies": [], + "targets": [ + { + "kind": [ "lib" ], + "crate_types": [ "lib" ], + "doctest": true, + "edition": "2015", + 
"name": "foo", + "src_path": "[..]/foo/src/lib.rs" + }, + { + "kind": [ "example" ], + "crate_types": [ "rlib", "dylib" ], + "doctest": false, + "edition": "2015", + "name": "ex", + "src_path": "[..]/foo/examples/ex.rs" + } + ], + "features": {}, + "manifest_path": "[..]Cargo.toml", + "metadata": null + } + ], + "workspace_members": [ + "foo 0.1.0 (path+file:[..]foo)" + ], + "resolve": { + "root": "foo 0.1.0 (path+file://[..]foo)", + "nodes": [ + { + "id": "foo 0.1.0 (path+file:[..]foo)", + "features": [], + "dependencies": [], + "deps": [] + } + ] + }, + "target_directory": "[..]foo/target", + "version": 1, + "workspace_root": "[..]/foo" + }"#, + ) + .run(); +} + +#[cargo_test] +fn workspace_metadata() { + let p = project() + .file( + "Cargo.toml", + r#" + [workspace] + members = ["bar", "baz"] + "#, + ) + .file("bar/Cargo.toml", &basic_lib_manifest("bar")) + .file("bar/src/lib.rs", "") + .file("baz/Cargo.toml", &basic_lib_manifest("baz")) + .file("baz/src/lib.rs", "") + .build(); + + p.cargo("metadata") + .with_json( + r#" + { + "packages": [ + { + "authors": [ + "wycats@example.com" + ], + "categories": [], + "name": "bar", + "version": "0.5.0", + "id": "bar[..]", + "readme": null, + "repository": null, + "keywords": [], + "source": null, + "dependencies": [], + "license": null, + "license_file": null, + "links": null, + "description": null, + "edition": "2015", + "targets": [ + { + "kind": [ "lib" ], + "crate_types": [ "lib" ], + "doctest": true, + "edition": "2015", + "name": "bar", + "src_path": "[..]bar/src/lib.rs" + } + ], + "features": {}, + "manifest_path": "[..]bar/Cargo.toml", + "metadata": null + }, + { + "authors": [ + "wycats@example.com" + ], + "categories": [], + "name": "baz", + "readme": null, + "repository": null, + "version": "0.5.0", + "id": "baz[..]", + "keywords": [], + "source": null, + "dependencies": [], + "license": null, + "license_file": null, + "links": null, + "description": null, + "edition": "2015", + "targets": [ + { + "kind": [ "lib" ], + "crate_types": [ "lib" ], + "doctest": true, + "edition": "2015", + "name": "baz", + "src_path": "[..]baz/src/lib.rs" + } + ], + "features": {}, + "manifest_path": "[..]baz/Cargo.toml", + "metadata": null + } + ], + "workspace_members": ["baz 0.5.0 (path+file:[..]baz)", "bar 0.5.0 (path+file:[..]bar)"], + "resolve": { + "nodes": [ + { + "dependencies": [], + "deps": [], + "features": [], + "id": "baz 0.5.0 (path+file:[..]baz)" + }, + { + "dependencies": [], + "deps": [], + "features": [], + "id": "bar 0.5.0 (path+file:[..]bar)" + } + ], + "root": null + }, + "target_directory": "[..]foo/target", + "version": 1, + "workspace_root": "[..]/foo" + }"#, + ) + .run(); +} + +#[cargo_test] +fn workspace_metadata_no_deps() { + let p = project() + .file( + "Cargo.toml", + r#" + [workspace] + members = ["bar", "baz"] + "#, + ) + .file("bar/Cargo.toml", &basic_lib_manifest("bar")) + .file("bar/src/lib.rs", "") + .file("baz/Cargo.toml", &basic_lib_manifest("baz")) + .file("baz/src/lib.rs", "") + .build(); + + p.cargo("metadata --no-deps") + .with_json( + r#" + { + "packages": [ + { + "authors": [ + "wycats@example.com" + ], + "categories": [], + "name": "bar", + "readme": null, + "repository": null, + "version": "0.5.0", + "id": "bar[..]", + "keywords": [], + "source": null, + "dependencies": [], + "license": null, + "license_file": null, + "links": null, + "description": null, + "edition": "2015", + "targets": [ + { + "kind": [ "lib" ], + "crate_types": [ "lib" ], + "doctest": true, + "edition": "2015", + "name": "bar", + 
"src_path": "[..]bar/src/lib.rs" + } + ], + "features": {}, + "manifest_path": "[..]bar/Cargo.toml", + "metadata": null + }, + { + "authors": [ + "wycats@example.com" + ], + "categories": [], + "name": "baz", + "readme": null, + "repository": null, + "version": "0.5.0", + "id": "baz[..]", + "keywords": [], + "source": null, + "dependencies": [], + "license": null, + "license_file": null, + "links": null, + "description": null, + "edition": "2015", + "targets": [ + { + "kind": [ "lib" ], + "crate_types": ["lib"], + "doctest": true, + "edition": "2015", + "name": "baz", + "src_path": "[..]baz/src/lib.rs" + } + ], + "features": {}, + "manifest_path": "[..]baz/Cargo.toml", + "metadata": null + } + ], + "workspace_members": ["baz 0.5.0 (path+file:[..]baz)", "bar 0.5.0 (path+file:[..]bar)"], + "resolve": null, + "target_directory": "[..]foo/target", + "version": 1, + "workspace_root": "[..]/foo" + }"#, + ) + .run(); +} + +#[cargo_test] +fn cargo_metadata_with_invalid_manifest() { + let p = project().file("Cargo.toml", "").build(); + + p.cargo("metadata --format-version 1") + .with_status(101) + .with_stderr( + "\ +[ERROR] failed to parse manifest at `[..]` + +Caused by: + virtual manifests must be configured with [workspace]", + ) + .run(); +} + +const MANIFEST_OUTPUT: &str = r#" +{ + "packages": [{ + "authors": [ + "wycats@example.com" + ], + "categories": [], + "name":"foo", + "version":"0.5.0", + "id":"foo[..]0.5.0[..](path+file://[..]/foo)", + "source":null, + "dependencies":[], + "keywords": [], + "license": null, + "license_file": null, + "links": null, + "description": null, + "edition": "2015", + "targets":[{ + "kind":["bin"], + "crate_types":["bin"], + "doctest": false, + "edition": "2015", + "name":"foo", + "src_path":"[..]/foo/src/foo.rs" + }], + "features":{}, + "manifest_path":"[..]Cargo.toml", + "metadata": null, + "readme": null, + "repository": null + }], + "workspace_members": [ "foo 0.5.0 (path+file:[..]foo)" ], + "resolve": null, + "target_directory": "[..]foo/target", + "version": 1, + "workspace_root": "[..]/foo" +}"#; + +#[cargo_test] +fn cargo_metadata_no_deps_path_to_cargo_toml_relative() { + let p = project() + .file("Cargo.toml", &basic_bin_manifest("foo")) + .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) + .build(); + + p.cargo("metadata --no-deps --manifest-path foo/Cargo.toml") + .cwd(p.root().parent().unwrap()) + .with_json(MANIFEST_OUTPUT) + .run(); +} + +#[cargo_test] +fn cargo_metadata_no_deps_path_to_cargo_toml_absolute() { + let p = project() + .file("Cargo.toml", &basic_bin_manifest("foo")) + .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) + .build(); + + p.cargo("metadata --no-deps --manifest-path") + .arg(p.root().join("Cargo.toml")) + .cwd(p.root().parent().unwrap()) + .with_json(MANIFEST_OUTPUT) + .run(); +} + +#[cargo_test] +fn cargo_metadata_no_deps_path_to_cargo_toml_parent_relative() { + let p = project() + .file("Cargo.toml", &basic_bin_manifest("foo")) + .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) + .build(); + + p.cargo("metadata --no-deps --manifest-path foo") + .cwd(p.root().parent().unwrap()) + .with_status(101) + .with_stderr( + "[ERROR] the manifest-path must be \ + a path to a Cargo.toml file", + ) + .run(); +} + +#[cargo_test] +fn cargo_metadata_no_deps_path_to_cargo_toml_parent_absolute() { + let p = project() + .file("Cargo.toml", &basic_bin_manifest("foo")) + .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) + .build(); + + p.cargo("metadata --no-deps --manifest-path") + .arg(p.root()) + 
.cwd(p.root().parent().unwrap()) + .with_status(101) + .with_stderr( + "[ERROR] the manifest-path must be \ + a path to a Cargo.toml file", + ) + .run(); +} + +#[cargo_test] +fn cargo_metadata_no_deps_cwd() { + let p = project() + .file("Cargo.toml", &basic_bin_manifest("foo")) + .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) + .build(); + + p.cargo("metadata --no-deps") + .with_json(MANIFEST_OUTPUT) + .run(); +} + +#[cargo_test] +fn cargo_metadata_bad_version() { + let p = project() + .file("Cargo.toml", &basic_bin_manifest("foo")) + .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) + .build(); + + p.cargo("metadata --no-deps --format-version 2") + .with_status(1) + .with_stderr_contains( + "\ +error: '2' isn't a valid value for '--format-version ' +[possible values: 1] +", + ) + .run(); +} + +#[cargo_test] +fn multiple_features() { + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + authors = [] + + [features] + a = [] + b = [] + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("metadata --features").arg("a b").run(); +} + +#[cargo_test] +fn package_metadata() { + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + authors = ["wycats@example.com"] + categories = ["database"] + keywords = ["database"] + readme = "README.md" + repository = "https://github.com/rust-lang/cargo" + + [package.metadata.bar] + baz = "quux" + "#, + ) + .file("src/lib.rs", "") + .build(); + + p.cargo("metadata --no-deps") + .with_json( + r#" + { + "packages": [ + { + "authors": ["wycats@example.com"], + "categories": ["database"], + "name": "foo", + "readme": "README.md", + "repository": "https://github.com/rust-lang/cargo", + "version": "0.1.0", + "id": "foo[..]", + "keywords": ["database"], + "source": null, + "dependencies": [], + "edition": "2015", + "license": null, + "license_file": null, + "links": null, + "description": null, + "targets": [ + { + "kind": [ "lib" ], + "crate_types": [ "lib" ], + "doctest": true, + "edition": "2015", + "name": "foo", + "src_path": "[..]foo/src/lib.rs" + } + ], + "features": {}, + "manifest_path": "[..]foo/Cargo.toml", + "metadata": { + "bar": { + "baz": "quux" + } + } + } + ], + "workspace_members": ["foo[..]"], + "resolve": null, + "target_directory": "[..]foo/target", + "version": 1, + "workspace_root": "[..]/foo" + }"#, + ) + .run(); +} + +#[cargo_test] +fn cargo_metadata_path_to_cargo_toml_project() { + let p = project() + .file( + "Cargo.toml", + r#" + [workspace] + members = ["bar"] + "#, + ) + .file("bar/Cargo.toml", &basic_lib_manifest("bar")) + .file("bar/src/lib.rs", "") + .build(); + + p.cargo("package --manifest-path") + .arg(p.root().join("bar/Cargo.toml")) + .cwd(p.root().parent().unwrap()) + .run(); + + p.cargo("metadata --manifest-path") + .arg(p.root().join("target/package/bar-0.5.0/Cargo.toml")) + .with_json( + r#" + { + "packages": [ + { + "authors": [ + "wycats@example.com" + ], + "categories": [], + "dependencies": [], + "description": null, + "edition": "2015", + "features": {}, + "id": "bar 0.5.0 ([..])", + "keywords": [], + "license": null, + "license_file": null, + "links": null, + "manifest_path": "[..]Cargo.toml", + "metadata": null, + "name": "bar", + "readme": null, + "repository": null, + "source": null, + "targets": [ + { + "crate_types": [ + "lib" + ], + "doctest": true, + "edition": "2015", + "kind": [ + "lib" + ], + "name": "bar", + "src_path": "[..]src/lib.rs" + } + ], + "version": "0.5.0" + } + ], + "resolve": { + "nodes": [ + 
{ + "dependencies": [], + "deps": [], + "features": [], + "id": "bar 0.5.0 ([..])" + } + ], + "root": "bar 0.5.0 (path+file:[..])" + }, + "target_directory": "[..]", + "version": 1, + "workspace_members": [ + "bar 0.5.0 (path+file:[..])" + ], + "workspace_root": "[..]" + } +"#, + ) + .run(); +} + +#[cargo_test] +fn package_edition_2018() { + let p = project() + .file("src/lib.rs", "") + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + authors = ["wycats@example.com"] + edition = "2018" + "#, + ) + .build(); + p.cargo("metadata") + .with_json( + r#" + { + "packages": [ + { + "authors": [ + "wycats@example.com" + ], + "categories": [], + "dependencies": [], + "description": null, + "edition": "2018", + "features": {}, + "id": "foo 0.1.0 (path+file:[..])", + "keywords": [], + "license": null, + "license_file": null, + "links": null, + "manifest_path": "[..]Cargo.toml", + "metadata": null, + "name": "foo", + "readme": null, + "repository": null, + "source": null, + "targets": [ + { + "crate_types": [ + "lib" + ], + "doctest": true, + "edition": "2018", + "kind": [ + "lib" + ], + "name": "foo", + "src_path": "[..]src/lib.rs" + } + ], + "version": "0.1.0" + } + ], + "resolve": { + "nodes": [ + { + "dependencies": [], + "deps": [], + "features": [], + "id": "foo 0.1.0 (path+file:[..])" + } + ], + "root": "foo 0.1.0 (path+file:[..])" + }, + "target_directory": "[..]", + "version": 1, + "workspace_members": [ + "foo 0.1.0 (path+file:[..])" + ], + "workspace_root": "[..]" + } + "#, + ) + .run(); +} + +#[cargo_test] +fn target_edition_2018() { + let p = project() + .file("src/lib.rs", "") + .file("src/main.rs", "") + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + authors = ["wycats@example.com"] + edition = "2015" + + [lib] + edition = "2018" + "#, + ) + .build(); + p.cargo("metadata") + .with_json( + r#" + { + "packages": [ + { + "authors": [ + "wycats@example.com" + ], + "categories": [], + "dependencies": [], + "description": null, + "edition": "2015", + "features": {}, + "id": "foo 0.1.0 (path+file:[..])", + "keywords": [], + "license": null, + "license_file": null, + "links": null, + "manifest_path": "[..]Cargo.toml", + "metadata": null, + "name": "foo", + "readme": null, + "repository": null, + "source": null, + "targets": [ + { + "crate_types": [ + "lib" + ], + "doctest": true, + "edition": "2018", + "kind": [ + "lib" + ], + "name": "foo", + "src_path": "[..]src/lib.rs" + }, + { + "crate_types": [ + "bin" + ], + "doctest": false, + "edition": "2015", + "kind": [ + "bin" + ], + "name": "foo", + "src_path": "[..]src/main.rs" + } + ], + "version": "0.1.0" + } + ], + "resolve": { + "nodes": [ + { + "dependencies": [], + "deps": [], + "features": [], + "id": "foo 0.1.0 (path+file:[..])" + } + ], + "root": "foo 0.1.0 (path+file:[..])" + }, + "target_directory": "[..]", + "version": 1, + "workspace_members": [ + "foo 0.1.0 (path+file:[..])" + ], + "workspace_root": "[..]" + } + "#, + ) + .run(); +} + +#[cargo_test] +fn rename_dependency() { + Package::new("bar", "0.1.0").publish(); + Package::new("bar", "0.2.0").publish(); + + let p = project() + .file( + "Cargo.toml", + r#" + [project] + name = "foo" + version = "0.0.1" + authors = [] + + [dependencies] + bar = { version = "0.1.0" } + baz = { version = "0.2.0", package = "bar" } + "#, + ) + .file("src/lib.rs", "extern crate bar; extern crate baz;") + .build(); + + p.cargo("metadata") + .with_json( + r#" +{ + "packages": [ + { + "authors": [], + "categories": [], + "dependencies": [ + { + 
"features": [], + "kind": null, + "name": "bar", + "optional": false, + "rename": null, + "registry": null, + "req": "^0.1.0", + "source": "registry+https://github.com/rust-lang/crates.io-index", + "target": null, + "uses_default_features": true + }, + { + "features": [], + "kind": null, + "name": "bar", + "optional": false, + "rename": "baz", + "registry": null, + "req": "^0.2.0", + "source": "registry+https://github.com/rust-lang/crates.io-index", + "target": null, + "uses_default_features": true + } + ], + "description": null, + "edition": "2015", + "features": {}, + "id": "foo 0.0.1[..]", + "keywords": [], + "license": null, + "license_file": null, + "links": null, + "manifest_path": "[..]", + "metadata": null, + "name": "foo", + "readme": null, + "repository": null, + "source": null, + "targets": [ + { + "crate_types": [ + "lib" + ], + "doctest": true, + "edition": "2015", + "kind": [ + "lib" + ], + "name": "foo", + "src_path": "[..]" + } + ], + "version": "0.0.1" + }, + { + "authors": [], + "categories": [], + "dependencies": [], + "description": null, + "edition": "2015", + "features": {}, + "id": "bar 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "keywords": [], + "license": null, + "license_file": null, + "links": null, + "manifest_path": "[..]", + "metadata": null, + "name": "bar", + "readme": null, + "repository": null, + "source": "registry+https://github.com/rust-lang/crates.io-index", + "targets": [ + { + "crate_types": [ + "lib" + ], + "doctest": true, + "edition": "2015", + "kind": [ + "lib" + ], + "name": "bar", + "src_path": "[..]" + } + ], + "version": "0.1.0" + }, + { + "authors": [], + "categories": [], + "dependencies": [], + "description": null, + "edition": "2015", + "features": {}, + "id": "bar 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "keywords": [], + "license": null, + "license_file": null, + "links": null, + "manifest_path": "[..]", + "metadata": null, + "name": "bar", + "readme": null, + "repository": null, + "source": "registry+https://github.com/rust-lang/crates.io-index", + "targets": [ + { + "crate_types": [ + "lib" + ], + "doctest": true, + "edition": "2015", + "kind": [ + "lib" + ], + "name": "bar", + "src_path": "[..]" + } + ], + "version": "0.2.0" + } + ], + "resolve": { + "nodes": [ + { + "dependencies": [], + "deps": [], + "features": [], + "id": "bar 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" + }, + { + "dependencies": [], + "deps": [], + "features": [], + "id": "bar 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" + }, + { + "dependencies": [ + "bar 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bar 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" + ], + "deps": [ + { + "name": "bar", + "pkg": "bar 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" + }, + { + "name": "baz", + "pkg": "bar 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" + } + ], + "features": [], + "id": "foo 0.0.1[..]" + } + ], + "root": "foo 0.0.1[..]" + }, + "target_directory": "[..]", + "version": 1, + "workspace_members": [ + "foo 0.0.1[..]" + ], + "workspace_root": "[..]" +}"#, + ) + .run(); +} + +#[cargo_test] +fn metadata_links() { + let p = project() + .file( + "Cargo.toml", + r#" + [project] + name = "foo" + version = "0.5.0" + links = "a" + "#, + ) + .file("src/lib.rs", "") + .file("build.rs", "fn main() {}") + .build(); + + p.cargo("metadata") + .with_json( + r#" +{ + "packages": [ + { + "authors": [], + "categories": [], + 
"dependencies": [], + "description": null, + "edition": "2015", + "features": {}, + "id": "foo 0.5.0 [..]", + "keywords": [], + "license": null, + "license_file": null, + "links": "a", + "manifest_path": "[..]/foo/Cargo.toml", + "metadata": null, + "name": "foo", + "readme": null, + "repository": null, + "source": null, + "targets": [ + { + "crate_types": [ + "lib" + ], + "doctest": true, + "edition": "2015", + "kind": [ + "lib" + ], + "name": "foo", + "src_path": "[..]/foo/src/lib.rs" + }, + { + "crate_types": [ + "bin" + ], + "doctest": false, + "edition": "2015", + "kind": [ + "custom-build" + ], + "name": "build-script-build", + "src_path": "[..]/foo/build.rs" + } + ], + "version": "0.5.0" + } + ], + "resolve": { + "nodes": [ + { + "dependencies": [], + "deps": [], + "features": [], + "id": "foo 0.5.0 [..]" + } + ], + "root": "foo 0.5.0 [..]" + }, + "target_directory": "[..]/foo/target", + "version": 1, + "workspace_members": [ + "foo 0.5.0 [..]" + ], + "workspace_root": "[..]/foo" +} +"#, + ) + .run() +} + +#[cargo_test] +fn deps_with_bin_only() { + let p = project() + .file( + "Cargo.toml", + r#" + [package] + name = "foo" + version = "0.1.0" + [dependencies] + bdep = { path = "bdep" } + "#, + ) + .file("src/lib.rs", "") + .file("bdep/Cargo.toml", &basic_bin_manifest("bdep")) + .file("bdep/src/main.rs", "fn main() {}") + .build(); + + let output = p + .cargo("metadata") + .exec_with_output() + .expect("cargo metadata failed"); + let stdout = std::str::from_utf8(&output.stdout).unwrap(); + let meta: serde_json::Value = serde_json::from_str(stdout).expect("failed to parse json"); + let nodes = &meta["resolve"]["nodes"]; + assert!(nodes[0]["deps"].as_array().unwrap().is_empty()); + assert!(nodes[1]["deps"].as_array().unwrap().is_empty()); +}