diff --git a/src/librustc/mir/interpret/allocation.rs b/src/librustc/mir/interpret/allocation.rs index 02c0ebcec4fef..3250ea266a587 100644 --- a/src/librustc/mir/interpret/allocation.rs +++ b/src/librustc/mir/interpret/allocation.rs @@ -104,7 +104,7 @@ impl Allocation { } pub fn from_byte_aligned_bytes(slice: &[u8]) -> Self { - Allocation::from_bytes(slice, Align::from_bytes(1, 1).unwrap()) + Allocation::from_bytes(slice, Align::from_bytes(1).unwrap()) } pub fn undef(size: Size, align: Align) -> Self { diff --git a/src/librustc/mir/interpret/error.rs b/src/librustc/mir/interpret/error.rs index f1ac4b210583e..7477343891e20 100644 --- a/src/librustc/mir/interpret/error.rs +++ b/src/librustc/mir/interpret/error.rs @@ -527,7 +527,7 @@ impl<'tcx, O: fmt::Debug> fmt::Debug for EvalErrorKind<'tcx, O> { write!(f, "tried to interpret an invalid 32-bit value as a char: {}", c), AlignmentCheckFailed { required, has } => write!(f, "tried to access memory with alignment {}, but alignment {} is required", - has.abi(), required.abi()), + has.bytes(), required.bytes()), TypeNotPrimitive(ty) => write!(f, "expected primitive type, got {}", ty), Layout(ref err) => @@ -537,8 +537,9 @@ impl<'tcx, O: fmt::Debug> fmt::Debug for EvalErrorKind<'tcx, O> { MachineError(ref inner) => write!(f, "{}", inner), IncorrectAllocationInformation(size, size2, align, align2) => - write!(f, "incorrect alloc info: expected size {} and align {}, got size {} and \ - align {}", size.bytes(), align.abi(), size2.bytes(), align2.abi()), + write!(f, "incorrect alloc info: expected size {} and align {}, \ + got size {} and align {}", + size.bytes(), align.bytes(), size2.bytes(), align2.bytes()), Panic { ref msg, line, col, ref file } => write!(f, "the evaluated program panicked at '{}', {}:{}:{}", msg, file, line, col), InvalidDiscriminant(val) => diff --git a/src/librustc/session/code_stats.rs b/src/librustc/session/code_stats.rs index b1dcfdfcda038..b8f5ce3cdbc7c 100644 --- a/src/librustc/session/code_stats.rs +++ b/src/librustc/session/code_stats.rs @@ -71,7 +71,7 @@ impl CodeStats { let info = TypeSizeInfo { kind, type_description: type_desc.to_string(), - align: align.abi(), + align: align.bytes(), overall_size: overall_size.bytes(), packed: packed, opt_discr_size: opt_discr_size.map(|s| s.bytes()), diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index d7fb8da7acd05..da0a9acede20e 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -226,9 +226,10 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value))) }; let scalar_pair = |a: Scalar, b: Scalar| { - let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align); - let b_offset = a.value.size(dl).abi_align(b.value.align(dl)); - let size = (b_offset + b.value.size(dl)).abi_align(align); + let b_align = b.value.align(dl); + let align = a.value.align(dl).max(b_align).max(dl.aggregate_align); + let b_offset = a.value.size(dl).align_to(b_align.abi); + let size = (b_offset + b.value.size(dl)).align_to(align.abi); LayoutDetails { variants: Variants::Single { index: VariantIdx::new(0) }, fields: FieldPlacement::Arbitrary { @@ -257,10 +258,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { bug!("struct cannot be packed and aligned"); } - let pack = { - let pack = repr.pack as u64; - Align::from_bytes(pack, pack).unwrap() - }; + let pack = Align::from_bytes(repr.pack as u64).unwrap(); let mut align = if packed { dl.i8_align @@ -274,7 +272,7 @@ impl<'a, 'tcx> 
LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { let mut optimize = !repr.inhibit_struct_field_reordering_opt(); if let StructKind::Prefixed(_, align) = kind { - optimize &= align.abi() == 1; + optimize &= align.bytes() == 1; } if optimize { @@ -285,7 +283,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { }; let optimizing = &mut inverse_memory_index[..end]; let field_align = |f: &TyLayout<'_>| { - if packed { f.align.min(pack).abi() } else { f.align.abi() } + if packed { f.align.abi.min(pack) } else { f.align.abi } }; match kind { StructKind::AlwaysSized | @@ -312,13 +310,13 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { let mut offset = Size::ZERO; if let StructKind::Prefixed(prefix_size, prefix_align) = kind { - if packed { - let prefix_align = prefix_align.min(pack); - align = align.max(prefix_align); + let prefix_align = if packed { + prefix_align.min(pack) } else { - align = align.max(prefix_align); - } - offset = prefix_size.abi_align(prefix_align); + prefix_align + }; + align = align.max(AbiAndPrefAlign::new(prefix_align)); + offset = prefix_size.align_to(prefix_align); } for &i in &inverse_memory_index { @@ -333,15 +331,13 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { } // Invariant: offset < dl.obj_size_bound() <= 1<<61 - if packed { - let field_pack = field.align.min(pack); - offset = offset.abi_align(field_pack); - align = align.max(field_pack); - } - else { - offset = offset.abi_align(field.align); - align = align.max(field.align); - } + let field_align = if packed { + field.align.min(AbiAndPrefAlign::new(pack)) + } else { + field.align + }; + offset = offset.align_to(field_align.abi); + align = align.max(field_align); debug!("univariant offset: {:?} field: {:#?}", offset, field); offsets[i as usize] = offset; @@ -352,7 +348,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { if repr.align > 0 { let repr_align = repr.align as u64; - align = align.max(Align::from_bytes(repr_align, repr_align).unwrap()); + align = align.max(AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap())); debug!("univariant repr_align: {:?}", repr_align); } @@ -377,7 +373,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { memory_index = inverse_memory_index; } - let size = min_size.abi_align(align); + let size = min_size.align_to(align.abi); let mut abi = Abi::Aggregate { sized }; // Unpack newtype ABIs and find scalar pairs. @@ -394,7 +390,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { (Some((i, field)), None, None) => { // Field fills the struct and it has a scalar or scalar pair ABI. 
if offsets[i].bytes() == 0 && - align.abi() == field.align.abi() && + align.abi == field.align.abi && size == field.size { match field.abi { // For plain scalars, or vectors of them, we can't unpack @@ -648,7 +644,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { let size = element.size.checked_mul(count, dl) .ok_or(LayoutError::SizeOverflow(ty))?; let align = dl.vector_align(size); - let size = size.abi_align(align); + let size = size.align_to(align.abi); tcx.intern_layout(LayoutDetails { variants: Variants::Single { index: VariantIdx::new(0) }, @@ -680,10 +676,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { bug!("Union cannot be packed and aligned"); } - let pack = { - let pack = def.repr.pack as u64; - Align::from_bytes(pack, pack).unwrap() - }; + let pack = Align::from_bytes(def.repr.pack as u64).unwrap(); let mut align = if packed { dl.i8_align @@ -694,7 +687,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { if def.repr.align > 0 { let repr_align = def.repr.align as u64; align = align.max( - Align::from_bytes(repr_align, repr_align).unwrap()); + AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap())); } let optimize = !def.repr.inhibit_union_abi_opt(); @@ -704,12 +697,12 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { for field in &variants[index] { assert!(!field.is_unsized()); - if packed { - let field_pack = field.align.min(pack); - align = align.max(field_pack); + let field_align = if packed { + field.align.min(AbiAndPrefAlign::new(pack)) } else { - align = align.max(field.align); - } + field.align + }; + align = align.max(field_align); // If all non-ZST fields have the same ABI, forward this ABI if optimize && !field.is_zst() { @@ -749,7 +742,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { fields: FieldPlacement::Union(variants[index].len()), abi, align, - size: size.abi_align(align) + size: size.align_to(align.abi) })); } @@ -964,19 +957,19 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { let mut size = Size::ZERO; // We're interested in the smallest alignment, so start large. - let mut start_align = Align::from_bytes(256, 256).unwrap(); - assert_eq!(Integer::for_abi_align(dl, start_align), None); + let mut start_align = Align::from_bytes(256).unwrap(); + assert_eq!(Integer::for_align(dl, start_align), None); // repr(C) on an enum tells us to make a (tag, union) layout, // so we need to grow the prefix alignment to be at least // the alignment of the union. (This value is used both for // determining the alignment of the overall enum, and the // determining the alignment of the payload after the tag.) - let mut prefix_align = min_ity.align(dl); + let mut prefix_align = min_ity.align(dl).abi; if def.repr.c() { for fields in &variants { for field in fields { - prefix_align = prefix_align.max(field.align); + prefix_align = prefix_align.max(field.align.abi); } } } @@ -989,8 +982,8 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { // Find the first field we can't move later // to make room for a larger discriminant. for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) { - if !field.is_zst() || field.align.abi() != 1 { - start_align = start_align.min(field.align); + if !field.is_zst() || field.align.abi.bytes() != 1 { + start_align = start_align.min(field.align.abi); break; } } @@ -1000,7 +993,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { }).collect::, _>>()?; // Align the maximum variant size to the largest alignment. 
- size = size.abi_align(align); + size = size.align_to(align.abi); if size.bytes() >= dl.obj_size_bound() { return Err(LayoutError::SizeOverflow(ty)); @@ -1036,7 +1029,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { let mut ity = if def.repr.c() || def.repr.int.is_some() { min_ity } else { - Integer::for_abi_align(dl, start_align).unwrap_or(min_ity) + Integer::for_align(dl, start_align).unwrap_or(min_ity) }; // If the alignment is not larger than the chosen discriminant size, @@ -1204,7 +1197,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { let type_desc = format!("{:?}", layout.ty); self.tcx.sess.code_stats.borrow_mut().record_type_size(kind, type_desc, - layout.align, + layout.align.abi, layout.size, packed, opt_discr_size, @@ -1251,7 +1244,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { name: name.to_string(), offset: offset.bytes(), size: field_layout.size.bytes(), - align: field_layout.align.abi(), + align: field_layout.align.abi.bytes(), } } } @@ -1264,7 +1257,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { } else { session::SizeKind::Exact }, - align: layout.align.abi(), + align: layout.align.abi.bytes(), size: if min_size.bytes() == 0 { layout.size.bytes() } else { @@ -1823,7 +1816,9 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { Abi::ScalarPair(ref a, ref b) => { // HACK(nox): We iter on `b` and then `a` because `max_by_key` // returns the last maximum. - let niche = iter::once((b, a.value.size(self).abi_align(b.value.align(self)))) + let niche = iter::once( + (b, a.value.size(self).align_to(b.value.align(self).abi)) + ) .chain(iter::once((a, Size::ZERO))) .filter_map(|(scalar, offset)| scalar_niche(scalar, offset)) .max_by_key(|niche| niche.available); @@ -1994,12 +1989,16 @@ impl_stable_hash_for!(enum ::ty::layout::Primitive { Pointer }); +impl_stable_hash_for!(struct ::ty::layout::AbiAndPrefAlign { + abi, + pref +}); + impl<'gcx> HashStable> for Align { fn hash_stable(&self, hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { - self.abi().hash_stable(hcx, hasher); - self.pref().hash_stable(hcx, hasher); + self.bytes().hash_stable(hcx, hasher); } } diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 76fc5a6eeec7f..3470d6fd0e72a 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -73,7 +73,7 @@ impl ArgAttributesExt for ArgAttributes { if let Some(align) = self.pointee_align { llvm::LLVMRustAddAlignmentAttr(llfn, idx.as_uint(), - align.abi() as u32); + align.bytes() as u32); } regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn)); } @@ -98,7 +98,7 @@ impl ArgAttributesExt for ArgAttributes { if let Some(align) = self.pointee_align { llvm::LLVMRustAddAlignmentCallSiteAttr(callsite, idx.as_uint(), - align.abi() as u32); + align.bytes() as u32); } regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite)); } @@ -204,7 +204,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { return; } if self.is_sized_indirect() { - OperandValue::Ref(val, None, self.layout.align).store(bx, dst) + OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst) } else if self.is_unsized_indirect() { bug!("unsized ArgType must be handled through store_fn_arg"); } else if let PassMode::Cast(cast) = self.mode { @@ -214,7 +214,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { if can_store_through_cast_ptr { let cast_ptr_llty = bx.cx().type_ptr_to(cast.llvm_type(bx.cx())); let cast_dst = bx.pointercast(dst.llval, 
cast_ptr_llty); - bx.store(val, cast_dst, self.layout.align); + bx.store(val, cast_dst, self.layout.align.abi); } else { // The actual return type is a struct, but the ABI // adaptation code has cast it into some scalar type. The @@ -242,7 +242,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { // ...and then memcpy it to the intended destination. bx.memcpy( dst.llval, - self.layout.align, + self.layout.align.abi, llscratch, scratch_align, bx.cx().const_usize(self.layout.size.bytes()), @@ -273,7 +273,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { OperandValue::Pair(next(), next()).store(bx, dst); } PassMode::Indirect(_, Some(_)) => { - OperandValue::Ref(next(), Some(next()), self.layout.align).store(bx, dst); + OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst); } PassMode::Direct(_) | PassMode::Indirect(_, None) | PassMode::Cast(_) => { self.store(bx, next(), dst); @@ -545,7 +545,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { adjust_for_rust_scalar(&mut b_attrs, b, arg.layout, - a.value.size(cx).abi_align(b.value.align(cx)), + a.value.size(cx).align_to(b.value.align(cx).abi), false); arg.mode = PassMode::Pair(a_attrs, b_attrs); return arg; diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 34e4f4d7e1835..d2a99eae3406f 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -475,7 +475,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { llvm::LLVMBuildAlloca(self.llbuilder, ty, name.as_ptr()) }; - llvm::LLVMSetAlignment(alloca, align.abi() as c_uint); + llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); alloca } } @@ -494,7 +494,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, name.as_ptr()) }; - llvm::LLVMSetAlignment(alloca, align.abi() as c_uint); + llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); alloca } } @@ -503,7 +503,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { self.count_insn("load"); unsafe { let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); - llvm::LLVMSetAlignment(load, align.abi() as c_uint); + llvm::LLVMSetAlignment(load, align.bytes() as c_uint); load } } @@ -658,7 +658,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { let align = if flags.contains(MemFlags::UNALIGNED) { 1 } else { - align.abi() as c_uint + align.bytes() as c_uint }; llvm::LLVMSetAlignment(store, align); if flags.contains(MemFlags::VOLATILE) { @@ -893,8 +893,8 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { let dst = self.pointercast(dst, self.cx().type_i8p()); let src = self.pointercast(src, self.cx().type_i8p()); unsafe { - llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.abi() as c_uint, - src, src_align.abi() as c_uint, size, is_volatile); + llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.bytes() as c_uint, + src, src_align.bytes() as c_uint, size, is_volatile); } } @@ -913,8 +913,8 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { let dst = self.pointercast(dst, self.cx().type_i8p()); let src = self.pointercast(src, self.cx().type_i8p()); unsafe { - llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.abi() as c_uint, - src, src_align.abi() as c_uint, size, is_volatile); + llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.bytes() as c_uint, + src, src_align.bytes() as c_uint, size, is_volatile); } } @@ -930,7 +930,7 @@ impl BuilderMethods<'a, 'tcx> for 
Builder<'a, 'll, 'tcx> { let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width); let llintrinsicfn = self.cx().get_intrinsic(&intrinsic_key); let ptr = self.pointercast(ptr, self.cx().type_i8p()); - let align = self.cx().const_u32(align.abi() as u32); + let align = self.cx().const_u32(align.bytes() as u32); let volatile = self.cx().const_bool(flags.contains(MemFlags::VOLATILE)); self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None); } diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index 2fc505d42db52..cd74a5854a926 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -357,7 +357,7 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { offset: Size, ) -> PlaceRef<'tcx, &'ll Value> { let init = const_alloc_to_llvm(self, alloc); - let base_addr = self.static_addr_of(init, layout.align, None); + let base_addr = self.static_addr_of(init, layout.align.abi, None); let llval = unsafe { llvm::LLVMConstInBoundsGEP( self.static_bitcast(base_addr, self.type_i8p()), diff --git a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs index 821ac931aac72..07dde2d0301fe 100644 --- a/src/librustc_codegen_llvm/consts.rs +++ b/src/librustc_codegen_llvm/consts.rs @@ -94,7 +94,7 @@ fn set_global_alignment(cx: &CodegenCx<'ll, '_>, // Note: GCC and Clang also allow `__attribute__((aligned))` on variables, // which can force it to be smaller. Rust doesn't support this yet. if let Some(min) = cx.sess().target.target.options.min_global_align { - match ty::layout::Align::from_bits(min, min) { + match Align::from_bits(min) { Ok(min) => align = align.max(min), Err(err) => { cx.sess().err(&format!("invalid minimum global alignment: {}", err)); @@ -102,7 +102,7 @@ fn set_global_alignment(cx: &CodegenCx<'ll, '_>, } } unsafe { - llvm::LLVMSetAlignment(gv, align.abi() as u32); + llvm::LLVMSetAlignment(gv, align.bytes() as u32); } } @@ -219,7 +219,7 @@ impl StaticMethods<'tcx> for CodegenCx<'ll, 'tcx> { unsafe { // Upgrade the alignment in cases where the same constant is used with different // alignment requirements - let llalign = align.abi() as u32; + let llalign = align.bytes() as u32; if llalign > llvm::LLVMGetAlignment(gv) { llvm::LLVMSetAlignment(gv, llalign); } diff --git a/src/librustc_codegen_llvm/debuginfo/metadata.rs b/src/librustc_codegen_llvm/debuginfo/metadata.rs index 1c787a969324b..81f2769800d2f 100644 --- a/src/librustc_codegen_llvm/debuginfo/metadata.rs +++ b/src/librustc_codegen_llvm/debuginfo/metadata.rs @@ -323,7 +323,7 @@ fn fixed_vec_metadata( llvm::LLVMRustDIBuilderCreateArrayType( DIB(cx), size.bits(), - align.abi_bits() as u32, + align.bits() as u32, element_type_metadata, subscripts) }; @@ -465,7 +465,7 @@ fn trait_pointer_metadata( syntax_pos::DUMMY_SP), offset: layout.fields.offset(0), size: data_ptr_field.size, - align: data_ptr_field.align, + align: data_ptr_field.align.abi, flags: DIFlags::FlagArtificial, discriminant: None, }, @@ -474,7 +474,7 @@ fn trait_pointer_metadata( type_metadata: type_metadata(cx, vtable_field.ty, syntax_pos::DUMMY_SP), offset: layout.fields.offset(1), size: vtable_field.size, - align: vtable_field.align, + align: vtable_field.align.abi, flags: DIFlags::FlagArtificial, discriminant: None, }, @@ -787,7 +787,7 @@ fn basic_type_metadata(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType { DIB(cx), name.as_ptr(), size.bits(), - align.abi_bits() as u32, + align.bits() as u32, encoding) }; @@ -818,7 +818,7 @@ fn pointer_type_metadata( DIB(cx), 
pointee_type_metadata, pointer_size.bits(), - pointer_align.abi_bits() as u32, + pointer_align.bits() as u32, name.as_ptr()) } } @@ -985,13 +985,12 @@ impl<'tcx> StructMemberDescriptionFactory<'tcx> { f.ident.to_string() }; let field = layout.field(cx, i); - let (size, align) = field.size_and_align(); MemberDescription { name, type_metadata: type_metadata(cx, field.ty, self.span), offset: layout.fields.offset(i), - size, - align, + size: field.size, + align: field.align.abi, flags: DIFlags::FlagZero, discriminant: None, } @@ -1109,13 +1108,12 @@ impl<'tcx> UnionMemberDescriptionFactory<'tcx> { -> Vec> { self.variant.fields.iter().enumerate().map(|(i, f)| { let field = self.layout.field(cx, i); - let (size, align) = field.size_and_align(); MemberDescription { name: f.ident.to_string(), type_metadata: type_metadata(cx, field.ty, self.span), offset: Size::ZERO, - size, - align, + size: field.size, + align: field.align.abi, flags: DIFlags::FlagZero, discriminant: None, } @@ -1228,7 +1226,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> { type_metadata: variant_type_metadata, offset: Size::ZERO, size: self.layout.size, - align: self.layout.align, + align: self.layout.align.abi, flags: DIFlags::FlagZero, discriminant: None, } @@ -1267,7 +1265,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> { type_metadata: variant_type_metadata, offset: Size::ZERO, size: self.layout.size, - align: self.layout.align, + align: self.layout.align.abi, flags: DIFlags::FlagZero, discriminant: Some(self.layout.ty.ty_adt_def().unwrap() .discriminant_for_variant(cx.tcx, i) @@ -1336,7 +1334,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> { type_metadata: variant_type_metadata, offset: Size::ZERO, size: variant.size, - align: variant.align, + align: variant.align.abi, flags: DIFlags::FlagZero, discriminant: None, } @@ -1374,7 +1372,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> { type_metadata: variant_type_metadata, offset: Size::ZERO, size: self.layout.size, - align: self.layout.align, + align: self.layout.align.abi, flags: DIFlags::FlagZero, discriminant: niche_value, } @@ -1565,7 +1563,7 @@ fn prepare_enum_metadata( file_metadata, UNKNOWN_LINE_NUMBER, discriminant_size.bits(), - discriminant_align.abi_bits() as u32, + discriminant_align.abi.bits() as u32, create_DIArray(DIB(cx), &enumerators_metadata), discriminant_base_type_metadata, true) }; @@ -1587,8 +1585,6 @@ fn prepare_enum_metadata( _ => {} } - let (enum_type_size, enum_type_align) = layout.size_and_align(); - let enum_name = SmallCStr::new(&enum_name); let unique_type_id_str = SmallCStr::new( debug_context(cx).type_map.borrow().get_unique_type_id_as_string(unique_type_id) @@ -1610,8 +1606,8 @@ fn prepare_enum_metadata( enum_name.as_ptr(), file_metadata, UNKNOWN_LINE_NUMBER, - enum_type_size.bits(), - enum_type_align.abi_bits() as u32, + layout.size.bits(), + layout.align.abi.bits() as u32, DIFlags::FlagZero, None, 0, // RuntimeLang @@ -1659,7 +1655,7 @@ fn prepare_enum_metadata( file_metadata, UNKNOWN_LINE_NUMBER, size.bits(), - align.abi_bits() as u32, + align.abi.bits() as u32, layout.fields.offset(0).bits(), DIFlags::FlagArtificial, discr_metadata)) @@ -1679,7 +1675,7 @@ fn prepare_enum_metadata( file_metadata, UNKNOWN_LINE_NUMBER, size.bits(), - align.abi_bits() as u32, + align.bits() as u32, layout.fields.offset(0).bits(), DIFlags::FlagArtificial, discr_metadata)) @@ -1695,8 +1691,8 @@ fn prepare_enum_metadata( ptr::null_mut(), file_metadata, UNKNOWN_LINE_NUMBER, - enum_type_size.bits(), - enum_type_align.abi_bits() as u32, + layout.size.bits(), + 
layout.align.abi.bits() as u32, DIFlags::FlagZero, discriminator_metadata, empty_array, @@ -1712,8 +1708,8 @@ fn prepare_enum_metadata( enum_name.as_ptr(), file_metadata, UNKNOWN_LINE_NUMBER, - enum_type_size.bits(), - enum_type_align.abi_bits() as u32, + layout.size.bits(), + layout.align.abi.bits() as u32, DIFlags::FlagZero, None, type_array, @@ -1807,7 +1803,7 @@ fn set_members_of_composite_type(cx: &CodegenCx<'ll, '_>, unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, member_description.size.bits(), - member_description.align.abi_bits() as u32, + member_description.align.bits() as u32, member_description.offset.bits(), match member_description.discriminant { None => None, @@ -1855,7 +1851,7 @@ fn create_struct_stub( unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, struct_size.bits(), - struct_align.abi_bits() as u32, + struct_align.bits() as u32, DIFlags::FlagZero, None, empty_array, @@ -1893,7 +1889,7 @@ fn create_union_stub( unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, union_size.bits(), - union_align.abi_bits() as u32, + union_align.bits() as u32, DIFlags::FlagZero, Some(empty_array), 0, // RuntimeLang @@ -1962,7 +1958,7 @@ pub fn create_global_var_metadata( is_local_to_unit, global, None, - global_align.abi() as u32, + global_align.bytes() as u32, ); } } @@ -2000,7 +1996,7 @@ pub fn create_vtable_metadata( unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, Size::ZERO.bits(), - cx.tcx.data_layout.pointer_align.abi_bits() as u32, + cx.tcx.data_layout.pointer_align.abi.bits() as u32, DIFlags::FlagArtificial, None, empty_array, diff --git a/src/librustc_codegen_llvm/debuginfo/mod.rs b/src/librustc_codegen_llvm/debuginfo/mod.rs index 9784cc6cf9c80..e200da2b0909e 100644 --- a/src/librustc_codegen_llvm/debuginfo/mod.rs +++ b/src/librustc_codegen_llvm/debuginfo/mod.rs @@ -201,7 +201,7 @@ impl DebugInfoBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { cx.sess().opts.optimize != config::OptLevel::No, DIFlags::FlagZero, argument_index, - align.abi() as u32, + align.bytes() as u32, ) }; source_loc::set_debug_location(self, diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index b2f1f933da4db..3548ccfd5a537 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -110,7 +110,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { let name = &*tcx.item_name(def_id).as_str(); let llret_ty = self.cx().layout_of(ret_ty).llvm_type(self.cx()); - let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align); + let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align.abi); let simple = get_simple_intrinsic(self.cx(), name); let llval = match name { @@ -158,7 +158,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { } "min_align_of" => { let tp_ty = substs.type_at(0); - self.cx().const_usize(self.cx().align_of(tp_ty).abi()) + self.cx().const_usize(self.cx().align_of(tp_ty).bytes()) } "min_align_of_val" => { let tp_ty = substs.type_at(0); @@ -167,12 +167,12 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { glue::size_and_align_of_dst(self, tp_ty, Some(meta)); llalign } else { - self.cx().const_usize(self.cx().align_of(tp_ty).abi()) + self.cx().const_usize(self.cx().align_of(tp_ty).bytes()) } } "pref_align_of" => { let tp_ty = substs.type_at(0); - self.cx().const_usize(self.cx().align_of(tp_ty).pref()) + self.cx().const_usize(self.cx().layout_of(tp_ty).align.pref.bytes()) } "type_name" => { let tp_ty = substs.type_at(0); @@ -261,7 +261,7 
@@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { let align = if name == "unaligned_volatile_load" { 1 } else { - self.cx().align_of(tp_ty).abi() as u32 + self.cx().align_of(tp_ty).bytes() as u32 }; unsafe { llvm::LLVMSetAlignment(load, align); @@ -815,7 +815,7 @@ fn try_intrinsic( ) { if bx.cx().sess().no_landing_pads() { bx.call(func, &[data], None); - let ptr_align = bx.tcx().data_layout.pointer_align; + let ptr_align = bx.tcx().data_layout.pointer_align.abi; bx.store(bx.cx().const_null(bx.cx().type_i8p()), dest, ptr_align); } else if wants_msvc_seh(bx.cx().sess()) { codegen_msvc_try(bx, func, data, local_ptr, dest); @@ -890,7 +890,7 @@ fn codegen_msvc_try( // // More information can be found in libstd's seh.rs implementation. let i64p = bx.cx().type_ptr_to(bx.cx().type_i64()); - let ptr_align = bx.tcx().data_layout.pointer_align; + let ptr_align = bx.tcx().data_layout.pointer_align.abi; let slot = bx.alloca(i64p, "slot", ptr_align); bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None); @@ -906,7 +906,7 @@ fn codegen_msvc_try( let funclet = catchpad.catch_pad(cs, &[tydesc, bx.cx().const_i32(0), slot]); let addr = catchpad.load(slot, ptr_align); - let i64_align = bx.tcx().data_layout.i64_align; + let i64_align = bx.tcx().data_layout.i64_align.abi; let arg1 = catchpad.load(addr, i64_align); let val1 = bx.cx().const_i32(1); let gep1 = catchpad.inbounds_gep(addr, &[val1]); @@ -923,7 +923,7 @@ fn codegen_msvc_try( // Note that no invoke is used here because by definition this function // can't panic (that's what it's catching). let ret = bx.call(llfn, &[func, data, local_ptr], None); - let i32_align = bx.tcx().data_layout.i32_align; + let i32_align = bx.tcx().data_layout.i32_align.abi; bx.store(ret, dest, i32_align); } @@ -982,7 +982,7 @@ fn codegen_gnu_try( let vals = catch.landing_pad(lpad_ty, bx.cx().eh_personality(), 1); catch.add_clause(vals, bx.cx().const_null(bx.cx().type_i8p())); let ptr = catch.extract_value(vals, 0); - let ptr_align = bx.tcx().data_layout.pointer_align; + let ptr_align = bx.tcx().data_layout.pointer_align.abi; let bitcast = catch.bitcast(local_ptr, bx.cx().type_ptr_to(bx.cx().type_i8p())); catch.store(ptr, bitcast, ptr_align); catch.ret(bx.cx().const_i32(1)); @@ -991,7 +991,7 @@ fn codegen_gnu_try( // Note that no invoke is used here because by definition this function // can't panic (that's what it's catching). 
let ret = bx.call(llfn, &[func, data, local_ptr], None); - let i32_align = bx.tcx().data_layout.i32_align; + let i32_align = bx.tcx().data_layout.i32_align.abi; bx.store(ret, dest, i32_align); } @@ -1436,7 +1436,7 @@ fn generic_simd_intrinsic( // Alignment of T, must be a constant integer value: let alignment_ty = bx.cx().type_i32(); - let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi() as i32); + let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).bytes() as i32); // Truncate the mask vector to a vector of i1s: let (mask, mask_ty) = { @@ -1536,7 +1536,7 @@ fn generic_simd_intrinsic( // Alignment of T, must be a constant integer value: let alignment_ty = bx.cx().type_i32(); - let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi() as i32); + let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).bytes() as i32); // Truncate the mask vector to a vector of i1s: let (mask, mask_ty) = { diff --git a/src/librustc_codegen_llvm/type_of.rs b/src/librustc_codegen_llvm/type_of.rs index 90c02cddb2b60..15b5bdeb44d60 100644 --- a/src/librustc_codegen_llvm/type_of.rs +++ b/src/librustc_codegen_llvm/type_of.rs @@ -80,7 +80,7 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, match layout.fields { layout::FieldPlacement::Union(_) => { - let fill = cx.type_padding_filler( layout.size, layout.align); + let fill = cx.type_padding_filler(layout.size, layout.align.abi); let packed = false; match name { None => { @@ -120,23 +120,23 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, let mut packed = false; let mut offset = Size::ZERO; - let mut prev_effective_align = layout.align; + let mut prev_effective_align = layout.align.abi; let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2); for i in layout.fields.index_by_increasing_offset() { let target_offset = layout.fields.offset(i as usize); let field = layout.field(cx, i); - let effective_field_align = layout.align - .min(field.align) + let effective_field_align = layout.align.abi + .min(field.align.abi) .restrict_for_offset(target_offset); - packed |= effective_field_align.abi() < field.align.abi(); + packed |= effective_field_align < field.align.abi; debug!("struct_llfields: {}: {:?} offset: {:?} target_offset: {:?} \ effective_field_align: {}", - i, field, offset, target_offset, effective_field_align.abi()); + i, field, offset, target_offset, effective_field_align.bytes()); assert!(target_offset >= offset); let padding = target_offset - offset; let padding_align = prev_effective_align.min(effective_field_align); - assert_eq!(offset.abi_align(padding_align) + padding, target_offset); + assert_eq!(offset.align_to(padding_align) + padding, target_offset); result.push(cx.type_padding_filler( padding, padding_align)); debug!(" padding before: {:?}", padding); @@ -151,7 +151,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, } let padding = layout.size - offset; let padding_align = prev_effective_align; - assert_eq!(offset.abi_align(padding_align) + padding, layout.size); + assert_eq!(offset.align_to(padding_align) + padding, layout.size); debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}", padding, offset, layout.size); result.push(cx.type_padding_filler(padding, padding_align)); @@ -166,7 +166,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, impl<'a, 'tcx> CodegenCx<'a, 'tcx> { pub fn align_of(&self, ty: Ty<'tcx>) -> Align { - self.layout_of(ty).align + self.layout_of(ty).align.abi } pub fn size_of(&self, ty: Ty<'tcx>) -> Size { @@ -174,7 +174,8 @@ impl<'a, 'tcx> 
CodegenCx<'a, 'tcx> { } pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) { - self.layout_of(ty).size_and_align() + let layout = self.layout_of(ty); + (layout.size, layout.align.abi) } } @@ -332,7 +333,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { layout::Pointer => { // If we know the alignment, pick something better than i8. let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) { - cx.type_pointee_for_abi_align( pointee.align) + cx.type_pointee_for_align(pointee.align) } else { cx.type_i8() }; @@ -376,7 +377,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { let offset = if index == 0 { Size::ZERO } else { - a.value.size(cx).abi_align(b.value.align(cx)) + a.value.size(cx).align_to(b.value.align(cx).abi) }; self.scalar_llvm_type_at(cx, scalar, offset) } diff --git a/src/librustc_codegen_ssa/glue.rs b/src/librustc_codegen_ssa/glue.rs index 515f36b5c65de..bb28ea74dc002 100644 --- a/src/librustc_codegen_ssa/glue.rs +++ b/src/librustc_codegen_ssa/glue.rs @@ -25,14 +25,12 @@ pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( t: Ty<'tcx>, info: Option ) -> (Bx::Value, Bx::Value) { - debug!("calculate size of DST: {}; with lost info: {:?}", - t, info); - if bx.cx().type_is_sized(t) { - let (size, align) = bx.cx().layout_of(t).size_and_align(); - debug!("size_and_align_of_dst t={} info={:?} size: {:?} align: {:?}", - t, info, size, align); - let size = bx.cx().const_usize(size.bytes()); - let align = bx.cx().const_usize(align.abi()); + let layout = bx.cx().layout_of(t); + debug!("size_and_align_of_dst(ty={}, info={:?}): layout: {:?}", + t, info, layout); + if !layout.is_unsized() { + let size = bx.cx().const_usize(layout.size.bytes()); + let align = bx.cx().const_usize(layout.align.abi.bytes()); return (size, align); } match t.sty { @@ -42,24 +40,22 @@ pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( (meth::SIZE.get_usize(bx, vtable), meth::ALIGN.get_usize(bx, vtable)) } ty::Slice(_) | ty::Str => { - let unit = t.sequence_element_type(bx.tcx()); + let unit = layout.field(bx.cx(), 0); // The info in this case is the length of the str, so the size is that // times the unit size. - let (size, align) = bx.cx().layout_of(unit).size_and_align(); - (bx.mul(info.unwrap(), bx.cx().const_usize(size.bytes())), - bx.cx().const_usize(align.abi())) + (bx.mul(info.unwrap(), bx.cx().const_usize(unit.size.bytes())), + bx.cx().const_usize(unit.align.abi.bytes())) } _ => { // First get the size of all statically known fields. // Don't use size_of because it also rounds up to alignment, which we // want to avoid, as the unsized field's alignment could be smaller. 
assert!(!t.is_simd()); - let layout = bx.cx().layout_of(t); debug!("DST {} layout: {:?}", t, layout); let i = layout.fields.count() - 1; let sized_size = layout.fields.offset(i).bytes(); - let sized_align = layout.align.abi(); + let sized_align = layout.align.abi.bytes(); debug!("DST {} statically sized prefix size: {} align: {}", t, sized_size, sized_align); let sized_size = bx.cx().const_usize(sized_size); diff --git a/src/librustc_codegen_ssa/meth.rs b/src/librustc_codegen_ssa/meth.rs index 06c4f7a87d880..e45cccee34946 100644 --- a/src/librustc_codegen_ssa/meth.rs +++ b/src/librustc_codegen_ssa/meth.rs @@ -41,7 +41,7 @@ impl<'a, 'tcx: 'a> VirtualIndex { llvtable, bx.cx().type_ptr_to(bx.cx().fn_ptr_backend_type(fn_ty)) ); - let ptr_align = bx.tcx().data_layout.pointer_align; + let ptr_align = bx.tcx().data_layout.pointer_align.abi; let gep = bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]); let ptr = bx.load(gep, ptr_align); bx.nonnull_metadata(ptr); @@ -59,7 +59,7 @@ impl<'a, 'tcx: 'a> VirtualIndex { debug!("get_int({:?}, {:?})", llvtable, self); let llvtable = bx.pointercast(llvtable, bx.cx().type_ptr_to(bx.cx().type_isize())); - let usize_align = bx.tcx().data_layout.pointer_align; + let usize_align = bx.tcx().data_layout.pointer_align.abi; let gep = bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]); let ptr = bx.load(gep, usize_align); // Vtable loads are invariant @@ -100,19 +100,19 @@ pub fn get_vtable<'tcx, Cx: CodegenMethods<'tcx>>( }) }); - let (size, align) = cx.layout_of(ty).size_and_align(); + let layout = cx.layout_of(ty); // ///////////////////////////////////////////////////////////////////////////////////////////// // If you touch this code, be sure to also make the corresponding changes to // `get_vtable` in rust_mir/interpret/traits.rs // ///////////////////////////////////////////////////////////////////////////////////////////// let components: Vec<_> = [ cx.get_fn(monomorphize::resolve_drop_in_place(cx.tcx(), ty)), - cx.const_usize(size.bytes()), - cx.const_usize(align.abi()) + cx.const_usize(layout.size.bytes()), + cx.const_usize(layout.align.abi.bytes()) ].iter().cloned().chain(methods).collect(); let vtable_const = cx.const_struct(&components, false); - let align = cx.data_layout().pointer_align; + let align = cx.data_layout().pointer_align.abi; let vtable = cx.static_addr_of(vtable_const, align, Some("vtable")); cx.create_vtable_metadata(ty, vtable); diff --git a/src/librustc_codegen_ssa/mir/block.rs b/src/librustc_codegen_ssa/mir/block.rs index 1702ad19b76fd..75a6f07124ae7 100644 --- a/src/librustc_codegen_ssa/mir/block.rs +++ b/src/librustc_codegen_ssa/mir/block.rs @@ -280,7 +280,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { scratch.llval } Ref(llval, _, align) => { - assert_eq!(align.abi(), op.layout.align.abi(), + assert_eq!(align, op.layout.align.abi, "return place is unaligned!"); llval } @@ -288,7 +288,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let addr = bx.pointercast(llslot, bx.cx().type_ptr_to( bx.cx().cast_backend_type(&cast_ty) )); - bx.load(addr, self.fn_ty.ret.layout.align) + bx.load(addr, self.fn_ty.ret.layout.align.abi) } }; bx.ret(llval); @@ -386,9 +386,9 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let filename = bx.cx().const_str_slice(filename); let line = bx.cx().const_u32(loc.line as u32); let col = bx.cx().const_u32(loc.col.to_usize() as u32 + 1); - let align = tcx.data_layout.aggregate_align - 
.max(tcx.data_layout.i32_align) - .max(tcx.data_layout.pointer_align); + let align = tcx.data_layout.aggregate_align.abi + .max(tcx.data_layout.i32_align.abi) + .max(tcx.data_layout.pointer_align.abi); // Put together the arguments to the panic entry point. let (lang_item, args) = match *msg { @@ -522,9 +522,9 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let filename = bx.cx().const_str_slice(filename); let line = bx.cx().const_u32(loc.line as u32); let col = bx.cx().const_u32(loc.col.to_usize() as u32 + 1); - let align = tcx.data_layout.aggregate_align - .max(tcx.data_layout.i32_align) - .max(tcx.data_layout.pointer_align); + let align = tcx.data_layout.aggregate_align.abi + .max(tcx.data_layout.i32_align.abi) + .max(tcx.data_layout.pointer_align.abi); let str = format!( "Attempted to instantiate uninhabited type {} using mem::{}", @@ -800,12 +800,12 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { (scratch.llval, scratch.align, true) } _ => { - (op.immediate_or_packed_pair(bx), arg.layout.align, false) + (op.immediate_or_packed_pair(bx), arg.layout.align.abi, false) } } } Ref(llval, _, align) => { - if arg.is_indirect() && align.abi() < arg.layout.align.abi() { + if arg.is_indirect() && align < arg.layout.align.abi { // `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't // have scary latent bugs around. @@ -826,7 +826,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let addr = bx.pointercast(llval, bx.cx().type_ptr_to( bx.cx().cast_backend_type(&ty)) ); - llval = bx.load(addr, align.min(arg.layout.align)); + llval = bx.load(addr, align.min(arg.layout.align.abi)); } else { // We can't use `PlaceRef::load` here because the argument // may have a type we don't treat as immediate, but the ABI @@ -1006,7 +1006,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { self.codegen_place(bx, dest) }; if fn_ret.is_indirect() { - if dest.align.abi() < dest.layout.align.abi() { + if dest.align < dest.layout.align.abi { // Currently, MIR code generation does not create calls // that store directly to fields of packed structs (in // fact, the calls it creates write only to temps), @@ -1062,7 +1062,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let src = self.codegen_operand(bx, src); let llty = bx.cx().backend_type(src.layout); let cast_ptr = bx.pointercast(dst.llval, bx.cx().type_ptr_to(llty)); - let align = src.layout.align.min(dst.layout.align); + let align = src.layout.align.abi.min(dst.align); src.val.store(bx, PlaceRef::new_sized(cast_ptr, src.layout, align)); } diff --git a/src/librustc_codegen_ssa/mir/mod.rs b/src/librustc_codegen_ssa/mir/mod.rs index 0579afe1d49c9..fdc9a37a9eb3f 100644 --- a/src/librustc_codegen_ssa/mir/mod.rs +++ b/src/librustc_codegen_ssa/mir/mod.rs @@ -304,7 +304,7 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( if local == mir::RETURN_PLACE && fx.fn_ty.ret.is_indirect() { debug!("alloc: {:?} (return place) -> place", local); let llretptr = fx.cx.get_param(llfn, 0); - LocalRef::Place(PlaceRef::new_sized(llretptr, layout, layout.align)) + LocalRef::Place(PlaceRef::new_sized(llretptr, layout, layout.align.abi)) } else if memory_locals.contains(local) { debug!("alloc: {:?} -> place", local); if layout.is_unsized() { @@ -555,7 +555,7 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( let llarg = 
bx.cx().get_param(bx.llfn(), llarg_idx as c_uint); bx.set_value_name(llarg, &name); llarg_idx += 1; - PlaceRef::new_sized(llarg, arg.layout, arg.layout.align) + PlaceRef::new_sized(llarg, arg.layout, arg.layout.align.abi) } else if arg.is_unsized_indirect() { // As the storage for the indirect argument lives during // the whole function call, we just copy the fat pointer. diff --git a/src/librustc_codegen_ssa/mir/operand.rs b/src/librustc_codegen_ssa/mir/operand.rs index d574d89d67e32..f6917906d4a8a 100644 --- a/src/librustc_codegen_ssa/mir/operand.rs +++ b/src/librustc_codegen_ssa/mir/operand.rs @@ -152,7 +152,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { llval: llptr, llextra, layout, - align: layout.align, + align: layout.align.abi, } } @@ -228,7 +228,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { OperandValue::Immediate(a_llval) } else { assert_eq!(offset, a.value.size(bx.cx()) - .abi_align(b.value.align(bx.cx()))); + .align_to(b.value.align(bx.cx()).abi)); assert_eq!(field.size, b.value.size(bx.cx())); OperandValue::Immediate(b_llval) } @@ -348,8 +348,8 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue { }; // FIXME: choose an appropriate alignment, or use dynamic align somehow - let max_align = Align::from_bits(128, 128).unwrap(); - let min_align = Align::from_bits(8, 8).unwrap(); + let max_align = Align::from_bits(128).unwrap(); + let min_align = Align::from_bits(8).unwrap(); // Allocate an appropriate region on the stack, and copy the value into it let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra)); @@ -470,7 +470,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { bx.load_operand(PlaceRef::new_sized( bx.cx().const_undef(bx.cx().type_ptr_to(bx.cx().backend_type(layout))), layout, - layout.align, + layout.align.abi, )) }) } diff --git a/src/librustc_codegen_ssa/mir/place.rs b/src/librustc_codegen_ssa/mir/place.rs index 5b36ee8fd183c..e6fd6dfca736b 100644 --- a/src/librustc_codegen_ssa/mir/place.rs +++ b/src/librustc_codegen_ssa/mir/place.rs @@ -58,8 +58,8 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { ) -> Self { debug!("alloca({:?}: {:?})", name, layout); assert!(!layout.is_unsized(), "tried to statically allocate unsized place"); - let tmp = bx.alloca(bx.cx().backend_type(layout), name, layout.align); - Self::new_sized(tmp, layout, layout.align) + let tmp = bx.alloca(bx.cx().backend_type(layout), name, layout.align.abi); + Self::new_sized(tmp, layout, layout.align.abi) } /// Returns a place for an indirect reference to an unsized place. @@ -109,7 +109,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { self.llval } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi { // Offsets have to match either first or second field. - assert_eq!(offset, a.value.size(bx.cx()).abi_align(b.value.align(bx.cx()))); + assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi)); bx.struct_gep(self.llval, 1) } else { bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix)) @@ -143,7 +143,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { if def.repr.packed() { // FIXME(eddyb) generalize the adjustment when we // start supporting packing to larger alignments. 
- assert_eq!(self.layout.align.abi(), 1); + assert_eq!(self.layout.align.abi.bytes(), 1); return simple(); } } @@ -308,9 +308,8 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { // Issue #34427: As workaround for LLVM bug on ARM, // use memset of 0 before assigning niche value. let fill_byte = bx.cx().const_u8(0); - let (size, align) = self.layout.size_and_align(); - let size = bx.cx().const_usize(size.bytes()); - bx.memset(self.llval, fill_byte, size, align, MemFlags::empty()); + let size = bx.cx().const_usize(self.layout.size.bytes()); + bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty()); } let niche = self.project_field(bx, 0); @@ -419,13 +418,13 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let llval = bx.cx().const_undef( bx.cx().type_ptr_to(bx.cx().backend_type(layout)) ); - PlaceRef::new_sized(llval, layout, layout.align) + PlaceRef::new_sized(llval, layout, layout.align.abi) } } } mir::Place::Static(box mir::Static { def_id, ty }) => { let layout = cx.layout_of(self.monomorphize(&ty)); - PlaceRef::new_sized(cx.get_static(def_id), layout, layout.align) + PlaceRef::new_sized(cx.get_static(def_id), layout, layout.align.abi) }, mir::Place::Projection(box mir::Projection { ref base, diff --git a/src/librustc_codegen_ssa/mir/rvalue.rs b/src/librustc_codegen_ssa/mir/rvalue.rs index 6b1efa060fdad..805c1a343d044 100644 --- a/src/librustc_codegen_ssa/mir/rvalue.rs +++ b/src/librustc_codegen_ssa/mir/rvalue.rs @@ -496,10 +496,10 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => { - let content_ty: Ty<'tcx> = self.monomorphize(&content_ty); - let (size, align) = bx.cx().layout_of(content_ty).size_and_align(); - let llsize = bx.cx().const_usize(size.bytes()); - let llalign = bx.cx().const_usize(align.abi()); + let content_ty = self.monomorphize(&content_ty); + let content_layout = bx.cx().layout_of(content_ty); + let llsize = bx.cx().const_usize(content_layout.size.bytes()); + let llalign = bx.cx().const_usize(content_layout.align.abi.bytes()); let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty)); let llty_ptr = bx.cx().backend_type(box_layout); diff --git a/src/librustc_codegen_ssa/traits/builder.rs b/src/librustc_codegen_ssa/traits/builder.rs index 3757c514d2ce9..0b3066f561cce 100644 --- a/src/librustc_codegen_ssa/traits/builder.rs +++ b/src/librustc_codegen_ssa/traits/builder.rs @@ -15,10 +15,10 @@ use super::intrinsic::IntrinsicCallMethods; use super::type_::ArgTypeMethods; use super::HasCodegen; use common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope}; -use std::ffi::CStr; use mir::operand::OperandRef; use mir::place::PlaceRef; use rustc::ty::layout::{Align, Size}; +use std::ffi::CStr; use MemFlags; use std::borrow::Cow; diff --git a/src/librustc_codegen_ssa/traits/type_.rs b/src/librustc_codegen_ssa/traits/type_.rs index 1aa1f45f51746..15976ac516dc6 100644 --- a/src/librustc_codegen_ssa/traits/type_.rs +++ b/src/librustc_codegen_ssa/traits/type_.rs @@ -120,16 +120,16 @@ pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> { } } - fn type_pointee_for_abi_align(&self, align: Align) -> Self::Type { + fn type_pointee_for_align(&self, align: Align) -> Self::Type { // FIXME(eddyb) We could find a better approximation if ity.align < align. 
- let ity = layout::Integer::approximate_abi_align(self, align); + let ity = layout::Integer::approximate_align(self, align); self.type_from_integer(ity) } /// Return a LLVM type that has at most the required alignment, /// and exactly the required size, as a best-effort padding array. fn type_padding_filler(&self, size: Size, align: Align) -> Self::Type { - let unit = layout::Integer::approximate_abi_align(self, align); + let unit = layout::Integer::approximate_align(self, align); let size = size.bytes(); let unit_size = unit.size().bytes(); assert_eq!(size % unit_size, 0); diff --git a/src/librustc_mir/const_eval.rs b/src/librustc_mir/const_eval.rs index 3b32fe21adf83..1bc3b322717e5 100644 --- a/src/librustc_mir/const_eval.rs +++ b/src/librustc_mir/const_eval.rs @@ -129,7 +129,7 @@ pub fn op_to_const<'tcx>( assert!(meta.is_none()); let ptr = ptr.to_ptr()?; let alloc = ecx.memory.get(ptr.alloc_id)?; - assert!(alloc.align.abi() >= align.abi()); + assert!(alloc.align >= align); assert!(alloc.bytes.len() as u64 - ptr.offset.bytes() >= op.layout.size.bytes()); let mut alloc = alloc.clone(); alloc.align = align; diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs index 5908341358246..936b476df39b9 100644 --- a/src/librustc_mir/interpret/eval_context.rs +++ b/src/librustc_mir/interpret/eval_context.rs @@ -316,7 +316,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc layout: TyLayout<'tcx>, ) -> EvalResult<'tcx, Option<(Size, Align)>> { if !layout.is_unsized() { - return Ok(Some(layout.size_and_align())); + return Ok(Some((layout.size, layout.align.abi))); } match layout.ty.sty { ty::Adt(..) | ty::Tuple(..) => { @@ -328,7 +328,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc trace!("DST layout: {:?}", layout); let sized_size = layout.fields.offset(layout.fields.count() - 1); - let sized_align = layout.align; + let sized_align = layout.align.abi; trace!( "DST {} statically sized prefix size: {:?} align: {:?}", layout.ty, @@ -381,7 +381,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc // // `(size + (align-1)) & -align` - Ok(Some((size.abi_align(align), align))) + Ok(Some((size.align_to(align), align))) } ty::Dynamic(..) 
=> { let vtable = metadata.expect("dyn trait fat ptr must have vtable").to_ptr()?; @@ -391,8 +391,8 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc ty::Slice(_) | ty::Str => { let len = metadata.expect("slice fat ptr must have vtable").to_usize(self)?; - let (elem_size, align) = layout.field(self, 0)?.size_and_align(); - Ok(Some((elem_size * len, align))) + let elem = layout.field(self, 0)?; + Ok(Some((elem.size * len, elem.align.abi))) } ty::Foreign(_) => { @@ -636,7 +636,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc let (ptr, align) = mplace.to_scalar_ptr_align(); match ptr { Scalar::Ptr(ptr) => { - write!(msg, " by align({}) ref:", align.abi()).unwrap(); + write!(msg, " by align({}) ref:", align.bytes()).unwrap(); allocs.push(ptr.alloc_id); } ptr => write!(msg, " by integral ref: {:?}", ptr).unwrap(), @@ -665,7 +665,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc Place::Ptr(mplace) => { match mplace.ptr { Scalar::Ptr(ptr) => { - trace!("by align({}) ref:", mplace.align.abi()); + trace!("by align({}) ref:", mplace.align.bytes()); self.memory.dump_alloc(ptr.alloc_id); } ptr => trace!(" integral by ref: {:?}", ptr), diff --git a/src/librustc_mir/interpret/intrinsics.rs b/src/librustc_mir/interpret/intrinsics.rs index 7ef940059705c..bbee6e0b49ae0 100644 --- a/src/librustc_mir/interpret/intrinsics.rs +++ b/src/librustc_mir/interpret/intrinsics.rs @@ -60,7 +60,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> match intrinsic_name { "min_align_of" => { let elem_ty = substs.type_at(0); - let elem_align = self.layout_of(elem_ty)?.align.abi(); + let elem_align = self.layout_of(elem_ty)?.align.abi.bytes(); let align_val = Scalar::from_uint(elem_align, dest.layout.size); self.write_scalar(align_val, dest)?; } diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs index 7dd42c6664968..898600d8322d2 100644 --- a/src/librustc_mir/interpret/memory.rs +++ b/src/librustc_mir/interpret/memory.rs @@ -268,18 +268,18 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { } }; // Check alignment - if alloc_align.abi() < required_align.abi() { + if alloc_align.bytes() < required_align.bytes() { return err!(AlignmentCheckFailed { has: alloc_align, required: required_align, }); } - if offset % required_align.abi() == 0 { + if offset % required_align.bytes() == 0 { Ok(()) } else { - let has = offset % required_align.abi(); + let has = offset % required_align.bytes(); err!(AlignmentCheckFailed { - has: Align::from_bytes(has, has).unwrap(), + has: Align::from_bytes(has).unwrap(), required: required_align, }) } @@ -449,14 +449,14 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { } // Could also be a fn ptr or extern static match self.tcx.alloc_map.lock().get(id) { - Some(AllocType::Function(..)) => (Size::ZERO, Align::from_bytes(1, 1).unwrap()), + Some(AllocType::Function(..)) => (Size::ZERO, Align::from_bytes(1).unwrap()), Some(AllocType::Static(did)) => { // The only way `get` couldn't have worked here is if this is an extern static assert!(self.tcx.is_foreign_item(did)); // Use size and align of the type let ty = self.tcx.type_of(did); let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap(); - (layout.size, layout.align) + (layout.size, layout.align.abi) } _ => { // Must be a deallocated pointer @@ -521,7 +521,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> 
Memory<'a, 'mir, 'tcx, M> { "{}({} bytes, alignment {}){}", msg, alloc.bytes.len(), - alloc.align.abi(), + alloc.align.bytes(), extra ); @@ -863,7 +863,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { allow_ptr_and_undef: bool, ) -> EvalResult<'tcx> { // Empty accesses don't need to be valid pointers, but they should still be non-NULL - let align = Align::from_bytes(1, 1).unwrap(); + let align = Align::from_bytes(1).unwrap(); if size.bytes() == 0 { self.check_align(ptr, align)?; return Ok(()); @@ -881,7 +881,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { pub fn read_bytes(&self, ptr: Scalar, size: Size) -> EvalResult<'tcx, &[u8]> { // Empty accesses don't need to be valid pointers, but they should still be non-NULL - let align = Align::from_bytes(1, 1).unwrap(); + let align = Align::from_bytes(1).unwrap(); if size.bytes() == 0 { self.check_align(ptr, align)?; return Ok(&[]); @@ -891,7 +891,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { pub fn write_bytes(&mut self, ptr: Scalar, src: &[u8]) -> EvalResult<'tcx> { // Empty accesses don't need to be valid pointers, but they should still be non-NULL - let align = Align::from_bytes(1, 1).unwrap(); + let align = Align::from_bytes(1).unwrap(); if src.is_empty() { self.check_align(ptr, align)?; return Ok(()); @@ -908,7 +908,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { count: Size ) -> EvalResult<'tcx> { // Empty accesses don't need to be valid pointers, but they should still be non-NULL - let align = Align::from_bytes(1, 1).unwrap(); + let align = Align::from_bytes(1).unwrap(); if count.bytes() == 0 { self.check_align(ptr, align)?; return Ok(()); @@ -1035,7 +1035,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { 16 => layout::I128, _ => bug!("bad integer size: {}", size.bytes()), }; - ity.align(self) + ity.align(self).abi } } diff --git a/src/librustc_mir/interpret/operand.rs b/src/librustc_mir/interpret/operand.rs index 5d993cfee084c..8238d580022a8 100644 --- a/src/librustc_mir/interpret/operand.rs +++ b/src/librustc_mir/interpret/operand.rs @@ -285,7 +285,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> let (a, b) = (&a.value, &b.value); let (a_size, b_size) = (a.size(self), b.size(self)); let a_ptr = ptr; - let b_offset = a_size.abi_align(b.align(self)); + let b_offset = a_size.align_to(b.align(self).abi); assert!(b_offset.bytes() > 0); // we later use the offset to test which field to use let b_ptr = ptr.offset(b_offset, self)?.into(); let a_val = self.memory.read_scalar(a_ptr, ptr_align, a_size)?; diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs index 9f248d4635074..7ef3dd5f7201e 100644 --- a/src/librustc_mir/interpret/place.rs +++ b/src/librustc_mir/interpret/place.rs @@ -127,7 +127,7 @@ impl MemPlace { /// Produces a Place that will error if attempted to be read from or written to #[inline(always)] pub fn null(cx: &impl HasDataLayout) -> Self { - Self::from_scalar_ptr(Scalar::ptr_null(cx), Align::from_bytes(1, 1).unwrap()) + Self::from_scalar_ptr(Scalar::ptr_null(cx), Align::from_bytes(1).unwrap()) } #[inline(always)] @@ -167,8 +167,8 @@ impl<'tcx, Tag> MPlaceTy<'tcx, Tag> { pub fn dangling(layout: TyLayout<'tcx>, cx: &impl HasDataLayout) -> Self { MPlaceTy { mplace: MemPlace::from_scalar_ptr( - Scalar::from_uint(layout.align.abi(), cx.pointer_size()), - layout.align + 
Scalar::from_uint(layout.align.abi.bytes(), cx.pointer_size()), + layout.align.abi ), layout } @@ -176,7 +176,7 @@ impl<'tcx, Tag> MPlaceTy<'tcx, Tag> { #[inline] fn from_aligned_ptr(ptr: Pointer, layout: TyLayout<'tcx>) -> Self { - MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.align), layout } + MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.align.abi), layout } } #[inline] @@ -287,7 +287,7 @@ where let mplace = MemPlace { ptr: val.to_scalar_ptr()?, - align: layout.align, + align: layout.align.abi, meta: val.to_meta()?, }; Ok(MPlaceTy { mplace, layout }) @@ -356,11 +356,11 @@ where // FIXME: Once we have made decisions for how to handle size and alignment // of `extern type`, this should be adapted. It is just a temporary hack // to get some code to work that probably ought to work. - field_layout.align, + field_layout.align.abi, None => bug!("Cannot compute offset for extern type field at non-0 offset"), }; - (base.meta, offset.abi_align(align)) + (base.meta, offset.align_to(align)) } else { // base.meta could be present; we might be accessing a sized field of an unsized // struct. @@ -730,7 +730,7 @@ where } self.memory.write_scalar( - ptr, ptr_align.min(dest.layout.align), scalar, dest.layout.size + ptr, ptr_align.min(dest.layout.align.abi), scalar, dest.layout.size ) } Immediate::ScalarPair(a_val, b_val) => { @@ -740,8 +740,8 @@ where dest.layout) }; let (a_size, b_size) = (a.size(self), b.size(self)); - let (a_align, b_align) = (a.align(self), b.align(self)); - let b_offset = a_size.abi_align(b_align); + let (a_align, b_align) = (a.align(self).abi, b.align(self).abi); + let b_offset = a_size.align_to(b_align); let b_ptr = ptr.offset(b_offset, self)?.into(); // It is tempting to verify `b_offset` against `layout.fields.offset(1)`, @@ -899,7 +899,7 @@ where // FIXME: What should we do here? We should definitely also tag! 
Ok(MPlaceTy::dangling(layout, self)) } else { - let ptr = self.memory.allocate(layout.size, layout.align, kind)?; + let ptr = self.memory.allocate(layout.size, layout.align.abi, kind)?; let ptr = M::tag_new_allocation(self, ptr, kind)?; Ok(MPlaceTy::from_aligned_ptr(ptr, layout)) } @@ -998,7 +998,8 @@ where if cfg!(debug_assertions) { let (size, align) = self.read_size_and_align_from_vtable(vtable)?; assert_eq!(size, layout.size); - assert_eq!(align.abi(), layout.align.abi()); // only ABI alignment is preserved + // only ABI alignment is preserved + assert_eq!(align, layout.align.abi); } let mplace = MPlaceTy { diff --git a/src/librustc_mir/interpret/terminator.rs b/src/librustc_mir/interpret/terminator.rs index 6070b31d3e7a3..fd17a4a71295b 100644 --- a/src/librustc_mir/interpret/terminator.rs +++ b/src/librustc_mir/interpret/terminator.rs @@ -401,7 +401,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> // cannot use the shim here, because that will only result in infinite recursion ty::InstanceDef::Virtual(_, idx) => { let ptr_size = self.pointer_size(); - let ptr_align = self.tcx.data_layout.pointer_align; + let ptr_align = self.tcx.data_layout.pointer_align.abi; let ptr = self.deref_operand(args[0])?; let vtable = ptr.vtable()?; let fn_ptr = self.memory.read_ptr_sized( diff --git a/src/librustc_mir/interpret/traits.rs b/src/librustc_mir/interpret/traits.rs index c5366a5ce6a4d..f11fd45b753fc 100644 --- a/src/librustc_mir/interpret/traits.rs +++ b/src/librustc_mir/interpret/traits.rs @@ -42,10 +42,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> let layout = self.layout_of(ty)?; assert!(!layout.is_unsized(), "can't create a vtable for an unsized type"); let size = layout.size.bytes(); - let align = layout.align.abi(); + let align = layout.align.abi.bytes(); let ptr_size = self.pointer_size(); - let ptr_align = self.tcx.data_layout.pointer_align; + let ptr_align = self.tcx.data_layout.pointer_align.abi; // ///////////////////////////////////////////////////////////////////////////////////////// // If you touch this code, be sure to also make the corresponding changes to // `get_vtable` in rust_codegen_llvm/meth.rs @@ -87,7 +87,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> vtable: Pointer, ) -> EvalResult<'tcx, (ty::Instance<'tcx>, ty::Ty<'tcx>)> { // we don't care about the pointee type, we just want a pointer - let pointer_align = self.tcx.data_layout.pointer_align; + let pointer_align = self.tcx.data_layout.pointer_align.abi; let drop_fn = self.memory.read_ptr_sized(vtable, pointer_align)?.to_ptr()?; let drop_instance = self.memory.get_fn(drop_fn)?; trace!("Found drop fn: {:?}", drop_instance); @@ -103,13 +103,13 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> vtable: Pointer, ) -> EvalResult<'tcx, (Size, Align)> { let pointer_size = self.pointer_size(); - let pointer_align = self.tcx.data_layout.pointer_align; + let pointer_align = self.tcx.data_layout.pointer_align.abi; let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?,pointer_align)? .to_bits(pointer_size)? as u64; let align = self.memory.read_ptr_sized( vtable.offset(pointer_size * 2, self)?, pointer_align )?.to_bits(pointer_size)? 
as u64; - Ok((Size::from_bytes(size), Align::from_bytes(align, align).unwrap())) + Ok((Size::from_bytes(size), Align::from_bytes(align).unwrap())) } } diff --git a/src/librustc_mir/interpret/validity.rs b/src/librustc_mir/interpret/validity.rs index ad7ffd291bed1..6d1cacfa1479c 100644 --- a/src/librustc_mir/interpret/validity.rs +++ b/src/librustc_mir/interpret/validity.rs @@ -355,7 +355,7 @@ impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> // for the purpose of validity, consider foreign types to have // alignment and size determined by the layout (size will be 0, // alignment should take attributes into account). - .unwrap_or_else(|| layout.size_and_align()); + .unwrap_or_else(|| (layout.size, layout.align.abi)); match self.ecx.memory.check_align(ptr, align) { Ok(_) => {}, Err(err) => { @@ -463,7 +463,7 @@ impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> // for function pointers. let non_null = self.ecx.memory.check_align( - Scalar::Ptr(ptr), Align::from_bytes(1, 1).unwrap() + Scalar::Ptr(ptr), Align::from_bytes(1).unwrap() ).is_ok() || self.ecx.memory.get_fn(ptr).is_ok(); if !non_null { diff --git a/src/librustc_mir/util/alignment.rs b/src/librustc_mir/util/alignment.rs index 8717bd08ae4a5..a96c5dd687069 100644 --- a/src/librustc_mir/util/alignment.rs +++ b/src/librustc_mir/util/alignment.rs @@ -30,7 +30,7 @@ pub fn is_disaligned<'a, 'tcx, L>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let ty = place.ty(local_decls, tcx).to_ty(tcx); match tcx.layout_raw(param_env.and(ty)) { - Ok(layout) if layout.align.abi() == 1 => { + Ok(layout) if layout.align.abi.bytes() == 1 => { // if the alignment is 1, the type can't be further // disaligned. debug!("is_disaligned({:?}) - align = 1", place); diff --git a/src/librustc_target/abi/call/arm.rs b/src/librustc_target/abi/call/arm.rs index b4ffae7385aa7..bf497c09bdc71 100644 --- a/src/librustc_target/abi/call/arm.rs +++ b/src/librustc_target/abi/call/arm.rs @@ -93,7 +93,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>, vfp: bool) } } - let align = arg.layout.align.abi(); + let align = arg.layout.align.abi.bytes(); let total = arg.layout.size; arg.cast_to(Uniform { unit: if align <= 4 { Reg::i32() } else { Reg::i64() }, diff --git a/src/librustc_target/abi/call/mips.rs b/src/librustc_target/abi/call/mips.rs index a40cb6c76f0cd..abe0bd07892d9 100644 --- a/src/librustc_target/abi/call/mips.rs +++ b/src/librustc_target/abi/call/mips.rs @@ -27,21 +27,21 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType, offset: &mut Size) { let dl = cx.data_layout(); let size = arg.layout.size; - let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align); + let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi; if arg.layout.is_aggregate() { arg.cast_to(Uniform { unit: Reg::i32(), total: size }); - if !offset.is_abi_aligned(align) { + if !offset.is_aligned(align) { arg.pad_with(Reg::i32()); } } else { arg.extend_integer_width_to(32); } - *offset = offset.abi_align(align) + size.abi_align(align); + *offset = offset.align_to(align) + size.align_to(align); } pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType) diff --git a/src/librustc_target/abi/call/mips64.rs b/src/librustc_target/abi/call/mips64.rs index adf5a3c94ea01..d375b163164de 100644 --- a/src/librustc_target/abi/call/mips64.rs +++ b/src/librustc_target/abi/call/mips64.rs @@ -118,9 +118,9 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>) // We only care about aligned doubles if let abi::Abi::Scalar(ref scalar) = field.abi { if let 
abi::Float(abi::FloatTy::F64) = scalar.value { - if offset.is_abi_aligned(dl.f64_align) { + if offset.is_aligned(dl.f64_align.abi) { // Insert enough integers to cover [last_offset, offset) - assert!(last_offset.is_abi_aligned(dl.f64_align)); + assert!(last_offset.is_aligned(dl.f64_align.abi)); for _ in 0..((offset - last_offset).bits() / 64) .min((prefix.len() - prefix_index) as u64) { diff --git a/src/librustc_target/abi/call/mod.rs b/src/librustc_target/abi/call/mod.rs index 8f9ef2544e602..489bb37fc26ff 100644 --- a/src/librustc_target/abi/call/mod.rs +++ b/src/librustc_target/abi/call/mod.rs @@ -142,23 +142,23 @@ impl Reg { match self.kind { RegKind::Integer => { match self.size.bits() { - 1 => dl.i1_align, - 2..=8 => dl.i8_align, - 9..=16 => dl.i16_align, - 17..=32 => dl.i32_align, - 33..=64 => dl.i64_align, - 65..=128 => dl.i128_align, + 1 => dl.i1_align.abi, + 2..=8 => dl.i8_align.abi, + 9..=16 => dl.i16_align.abi, + 17..=32 => dl.i32_align.abi, + 33..=64 => dl.i64_align.abi, + 65..=128 => dl.i128_align.abi, _ => panic!("unsupported integer: {:?}", self) } } RegKind::Float => { match self.size.bits() { - 32 => dl.f32_align, - 64 => dl.f64_align, + 32 => dl.f32_align.abi, + 64 => dl.f64_align.abi, _ => panic!("unsupported float: {:?}", self) } } - RegKind::Vector => dl.vector_align(self.size) + RegKind::Vector => dl.vector_align(self.size).abi, } } } @@ -227,13 +227,13 @@ impl CastTarget { pub fn size(&self, cx: &C) -> Size { (self.prefix_chunk * self.prefix.iter().filter(|x| x.is_some()).count() as u64) - .abi_align(self.rest.align(cx)) + self.rest.total + .align_to(self.rest.align(cx)) + self.rest.total } pub fn align(&self, cx: &C) -> Align { self.prefix.iter() .filter_map(|x| x.map(|kind| Reg { kind, size: self.prefix_chunk }.align(cx))) - .fold(cx.data_layout().aggregate_align.max(self.rest.align(cx)), + .fold(cx.data_layout().aggregate_align.abi.max(self.rest.align(cx)), |acc, align| acc.max(align)) } } @@ -369,7 +369,7 @@ impl<'a, Ty> ArgType<'a, Ty> { attrs.pointee_size = self.layout.size; // FIXME(eddyb) We should be doing this, but at least on // i686-pc-windows-msvc, it results in wrong stack offsets. 
- // attrs.pointee_align = Some(self.layout.align); + // attrs.pointee_align = Some(self.layout.align.abi); let extra_attrs = if self.layout.is_unsized() { Some(ArgAttributes::new()) diff --git a/src/librustc_target/abi/call/powerpc.rs b/src/librustc_target/abi/call/powerpc.rs index b9b012020b7f4..a71f322632088 100644 --- a/src/librustc_target/abi/call/powerpc.rs +++ b/src/librustc_target/abi/call/powerpc.rs @@ -27,21 +27,21 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType, offset: &mut Size) { let dl = cx.data_layout(); let size = arg.layout.size; - let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align); + let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi; if arg.layout.is_aggregate() { arg.cast_to(Uniform { unit: Reg::i32(), total: size }); - if !offset.is_abi_aligned(align) { + if !offset.is_aligned(align) { arg.pad_with(Reg::i32()); } } else { arg.extend_integer_width_to(32); } - *offset = offset.abi_align(align) + size.abi_align(align); + *offset = offset.align_to(align) + size.align_to(align); } pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType) diff --git a/src/librustc_target/abi/call/powerpc64.rs b/src/librustc_target/abi/call/powerpc64.rs index f7ef1390f14de..99f07c5702a8f 100644 --- a/src/librustc_target/abi/call/powerpc64.rs +++ b/src/librustc_target/abi/call/powerpc64.rs @@ -13,7 +13,7 @@ // need to be fixed when PowerPC vector support is added. use abi::call::{FnType, ArgType, Reg, RegKind, Uniform}; -use abi::{Align, Endian, HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods}; +use abi::{Endian, HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods}; use spec::HasTargetSpec; #[derive(Debug, Clone, Copy, PartialEq)] @@ -120,8 +120,8 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>, abi: ABI) } else { // Aggregates larger than a doubleword should be padded // at the tail to fill out a whole number of doublewords. 
- let align = Align::from_bits(64, 64).unwrap(); - (Reg::i64(), size.abi_align(align)) + let reg_i64 = Reg::i64(); + (reg_i64, size.align_to(reg_i64.align(cx))) }; arg.cast_to(Uniform { diff --git a/src/librustc_target/abi/call/sparc.rs b/src/librustc_target/abi/call/sparc.rs index a40cb6c76f0cd..abe0bd07892d9 100644 --- a/src/librustc_target/abi/call/sparc.rs +++ b/src/librustc_target/abi/call/sparc.rs @@ -27,21 +27,21 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType, offset: &mut Size) { let dl = cx.data_layout(); let size = arg.layout.size; - let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align); + let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi; if arg.layout.is_aggregate() { arg.cast_to(Uniform { unit: Reg::i32(), total: size }); - if !offset.is_abi_aligned(align) { + if !offset.is_aligned(align) { arg.pad_with(Reg::i32()); } } else { arg.extend_integer_width_to(32); } - *offset = offset.abi_align(align) + size.abi_align(align); + *offset = offset.align_to(align) + size.align_to(align); } pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType) diff --git a/src/librustc_target/abi/call/x86_64.rs b/src/librustc_target/abi/call/x86_64.rs index 4c9446508939e..f091f80924d59 100644 --- a/src/librustc_target/abi/call/x86_64.rs +++ b/src/librustc_target/abi/call/x86_64.rs @@ -41,7 +41,7 @@ fn classify_arg<'a, Ty, C>(cx: &C, arg: &ArgType<'a, Ty>) where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf> + HasDataLayout { - if !off.is_abi_aligned(layout.align) { + if !off.is_aligned(layout.align.abi) { if !layout.is_zst() { return Err(Memory); } diff --git a/src/librustc_target/abi/mod.rs b/src/librustc_target/abi/mod.rs index 22afb0da05bc1..50ce0ad6915da 100644 --- a/src/librustc_target/abi/mod.rs +++ b/src/librustc_target/abi/mod.rs @@ -13,7 +13,7 @@ pub use self::Primitive::*; use spec::Target; -use std::{cmp, fmt}; +use std::fmt; use std::ops::{Add, Deref, Sub, Mul, AddAssign, Range, RangeInclusive}; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; @@ -24,42 +24,44 @@ pub mod call; /// for a target, which contains everything needed to compute layouts. pub struct TargetDataLayout { pub endian: Endian, - pub i1_align: Align, - pub i8_align: Align, - pub i16_align: Align, - pub i32_align: Align, - pub i64_align: Align, - pub i128_align: Align, - pub f32_align: Align, - pub f64_align: Align, + pub i1_align: AbiAndPrefAlign, + pub i8_align: AbiAndPrefAlign, + pub i16_align: AbiAndPrefAlign, + pub i32_align: AbiAndPrefAlign, + pub i64_align: AbiAndPrefAlign, + pub i128_align: AbiAndPrefAlign, + pub f32_align: AbiAndPrefAlign, + pub f64_align: AbiAndPrefAlign, pub pointer_size: Size, - pub pointer_align: Align, - pub aggregate_align: Align, + pub pointer_align: AbiAndPrefAlign, + pub aggregate_align: AbiAndPrefAlign, /// Alignments for vector types. - pub vector_align: Vec<(Size, Align)>, + pub vector_align: Vec<(Size, AbiAndPrefAlign)>, + pub instruction_address_space: u32, } impl Default for TargetDataLayout { /// Creates an instance of `TargetDataLayout`. 
fn default() -> TargetDataLayout { + let align = |bits| Align::from_bits(bits).unwrap(); TargetDataLayout { endian: Endian::Big, - i1_align: Align::from_bits(8, 8).unwrap(), - i8_align: Align::from_bits(8, 8).unwrap(), - i16_align: Align::from_bits(16, 16).unwrap(), - i32_align: Align::from_bits(32, 32).unwrap(), - i64_align: Align::from_bits(32, 64).unwrap(), - i128_align: Align::from_bits(32, 64).unwrap(), - f32_align: Align::from_bits(32, 32).unwrap(), - f64_align: Align::from_bits(64, 64).unwrap(), + i1_align: AbiAndPrefAlign::new(align(8)), + i8_align: AbiAndPrefAlign::new(align(8)), + i16_align: AbiAndPrefAlign::new(align(16)), + i32_align: AbiAndPrefAlign::new(align(32)), + i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) }, + i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) }, + f32_align: AbiAndPrefAlign::new(align(32)), + f64_align: AbiAndPrefAlign::new(align(64)), pointer_size: Size::from_bits(64), - pointer_align: Align::from_bits(64, 64).unwrap(), - aggregate_align: Align::from_bits(0, 64).unwrap(), + pointer_align: AbiAndPrefAlign::new(align(64)), + aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) }, vector_align: vec![ - (Size::from_bits(64), Align::from_bits(64, 64).unwrap()), - (Size::from_bits(128), Align::from_bits(128, 128).unwrap()) + (Size::from_bits(64), AbiAndPrefAlign::new(align(64))), + (Size::from_bits(128), AbiAndPrefAlign::new(align(128))), ], instruction_address_space: 0, } @@ -94,11 +96,17 @@ impl TargetDataLayout { if s.is_empty() { return Err(format!("missing alignment for `{}` in \"data-layout\"", cause)); } + let align_from_bits = |bits| { + Align::from_bits(bits).map_err(|err| { + format!("invalid alignment for `{}` in \"data-layout\": {}", + cause, err) + }) + }; let abi = parse_bits(s[0], "alignment", cause)?; let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?; - Align::from_bits(abi, pref).map_err(|err| { - format!("invalid alignment for `{}` in \"data-layout\": {}", - cause, err) + Ok(AbiAndPrefAlign { + abi: align_from_bits(abi)?, + pref: align_from_bits(pref)?, }) }; @@ -205,7 +213,7 @@ impl TargetDataLayout { } } - pub fn vector_align(&self, vec_size: Size) -> Align { + pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign { for &(size, align) in &self.vector_align { if size == vec_size { return align; @@ -213,8 +221,7 @@ impl TargetDataLayout { } // Default to natural alignment, which is what LLVM does. // That is, use the size, rounded up to a power of 2. - let align = vec_size.bytes().next_power_of_two(); - Align::from_bytes(align, align).unwrap() + AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap()) } } @@ -270,14 +277,14 @@ impl Size { } #[inline] - pub fn abi_align(self, align: Align) -> Size { - let mask = align.abi() - 1; + pub fn align_to(self, align: Align) -> Size { + let mask = align.bytes() - 1; Size::from_bytes((self.bytes() + mask) & !mask) } #[inline] - pub fn is_abi_aligned(self, align: Align) -> bool { - let mask = align.abi() - 1; + pub fn is_aligned(self, align: Align) -> bool { + let mask = align.bytes() - 1; self.bytes() & mask == 0 } @@ -358,78 +365,45 @@ impl AddAssign for Size { } } -/// Alignment of a type in bytes, both ABI-mandated and preferred. -/// Each field is a power of two, giving the alignment a maximum value -/// of 2(28 - 1), which is limited by LLVM to a -/// maximum capacity of 229 or 536870912. 
-#[derive(Copy, Clone, PartialEq, Eq, Ord, PartialOrd, Hash, Debug, RustcEncodable, RustcDecodable)] +/// Alignment of a type in bytes (always a power of two). +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct Align { - abi_pow2: u8, - pref_pow2: u8, + pow2: u8, } impl Align { - pub fn from_bits(abi: u64, pref: u64) -> Result { - Align::from_bytes(Size::from_bits(abi).bytes(), - Size::from_bits(pref).bytes()) - } - - pub fn from_bytes(abi: u64, pref: u64) -> Result { - let log2 = |align: u64| { - // Treat an alignment of 0 bytes like 1-byte alignment. - if align == 0 { - return Ok(0); - } - - let mut bytes = align; - let mut pow: u8 = 0; - while (bytes & 1) == 0 { - pow += 1; - bytes >>= 1; - } - if bytes != 1 { - Err(format!("`{}` is not a power of 2", align)) - } else if pow > 29 { - Err(format!("`{}` is too large", align)) - } else { - Ok(pow) - } - }; - - Ok(Align { - abi_pow2: log2(abi)?, - pref_pow2: log2(pref)?, - }) + pub fn from_bits(bits: u64) -> Result { + Align::from_bytes(Size::from_bits(bits).bytes()) } - pub fn abi(self) -> u64 { - 1 << self.abi_pow2 - } - - pub fn pref(self) -> u64 { - 1 << self.pref_pow2 - } + pub fn from_bytes(align: u64) -> Result { + // Treat an alignment of 0 bytes like 1-byte alignment. + if align == 0 { + return Ok(Align { pow2: 0 }); + } - pub fn abi_bits(self) -> u64 { - self.abi() * 8 - } + let mut bytes = align; + let mut pow2: u8 = 0; + while (bytes & 1) == 0 { + pow2 += 1; + bytes >>= 1; + } + if bytes != 1 { + return Err(format!("`{}` is not a power of 2", align)); + } + if pow2 > 29 { + return Err(format!("`{}` is too large", align)); + } - pub fn pref_bits(self) -> u64 { - self.pref() * 8 + Ok(Align { pow2 }) } - pub fn min(self, other: Align) -> Align { - Align { - abi_pow2: cmp::min(self.abi_pow2, other.abi_pow2), - pref_pow2: cmp::min(self.pref_pow2, other.pref_pow2), - } + pub fn bytes(self) -> u64 { + 1 << self.pow2 } - pub fn max(self, other: Align) -> Align { - Align { - abi_pow2: cmp::max(self.abi_pow2, other.abi_pow2), - pref_pow2: cmp::max(self.pref_pow2, other.pref_pow2), - } + pub fn bits(self) -> u64 { + self.bytes() * 8 } /// Compute the best alignment possible for the given offset @@ -437,10 +411,8 @@ impl Align { /// /// NB: for an offset of `0`, this happens to return `2^64`. pub fn max_for_offset(offset: Size) -> Align { - let pow2 = offset.bytes().trailing_zeros() as u8; Align { - abi_pow2: pow2, - pref_pow2: pow2, + pow2: offset.bytes().trailing_zeros() as u8, } } @@ -451,6 +423,36 @@ impl Align { } } +/// A pair of aligments, ABI-mandated and preferred. +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] +pub struct AbiAndPrefAlign { + pub abi: Align, + pub pref: Align, +} + +impl AbiAndPrefAlign { + pub fn new(align: Align) -> AbiAndPrefAlign { + AbiAndPrefAlign { + abi: align, + pref: align, + } + } + + pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign { + AbiAndPrefAlign { + abi: self.abi.min(other.abi), + pref: self.pref.min(other.pref), + } + } + + pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign { + AbiAndPrefAlign { + abi: self.abi.max(other.abi), + pref: self.pref.max(other.pref), + } + } +} + /// Integers, also used for enum discriminants. 
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] pub enum Integer { @@ -472,7 +474,7 @@ impl Integer { } } - pub fn align(self, cx: &C) -> Align { + pub fn align(self, cx: &C) -> AbiAndPrefAlign { let dl = cx.data_layout(); match self { @@ -507,12 +509,11 @@ impl Integer { } /// Find the smallest integer with the given alignment. - pub fn for_abi_align(cx: &C, align: Align) -> Option { + pub fn for_align(cx: &C, wanted: Align) -> Option { let dl = cx.data_layout(); - let wanted = align.abi(); for &candidate in &[I8, I16, I32, I64, I128] { - if wanted == candidate.align(dl).abi() && wanted == candidate.size().bytes() { + if wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes() { return Some(candidate); } } @@ -520,13 +521,12 @@ impl Integer { } /// Find the largest integer with the given alignment or less. - pub fn approximate_abi_align(cx: &C, align: Align) -> Integer { + pub fn approximate_align(cx: &C, wanted: Align) -> Integer { let dl = cx.data_layout(); - let wanted = align.abi(); // FIXME(eddyb) maybe include I128 in the future, when it works everywhere. for &candidate in &[I64, I32, I16] { - if wanted >= candidate.align(dl).abi() && wanted >= candidate.size().bytes() { + if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() { return candidate; } } @@ -597,7 +597,7 @@ impl<'a, 'tcx> Primitive { } } - pub fn align(self, cx: &C) -> Align { + pub fn align(self, cx: &C) -> AbiAndPrefAlign { let dl = cx.data_layout(); match self { @@ -868,7 +868,7 @@ pub struct LayoutDetails { pub variants: Variants, pub fields: FieldPlacement, pub abi: Abi, - pub align: Align, + pub align: AbiAndPrefAlign, pub size: Size } @@ -949,8 +949,4 @@ impl<'a, Ty> TyLayout<'a, Ty> { Abi::Aggregate { sized } => sized && self.size.bytes() == 0 } } - - pub fn size_and_align(&self) -> (Size, Align) { - (self.size, self.align) - } } diff --git a/src/librustc_typeck/check/mod.rs b/src/librustc_typeck/check/mod.rs index eed5d909063bd..02b89a8426850 100644 --- a/src/librustc_typeck/check/mod.rs +++ b/src/librustc_typeck/check/mod.rs @@ -1779,7 +1779,7 @@ fn check_transparent<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span, def_id: De // We are currently checking the type this field came from, so it must be local let span = tcx.hir.span_if_local(field.did).unwrap(); let zst = layout.map(|layout| layout.is_zst()).unwrap_or(false); - let align1 = layout.map(|layout| layout.align.abi() == 1).unwrap_or(false); + let align1 = layout.map(|layout| layout.align.abi.bytes() == 1).unwrap_or(false); (span, zst, align1) });
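
To summarize the API shift for readers skimming the patch: `Align` now holds a single power-of-two value, the old ABI/preferred pair moves into the new `AbiAndPrefAlign` struct, `Align::from_bytes`/`from_bits` take one argument and return the plain alignment, and the `Size` helpers `abi_align`/`is_abi_aligned` become `align_to`/`is_aligned`, taking a bare `Align` so each call site spells out `.abi` or `.pref`. The sketch below is not compiler source: it drops the rustc-specific derives, uses a stub `Size`, and adds a hypothetical `main` purely to exercise the calls, but the bodies of `Align::from_bytes`, `Size::align_to`, and `Size::is_aligned` mirror the ones introduced above.

// Standalone sketch of the post-patch alignment API (illustrative, not the
// rustc source): `Align` is a single power of two, `AbiAndPrefAlign` is the
// ABI/preferred pair, and `Size` is a stub carrying only the renamed helpers.

#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub struct Align { pow2: u8 }

impl Align {
    pub fn from_bytes(align: u64) -> Result<Align, String> {
        // Treat an alignment of 0 bytes like 1-byte alignment, as the patch does.
        if align == 0 {
            return Ok(Align { pow2: 0 });
        }
        let mut bytes = align;
        let mut pow2: u8 = 0;
        while (bytes & 1) == 0 {
            pow2 += 1;
            bytes >>= 1;
        }
        if bytes != 1 {
            return Err(format!("`{}` is not a power of 2", align));
        }
        if pow2 > 29 {
            return Err(format!("`{}` is too large", align));
        }
        Ok(Align { pow2 })
    }

    pub fn bytes(self) -> u64 { 1 << self.pow2 }
}

/// The ABI-mandated and preferred alignments, split out of the old `Align`.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct AbiAndPrefAlign { pub abi: Align, pub pref: Align }

impl AbiAndPrefAlign {
    pub fn new(align: Align) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: align, pref: align }
    }
}

/// Minimal stand-in for `Size`, just enough to show `align_to`/`is_aligned`.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct Size { raw: u64 }

impl Size {
    pub fn from_bytes(bytes: u64) -> Size { Size { raw: bytes } }
    pub fn bytes(self) -> u64 { self.raw }

    // `abi_align` -> `align_to`: rounds up to a plain `Align`; callers now pick
    // `.abi` or `.pref` explicitly at the call site.
    pub fn align_to(self, align: Align) -> Size {
        let mask = align.bytes() - 1;
        Size::from_bytes((self.bytes() + mask) & !mask)
    }

    // `is_abi_aligned` -> `is_aligned`.
    pub fn is_aligned(self, align: Align) -> bool {
        let mask = align.bytes() - 1;
        self.bytes() & mask == 0
    }
}

fn main() {
    // An i64-style pair on a 32-bit-ABI target: abi = 4 bytes, pref = 8 bytes,
    // matching the `i64_align` default above.
    let i64_align = AbiAndPrefAlign {
        abi: Align::from_bytes(4).unwrap(),
        pref: Align::from_bytes(8).unwrap(),
    };

    // Where the old code wrote `offset.abi_align(field.align)`, the new code
    // writes `offset.align_to(field.align.abi)`.
    let offset = Size::from_bytes(6).align_to(i64_align.abi);
    assert_eq!(offset.bytes(), 8);
    assert!(offset.is_aligned(i64_align.abi));

    // `new` covers the common case where ABI and preferred agree (e.g. pointers).
    let pointer_align = AbiAndPrefAlign::new(Align::from_bytes(8).unwrap());
    assert_eq!(pointer_align.abi, pointer_align.pref);

    // Non-power-of-two alignments come back as an error string.
    assert!(Align::from_bytes(24).is_err());
}

Most of the mechanical churn in the hunks above follows from this split: every old `align.abi()` becomes either `align.abi` (where an `Align` is wanted) or `align.abi.bytes()` (where the byte count is wanted), and the `min`/`max` calls now state whether they operate on the whole pair or on one component.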