diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs
index 07b7adc63bffa..ad4bb0fce22ad 100644
--- a/src/librustc_trans/abi.rs
+++ b/src/librustc_trans/abi.rs
@@ -10,7 +10,8 @@ use llvm::{self, ValueRef, Integer, Pointer, Float, Double, Struct, Array,
             Vector, AttributePlace};
 use base;
-use common::{type_is_fat_ptr, BlockAndBuilder, C_uint};
+use builder::Builder;
+use common::{type_is_fat_ptr, C_uint};
 use context::CrateContext;
 use cabi_x86;
 use cabi_x86_64;
@@ -236,7 +237,7 @@ impl ArgType {
     /// lvalue for the original Rust type of this argument/return.
     /// Can be used for both storing formal arguments into Rust variables
     /// or results of call/invoke instructions into their destinations.
-    pub fn store(&self, bcx: &BlockAndBuilder, mut val: ValueRef, dst: ValueRef) {
+    pub fn store(&self, bcx: &Builder, mut val: ValueRef, dst: ValueRef) {
         if self.is_ignore() {
             return;
         }
@@ -269,7 +270,7 @@
             // bitcasting to the struct type yields invalid cast errors.
 
             // We instead thus allocate some scratch space...
-            let llscratch = bcx.fcx().alloca(ty, "abi_cast");
+            let llscratch = bcx.alloca(ty, "abi_cast");
             base::Lifetime::Start.call(bcx, llscratch);
 
             // ...where we first store the value...
@@ -293,14 +294,14 @@ impl ArgType {
         }
     }
 
-    pub fn store_fn_arg(&self, bcx: &BlockAndBuilder, idx: &mut usize, dst: ValueRef) {
+    pub fn store_fn_arg(&self, bcx: &Builder, idx: &mut usize, dst: ValueRef) {
         if self.pad.is_some() {
             *idx += 1;
         }
         if self.is_ignore() {
             return;
         }
-        let val = llvm::get_param(bcx.fcx().llfn, *idx as c_uint);
+        let val = llvm::get_param(bcx.llfn(), *idx as c_uint);
         *idx += 1;
         self.store(bcx, val, dst);
     }
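
An illustrative usage sketch (not part of this patch) of the refactored `store_fn_arg`: spilling each formal argument into its alloca now needs only a `Builder`, not a `FunctionContext`. `fn_ty` and `arg_allocas` are hypothetical locals.

    let mut idx = fn_ty.ret.is_indirect() as usize;  // skip the sret slot, if any
    for (arg, &dst) in fn_ty.args.iter().zip(arg_allocas.iter()) {
        arg.store_fn_arg(&bcx, &mut idx, dst);       // advances idx past pads itself
    }
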
diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs
index e08f29d24729c..c3b9a56ac9778 100644
--- a/src/librustc_trans/adt.rs
+++ b/src/librustc_trans/adt.rs
@@ -49,53 +49,20 @@ use llvm::{ValueRef, True, IntEQ, IntNE};
 use rustc::ty::layout;
 use rustc::ty::{self, Ty, AdtKind};
 use common::*;
-use glue;
+use builder::Builder;
 use base;
 use machine;
 use monomorphize;
 use type_::Type;
 use type_of;
-use value::Value;
-
-#[derive(Copy, Clone, PartialEq)]
-pub enum BranchKind {
-    Switch,
-    Single
-}
-
-#[derive(Copy, Clone)]
-pub struct MaybeSizedValue {
-    pub value: ValueRef,
-    pub meta: ValueRef,
-}
-
-impl MaybeSizedValue {
-    pub fn sized(value: ValueRef) -> MaybeSizedValue {
-        MaybeSizedValue {
-            value: value,
-            meta: std::ptr::null_mut()
-        }
-    }
-
-    pub fn unsized_(value: ValueRef, meta: ValueRef) -> MaybeSizedValue {
-        MaybeSizedValue {
-            value: value,
-            meta: meta
-        }
-    }
-
-    pub fn has_meta(&self) -> bool {
-        !self.meta.is_null()
-    }
-}
 
 /// Given an enum, struct, closure, or tuple, extracts fields.
 /// Treats closures as a struct with one variant.
 /// `empty_if_no_variants` is a switch to deal with empty enums.
 /// If true, `variant_index` is disregarded and an empty Vec returned in this case.
-fn compute_fields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>,
-                            variant_index: usize,
-                            empty_if_no_variants: bool) -> Vec<Ty<'tcx>> {
+pub fn compute_fields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>,
+                                variant_index: usize,
+                                empty_if_no_variants: bool) -> Vec<Ty<'tcx>> {
     match t.sty {
         ty::TyAdt(ref def, _) if def.variants.len() == 0 && empty_if_no_variants => {
             Vec::default()
@@ -300,28 +267,6 @@ fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fields: &Vec<Ty<'tcx>>,
     }
 }
 
-/// Obtain a representation of the discriminant sufficient to translate
-/// destructuring; this may or may not involve the actual discriminant.
-pub fn trans_switch<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>,
-    t: Ty<'tcx>,
-    scrutinee: ValueRef,
-    range_assert: bool
-) -> (BranchKind, Option<ValueRef>) {
-    let l = bcx.ccx.layout_of(t);
-    match *l {
-        layout::CEnum { .. } | layout::General { .. } |
-        layout::RawNullablePointer { .. } | layout::StructWrappedNullablePointer { .. } => {
-            (BranchKind::Switch, Some(trans_get_discr(bcx, t, scrutinee, None, range_assert)))
-        }
-        layout::Univariant { .. } | layout::UntaggedUnion { .. } => {
-            // N.B.: Univariant means <= 1 enum variants (*not* == 1 variants).
-            (BranchKind::Single, None)
-        },
-        _ => bug!("{} is not an enum.", t)
-    }
-}
-
 pub fn is_discr_signed<'tcx>(l: &layout::Layout) -> bool {
     match *l {
         layout::CEnum { signed, .. }=> signed,
@@ -331,7 +276,7 @@ pub fn is_discr_signed<'tcx>(l: &layout::Layout) -> bool {
 
 /// Obtain the actual discriminant of a value.
 pub fn trans_get_discr<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>,
+    bcx: &Builder<'a, 'tcx>,
     t: Ty<'tcx>,
     scrutinee: ValueRef,
     cast_to: Option<Type>,
@@ -358,7 +303,7 @@ pub fn trans_get_discr<'a, 'tcx>(
         layout::RawNullablePointer { nndiscr, .. } => {
             let cmp = if nndiscr == 0 { IntEQ } else { IntNE };
             let llptrty = type_of::sizing_type_of(bcx.ccx,
-                monomorphize::field_ty(bcx.ccx.tcx(), substs,
+                monomorphize::field_ty(bcx.tcx(), substs,
                 &def.variants[nndiscr as usize].fields[0]));
             bcx.icmp(cmp, bcx.load(scrutinee), C_null(llptrty))
         }
@@ -374,7 +319,7 @@ pub fn trans_get_discr<'a, 'tcx>(
 }
 
 fn struct_wrapped_nullable_bitdiscr(
-    bcx: &BlockAndBuilder,
+    bcx: &Builder,
     nndiscr: u64,
     discrfield: &layout::FieldPath,
     scrutinee: ValueRef
@@ -387,7 +332,7 @@ fn struct_wrapped_nullable_bitdiscr(
 }
 
 /// Helper for cases where the discriminant is simply loaded.
-fn load_discr(bcx: &BlockAndBuilder, ity: layout::Integer, ptr: ValueRef, min: u64, max: u64,
+fn load_discr(bcx: &Builder, ity: layout::Integer, ptr: ValueRef, min: u64, max: u64,
               range_assert: bool)
     -> ValueRef {
     let llty = Type::from_integer(bcx.ccx, ity);
@@ -415,7 +360,7 @@ fn load_discr(bcx: &BlockAndBuilder, ity: layout::Integer, ptr: ValueRef, min: u
 /// discriminant-like value returned by `trans_switch`.
 ///
 /// This should ideally be less tightly tied to `_match`.
-pub fn trans_case<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, t: Ty<'tcx>, value: Disr) -> ValueRef {
+pub fn trans_case<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, value: Disr) -> ValueRef {
     let l = bcx.ccx.layout_of(t);
     match *l {
         layout::CEnum { discr, .. }
@@ -435,9 +380,7 @@ pub fn trans_case<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, t: Ty<'tcx>, value:
 
 /// Set the discriminant for a new value of the given case of the given
 /// representation.
-pub fn trans_set_discr<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>, t: Ty<'tcx>, val: ValueRef, to: Disr
-) {
+pub fn trans_set_discr<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, val: ValueRef, to: Disr) {
     let l = bcx.ccx.layout_of(t);
     match *l {
         layout::CEnum{ discr, min, max, .. } => {
@@ -484,11 +427,11 @@ pub fn trans_set_discr<'a, 'tcx>(
     }
 }
 
-fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>) -> bool {
+fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &Builder<'a, 'tcx>) -> bool {
     bcx.sess().target.target.arch == "arm" || bcx.sess().target.target.arch == "aarch64"
 }
 
-fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) {
+pub fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) {
     if min <= max {
         assert!(min <= discr && discr <= max)
     } else {
@@ -496,303 +439,6 @@ fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) {
     }
 }
 
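The wrapped-range case in `assert_discr_in_range` above is easy to misread, so here is the same predicate as a standalone plain-Rust sketch (hypothetical helper, not part of the patch): when `min > max`, the valid discriminants wrap around the integer boundary.

    fn in_range(min: u64, max: u64, discr: u64) -> bool {
        if min <= max {
            min <= discr && discr <= max   // ordinary range
        } else {
            discr >= min || discr <= max   // wrapped range, e.g. 250..=255 and 0..=5
        }
    }
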
-/// Access a field, at a point when the value's case is known.
-pub fn trans_field_ptr<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>,
-    t: Ty<'tcx>,
-    val: MaybeSizedValue,
-    discr: Disr,
-    ix: usize
-) -> ValueRef {
-    let l = bcx.ccx.layout_of(t);
-    debug!("trans_field_ptr on {} represented as {:#?}", t, l);
-    // Note: if this ever needs to generate conditionals (e.g., if we
-    // decide to do some kind of cdr-coding-like non-unique repr
-    // someday), it will need to return a possibly-new bcx as well.
-    match *l {
-        layout::Univariant { ref variant, .. } => {
-            assert_eq!(discr, Disr(0));
-            struct_field_ptr(bcx, &variant,
-                             &compute_fields(bcx.ccx, t, 0, false),
-                             val, ix, false)
-        }
-        layout::Vector { count, .. } => {
-            assert_eq!(discr.0, 0);
-            assert!((ix as u64) < count);
-            bcx.struct_gep(val.value, ix)
-        }
-        layout::General { discr: d, ref variants, .. } => {
-            let mut fields = compute_fields(bcx.ccx, t, discr.0 as usize, false);
-            fields.insert(0, d.to_ty(&bcx.ccx.tcx(), false));
-            struct_field_ptr(bcx, &variants[discr.0 as usize],
-                             &fields,
-                             val, ix + 1, true)
-        }
-        layout::UntaggedUnion { .. } => {
-            let fields = compute_fields(bcx.ccx, t, 0, false);
-            let ty = type_of::in_memory_type_of(bcx.ccx, fields[ix]);
-            bcx.pointercast(val.value, ty.ptr_to())
-        }
-        layout::RawNullablePointer { nndiscr, .. } |
-        layout::StructWrappedNullablePointer { nndiscr, .. } if discr.0 != nndiscr => {
-            let nullfields = compute_fields(bcx.ccx, t, (1-nndiscr) as usize, false);
-            // The unit-like case might have a nonzero number of unit-like fields.
-            // (e.g., Result of Either with (), as one side.)
-            let ty = type_of::type_of(bcx.ccx, nullfields[ix]);
-            assert_eq!(machine::llsize_of_alloc(bcx.ccx, ty), 0);
-            bcx.pointercast(val.value, ty.ptr_to())
-        }
-        layout::RawNullablePointer { nndiscr, .. } => {
-            let nnty = compute_fields(bcx.ccx, t, nndiscr as usize, false)[0];
-            assert_eq!(ix, 0);
-            assert_eq!(discr.0, nndiscr);
-            let ty = type_of::type_of(bcx.ccx, nnty);
-            bcx.pointercast(val.value, ty.ptr_to())
-        }
-        layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
-            assert_eq!(discr.0, nndiscr);
-            struct_field_ptr(bcx, &nonnull,
-                             &compute_fields(bcx.ccx, t, discr.0 as usize, false),
-                             val, ix, false)
-        }
-        _ => bug!("element access in type without elements: {} represented as {:#?}", t, l)
-    }
-}
-
-fn struct_field_ptr<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>,
-    st: &layout::Struct,
-    fields: &Vec<Ty<'tcx>>,
-    val: MaybeSizedValue,
-    ix: usize,
-    needs_cast: bool
-) -> ValueRef {
-    let fty = fields[ix];
-    let ccx = bcx.ccx;
-
-    let ptr_val = if needs_cast {
-        let fields = st.field_index_by_increasing_offset().map(|i| {
-            type_of::in_memory_type_of(ccx, fields[i])
-        }).collect::<Vec<_>>();
-        let real_ty = Type::struct_(ccx, &fields[..], st.packed);
-        bcx.pointercast(val.value, real_ty.ptr_to())
-    } else {
-        val.value
-    };
-
-    // Simple case - we can just GEP the field
-    //   * First field - Always aligned properly
-    //   * Packed struct - There is no alignment padding
-    //   * Field is sized - pointer is properly aligned already
-    if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed ||
-        bcx.ccx.shared().type_is_sized(fty) {
-        return bcx.struct_gep(ptr_val, st.memory_index[ix] as usize);
-    }
-
-    // If the type of the last field is [T] or str, then we don't need to do
-    // any adjustments
-    match fty.sty {
-        ty::TySlice(..) | ty::TyStr => {
-            return bcx.struct_gep(ptr_val, st.memory_index[ix] as usize);
-        }
-        _ => ()
-    }
-
-    // There's no metadata available, log the case and just do the GEP.
-    if !val.has_meta() {
-        debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment",
-               ix, Value(ptr_val));
-        return bcx.struct_gep(ptr_val, ix);
-    }
-
-    // We need to get the pointer manually now.
-    // We do this by casting to a *i8, then offsetting it by the appropriate amount.
-    // We do this instead of, say, simply adjusting the pointer from the result of a GEP
-    // because the field may have an arbitrary alignment in the LLVM representation
-    // anyway.
-    //
-    // To demonstrate:
-    //   struct Foo<T: ?Sized> {
-    //      x: u16,
-    //      y: T
-    //   }
-    //
-    // The type Foo<Foo<str>> is represented in LLVM as { u16, { u16, u8 }}, meaning that
-    // the `y` field has 16-bit alignment.
-
-    let meta = val.meta;
-
-    let offset = st.offsets[ix].bytes();
-    let unaligned_offset = C_uint(bcx.ccx, offset);
-
-    // Get the alignment of the field
-    let (_, align) = glue::size_and_align_of_dst(bcx, fty, meta);
-
-    // Bump the unaligned offset up to the appropriate alignment using the
-    // following expression:
-    //
-    //   (unaligned offset + (align - 1)) & -align
-
-    // Calculate offset
-    let align_sub_1 = bcx.sub(align, C_uint(bcx.ccx, 1u64));
-    let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1),
-                         bcx.neg(align));
-
-    debug!("struct_field_ptr: DST field offset: {:?}", Value(offset));
-
-    // Cast and adjust pointer
-    let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(bcx.ccx));
-    let byte_ptr = bcx.gep(byte_ptr, &[offset]);
-
-    // Finally, cast back to the type expected
-    let ll_fty = type_of::in_memory_type_of(bcx.ccx, fty);
-    debug!("struct_field_ptr: Field type is {:?}", ll_fty);
-    bcx.pointercast(byte_ptr, ll_fty.ptr_to())
-}
-
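The `(unaligned offset + (align - 1)) & -align` expression in the removed code is the usual power-of-two align-up trick; a minimal plain-Rust equivalent for orientation (illustrative, assuming `align` is a power of two):

    fn align_up(offset: u64, align: u64) -> u64 {
        debug_assert!(align.is_power_of_two());
        (offset + (align - 1)) & align.wrapping_neg()  // wrapping_neg() is two's-complement -align
    }
    // align_up(10, 8) == 16, align_up(16, 8) == 16
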
-/// Construct a constant value, suitable for initializing a
-/// GlobalVariable, given a case and constant values for its fields.
-/// Note that this may have a different LLVM type (and different
-/// alignment!) from the representation's `type_of`, so it needs a
-/// pointer cast before use.
-///
-/// The LLVM type system does not directly support unions, and only
-/// pointers can be bitcast, so a constant (and, by extension, the
-/// GlobalVariable initialized by it) will have a type that can vary
-/// depending on which case of an enum it is.
-///
-/// To understand the alignment situation, consider `enum E { V64(u64),
-/// V32(u32, u32) }` on Windows. The type has 8-byte alignment to
-/// accommodate the u64, but `V32(x, y)` would have LLVM type `{i32,
-/// i32, i32}`, which is 4-byte aligned.
-///
-/// Currently the returned value has the same size as the type, but
-/// this could be changed in the future to avoid allocating unnecessary
-/// space after values of shorter-than-maximum cases.
-pub fn trans_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>, discr: Disr,
-                             vals: &[ValueRef]) -> ValueRef {
-    let l = ccx.layout_of(t);
-    let dl = &ccx.tcx().data_layout;
-    match *l {
-        layout::CEnum { discr: d, min, max, .. } => {
-            assert_eq!(vals.len(), 0);
-            assert_discr_in_range(Disr(min), Disr(max), discr);
-            C_integral(Type::from_integer(ccx, d), discr.0, true)
-        }
-        layout::General { discr: d, ref variants, .. } => {
-            let variant = &variants[discr.0 as usize];
-            let lldiscr = C_integral(Type::from_integer(ccx, d), discr.0 as u64, true);
-            let mut vals_with_discr = vec![lldiscr];
-            vals_with_discr.extend_from_slice(vals);
-            let mut contents = build_const_struct(ccx, &variant, &vals_with_discr[..]);
-            let needed_padding = l.size(dl).bytes() - variant.stride().bytes();
-            if needed_padding > 0 {
-                contents.push(padding(ccx, needed_padding));
-            }
-            C_struct(ccx, &contents[..], false)
-        }
-        layout::UntaggedUnion { ref variants, .. }=> {
-            assert_eq!(discr, Disr(0));
-            let contents = build_const_union(ccx, variants, vals[0]);
-            C_struct(ccx, &contents, variants.packed)
-        }
-        layout::Univariant { ref variant, .. } => {
-            assert_eq!(discr, Disr(0));
-            let contents = build_const_struct(ccx, &variant, vals);
-            C_struct(ccx, &contents[..], variant.packed)
-        }
-        layout::Vector { .. } => {
-            C_vector(vals)
-        }
-        layout::RawNullablePointer { nndiscr, .. } => {
-            let nnty = compute_fields(ccx, t, nndiscr as usize, false)[0];
-            if discr.0 == nndiscr {
-                assert_eq!(vals.len(), 1);
-                vals[0]
-            } else {
-                C_null(type_of::sizing_type_of(ccx, nnty))
-            }
-        }
-        layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
-            if discr.0 == nndiscr {
-                C_struct(ccx, &build_const_struct(ccx, &nonnull, vals), false)
-            } else {
-                let fields = compute_fields(ccx, t, nndiscr as usize, false);
-                let vals = fields.iter().map(|&ty| {
-                    // Always use null even if it's not the `discrfield`th
-                    // field; see #8506.
-                    C_null(type_of::sizing_type_of(ccx, ty))
-                }).collect::<Vec<_>>();
-                C_struct(ccx, &build_const_struct(ccx, &nonnull, &vals[..]), false)
-            }
-        }
-        _ => bug!("trans_const: cannot handle type {} represented as {:#?}", t, l)
-    }
-}
-
-/// Building structs is a little complicated, because we might need to
-/// insert padding if a field's value is less aligned than its type.
-///
-/// Continuing the example from `trans_const`, a value of type `(u32,
-/// E)` should have the `E` at offset 8, but if that field's
-/// initializer is 4-byte aligned then simply translating the tuple as
-/// a two-element struct will locate it at offset 4, and accesses to it
-/// will read the wrong memory.
-fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                st: &layout::Struct,
-                                vals: &[ValueRef])
-                                -> Vec<ValueRef> {
-    assert_eq!(vals.len(), st.offsets.len());
-
-    if vals.len() == 0 {
-        return Vec::new();
-    }
-
-    // offset of current value
-    let mut offset = 0;
-    let mut cfields = Vec::new();
-    cfields.reserve(st.offsets.len()*2);
-
-    let parts = st.field_index_by_increasing_offset().map(|i| {
-        (&vals[i], st.offsets[i].bytes())
-    });
-    for (&val, target_offset) in parts {
-        if offset < target_offset {
-            cfields.push(padding(ccx, target_offset - offset));
-            offset = target_offset;
-        }
-        assert!(!is_undef(val));
-        cfields.push(val);
-        offset += machine::llsize_of_alloc(ccx, val_ty(val));
-    }
-
-    if offset < st.stride().bytes() {
-        cfields.push(padding(ccx, st.stride().bytes() - offset));
-    }
-
-    cfields
-}
-
-fn build_const_union<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                               un: &layout::Union,
-                               field_val: ValueRef)
-                               -> Vec<ValueRef> {
-    let mut cfields = vec![field_val];
-
-    let offset = machine::llsize_of_alloc(ccx, val_ty(field_val));
-    let size = un.stride().bytes();
-    if offset != size {
-        cfields.push(padding(ccx, size - offset));
-    }
-
-    cfields
-}
-
-fn padding(ccx: &CrateContext, size: u64) -> ValueRef {
-    C_undef(Type::array(&Type::i8(ccx), size))
-}
-
 // FIXME this utility routine should be somewhere more general
 #[inline]
 fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a }
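
A plain-Rust sketch of the padding rule that the removed `build_const_struct` implements (hypothetical helper, sizes in bytes): walk fields in offset order, insert padding whenever the previous value ended short of the next field's target offset, then pad the tail up to the stride.

    // Returns (length_in_bytes, is_padding) runs for the constant's layout.
    fn padded_runs(sizes: &[u64], offsets: &[u64], stride: u64) -> Vec<(u64, bool)> {
        let mut runs = Vec::new();
        let mut offset = 0;
        for (&size, &target) in sizes.iter().zip(offsets) {
            if offset < target {
                runs.push((target - offset, true));  // padding up to the field's offset
                offset = target;
            }
            runs.push((size, false));                // the field's constant itself
            offset += size;
        }
        if offset < stride {
            runs.push((stride - offset, true));      // tail padding up to the stride
        }
        runs
    }
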
diff --git a/src/librustc_trans/asm.rs b/src/librustc_trans/asm.rs
index 05699fb9de9a5..c95d414701876 100644
--- a/src/librustc_trans/asm.rs
+++ b/src/librustc_trans/asm.rs
@@ -15,6 +15,7 @@ use base;
 use common::*;
 use type_of;
 use type_::Type;
+use builder::Builder;
 
 use rustc::hir;
 use rustc::ty::Ty;
@@ -25,7 +26,7 @@ use libc::{c_uint, c_char};
 
 // Take an inline assembly expression and splat it out via LLVM
 pub fn trans_inline_asm<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>,
+    bcx: &Builder<'a, 'tcx>,
     ia: &hir::InlineAsm,
     outputs: Vec<(ValueRef, Ty<'tcx>)>,
     mut inputs: Vec<ValueRef>
diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs
index 2806be123a936..4cdde24ed48b5 100644
--- a/src/librustc_trans/base.rs
+++ b/src/librustc_trans/base.rs
@@ -37,8 +37,9 @@ use llvm;
 use rustc::hir::def_id::{DefId, LOCAL_CRATE};
 use middle::lang_items::StartFnLangItem;
 use rustc::ty::subst::Substs;
+use rustc::mir::tcx::LvalueTy;
 use rustc::traits;
-use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
+use rustc::ty::{self, Ty, TyCtxt};
 use rustc::ty::adjustment::CustomCoerceUnsized;
 use rustc::dep_graph::{DepNode, WorkProduct};
 use rustc::hir::map as hir_map;
@@ -47,14 +48,15 @@ use session::config::{self, NoDebugInfo};
 use rustc_incremental::IncrementalHashesMap;
 use session::{self, DataTypeKind, Session};
 use abi::{self, Abi, FnType};
+use mir::lvalue::LvalueRef;
 use adt;
 use attributes;
 use builder::Builder;
 use callee::{Callee};
-use common::{BlockAndBuilder, C_bool, C_bytes_in_context, C_i32, C_uint};
+use common::{C_bool, C_bytes_in_context, C_i32, C_uint};
 use collector::{self, TransItemCollectionMode};
 use common::{C_struct_in_context, C_u64, C_undef};
-use common::{CrateContext, FunctionContext};
+use common::CrateContext;
 use common::{fulfill_obligation};
 use common::{type_is_zero_size, val_ty};
 use common;
@@ -161,7 +163,7 @@ pub fn bin_op_to_fcmp_predicate(op: hir::BinOp_) -> llvm::RealPredicate {
 }
 
 pub fn compare_simd_types<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>,
+    bcx: &Builder<'a, 'tcx>,
     lhs: ValueRef,
     rhs: ValueRef,
     t: Ty<'tcx>,
@@ -218,7 +220,7 @@ pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>,
 
 /// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer.
 pub fn unsize_thin_ptr<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>,
+    bcx: &Builder<'a, 'tcx>,
     src: ValueRef,
     src_ty: Ty<'tcx>,
    dst_ty: Ty<'tcx>
@@ -242,7 +244,7 @@ pub fn unsize_thin_ptr<'a, 'tcx>(
 
 /// Coerce `src`, which is a reference to a value of type `src_ty`,
 /// to a value of type `dst_ty` and store the result in `dst`
-pub fn coerce_unsized_into<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
+pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                                      src: ValueRef,
                                      src_ty: Ty<'tcx>,
                                      dst: ValueRef,
@@ -278,8 +280,8 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                 monomorphize::field_ty(bcx.tcx(), substs_b, f)
             });
 
-            let src = adt::MaybeSizedValue::sized(src);
-            let dst = adt::MaybeSizedValue::sized(dst);
+            let src = LvalueRef::new_sized_ty(src, src_ty);
+            let dst = LvalueRef::new_sized_ty(dst, dst_ty);
 
             let iter = src_fields.zip(dst_fields).enumerate();
             for (i, (src_fty, dst_fty)) in iter {
@@ -287,8 +289,8 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                     continue;
                 }
 
-                let src_f = adt::trans_field_ptr(bcx, src_ty, src, Disr(0), i);
-                let dst_f = adt::trans_field_ptr(bcx, dst_ty, dst, Disr(0), i);
+                let src_f = src.trans_field_ptr(bcx, i);
+                let dst_f = dst.trans_field_ptr(bcx, i);
                 if src_fty == dst_fty {
                     memcpy_ty(bcx, dst_f, src_f, src_fty, None);
                 } else {
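
What `unsize_thin_ptr` and `coerce_unsized_into` implement, at the source level (ordinary Rust, for orientation only; not compiler API): a thin pointer picks up metadata when coerced to an unsized type.

    fn main() {
        let a: &[u8; 4] = &[1, 2, 3, 4];
        let s: &[u8] = a;        // thin pointer gains a length: (data, 4)
        assert_eq!(s.len(), 4);
    }
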
@@ -322,7 +324,7 @@ pub fn custom_coerce_unsize_info<'scx, 'tcx>(scx: &SharedCrateContext<'scx, 'tcx>,
 }
 
 pub fn cast_shift_expr_rhs(
-    cx: &BlockAndBuilder, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef
+    cx: &Builder, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef
 ) -> ValueRef {
     cast_shift_rhs(op, lhs, rhs, |a, b| cx.trunc(a, b), |a, b| cx.zext(a, b))
 }
@@ -421,7 +423,7 @@ pub fn load_ty<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> V
 
 /// Helper for storing values in memory. Does the necessary conversion if the in-memory type
 /// differs from the type used for SSA values.
-pub fn store_ty<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) {
+pub fn store_ty<'a, 'tcx>(cx: &Builder<'a, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) {
     debug!("store_ty: {:?} : {:?} <- {:?}", Value(dst), t, Value(v));
 
     if common::type_is_fat_ptr(cx.ccx, t) {
@@ -433,7 +435,7 @@ pub fn store_ty<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>, v: ValueRef, dst: Valu
     }
 }
 
-pub fn store_fat_ptr<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>,
+pub fn store_fat_ptr<'a, 'tcx>(cx: &Builder<'a, 'tcx>,
                                data: ValueRef,
                                extra: ValueRef,
                                dst: ValueRef,
@@ -459,7 +461,7 @@ pub fn load_fat_ptr<'a, 'tcx>(
     (ptr, meta)
 }
 
-pub fn from_immediate(bcx: &BlockAndBuilder, val: ValueRef) -> ValueRef {
+pub fn from_immediate(bcx: &Builder, val: ValueRef) -> ValueRef {
     if val_ty(val) == Type::i1(bcx.ccx) {
         bcx.zext(val, Type::i8(bcx.ccx))
     } else {
@@ -467,7 +469,7 @@ pub fn from_immediate(bcx: &Builder, val: ValueRef) -> ValueRef {
     }
 }
 
-pub fn to_immediate(bcx: &BlockAndBuilder, val: ValueRef, ty: Ty) -> ValueRef {
+pub fn to_immediate(bcx: &Builder, val: ValueRef, ty: Ty) -> ValueRef {
     if ty.is_bool() {
         bcx.trunc(val, Type::i1(bcx.ccx))
     } else {
@@ -523,11 +525,13 @@ pub fn call_memcpy<'a, 'tcx>(b: &Builder<'a, 'tcx>,
     b.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None);
 }
 
-pub fn memcpy_ty<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
-                           dst: ValueRef,
-                           src: ValueRef,
-                           t: Ty<'tcx>,
-                           align: Option<u32>) {
+pub fn memcpy_ty<'a, 'tcx>(
+    bcx: &Builder<'a, 'tcx>,
+    dst: ValueRef,
+    src: ValueRef,
+    t: Ty<'tcx>,
+    align: Option<u32>,
+) {
     let ccx = bcx.ccx;
 
     if type_is_zero_size(ccx, t) {
@@ -553,11 +557,6 @@ pub fn call_memset<'a, 'tcx>(b: &Builder<'a, 'tcx>,
     b.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None)
 }
 
-pub fn alloc_ty<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, ty: Ty<'tcx>, name: &str) -> ValueRef {
-    assert!(!ty.has_param_types());
-    bcx.fcx().alloca(type_of::type_of(bcx.ccx, ty), name)
-}
-
 pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance<'tcx>) {
     let _s = if ccx.sess().trans_stats() {
         let mut instance_name = String::new();
@@ -593,18 +592,17 @@ pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance
 
     let fn_ty = FnType::new(ccx, abi, &sig, &[]);
 
-    let fcx = FunctionContext::new(ccx, lldecl);
     let mir = ccx.tcx().item_mir(instance.def);
-    mir::trans_mir(&fcx, fn_ty, &mir, instance, &sig, abi);
+    mir::trans_mir(ccx, lldecl, fn_ty, &mir, instance, &sig, abi);
 }
 
 pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                  def_id: DefId,
                                  substs: &'tcx Substs<'tcx>,
                                  disr: Disr,
-                                 llfndecl: ValueRef) {
-    attributes::inline(llfndecl, attributes::InlineAttr::Hint);
-    attributes::set_frame_pointer_elimination(ccx, llfndecl);
+                                 llfn: ValueRef) {
+    attributes::inline(llfn, attributes::InlineAttr::Hint);
+    attributes::set_frame_pointer_elimination(ccx, llfn);
 
     let ctor_ty = ccx.tcx().item_type(def_id);
     let ctor_ty = monomorphize::apply_param_substs(ccx.shared(), substs, &ctor_ty);
@@ -612,24 +610,29 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
     let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&ctor_ty.fn_sig());
     let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]);
 
-    let fcx = FunctionContext::new(ccx, llfndecl);
-    let bcx = fcx.get_entry_block();
+    let bcx = Builder::new_block(ccx, llfn, "entry-block");
     if !fn_ty.ret.is_ignore() {
         // But if there are no nested returns, we skip the indirection
         // and have a single retslot
         let dest = if fn_ty.ret.is_indirect() {
-            get_param(fcx.llfn, 0)
+            get_param(llfn, 0)
         } else {
             // We create an alloca to hold a pointer of type `ret.original_ty`
             // which will hold the pointer to the right alloca which has the
             // final ret value
-            fcx.alloca(fn_ty.ret.memory_ty(ccx), "sret_slot")
+            bcx.alloca(fn_ty.ret.memory_ty(ccx), "sret_slot")
         };
+        // Can return unsized value
+        let mut dest_val = LvalueRef::new_sized_ty(dest, sig.output());
+        dest_val.ty = LvalueTy::Downcast {
+            adt_def: sig.output().ty_adt_def().unwrap(),
+            substs: substs,
+            variant_index: disr.0 as usize,
+        };
-        let dest_val = adt::MaybeSizedValue::sized(dest); // Can return unsized value
         let mut llarg_idx = fn_ty.ret.is_indirect() as usize;
         let mut arg_idx = 0;
         for (i, arg_ty) in sig.inputs().iter().enumerate() {
-            let lldestptr = adt::trans_field_ptr(&bcx, sig.output(), dest_val, Disr::from(disr), i);
+            let lldestptr = dest_val.trans_field_ptr(&bcx, i);
             let arg = &fn_ty.args[arg_idx];
             arg_idx += 1;
             if common::type_is_fat_ptr(bcx.ccx, arg_ty) {
@@ -756,12 +759,7 @@ pub fn maybe_create_entry_wrapper(ccx: &CrateContext) {
     // `main` should respect same config for frame pointer elimination as rest of code
     attributes::set_frame_pointer_elimination(ccx, llfn);
 
-    let llbb = unsafe {
-        let name = CString::new("top").unwrap();
-        llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llfn, name.as_ptr())
-    };
-    let bld = Builder::with_ccx(ccx);
-    bld.position_at_end(llbb);
+    let bld = Builder::new_block(ccx, llfn, "top");
 
     debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(ccx, &bld);
diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs
index 865787f48fc52..cf7f3e9501d1a 100644
--- a/src/librustc_trans/builder.rs
+++ b/src/librustc_trans/builder.rs
@@ -19,12 +19,17 @@ use machine::llalign_of_pref;
 use type_::Type;
 use value::Value;
 use libc::{c_uint, c_char};
+use rustc::ty::{Ty, TyCtxt, TypeFoldable};
+use rustc::session::Session;
+use type_of;
 
 use std::borrow::Cow;
 use std::ffi::CString;
 use std::ptr;
 use syntax_pos::Span;
 
+// All Builders must have an llfn associated with them
+#[must_use]
 pub struct Builder<'a, 'tcx: 'a> {
     pub llbuilder: BuilderRef,
     pub ccx: &'a CrateContext<'a, 'tcx>,
@@ -46,6 +51,20 @@ fn noname() -> *const c_char {
 }
 
 impl<'a, 'tcx> Builder<'a, 'tcx> {
+    pub fn new_block<'b>(ccx: &'a CrateContext<'a, 'tcx>, llfn: ValueRef, name: &'b str) -> Self {
+        let builder = Builder::with_ccx(ccx);
+        let llbb = unsafe {
+            let name = CString::new(name).unwrap();
+            llvm::LLVMAppendBasicBlockInContext(
+                ccx.llcx(),
+                llfn,
+                name.as_ptr()
+            )
+        };
+        builder.position_at_end(llbb);
+        builder
+    }
+
     pub fn with_ccx(ccx: &'a CrateContext<'a, 'tcx>) -> Self {
         // Create a fresh builder from the crate context.
         let llbuilder = unsafe {
@@ -57,6 +76,30 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         }
     }
 
+    pub fn build_sibling_block<'b>(&self, name: &'b str) -> Builder<'a, 'tcx> {
+        Builder::new_block(self.ccx, self.llfn(), name)
+    }
+
+    pub fn sess(&self) -> &Session {
+        self.ccx.sess()
+    }
+
+    pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> {
+        self.ccx.tcx()
+    }
+
+    pub fn llfn(&self) -> ValueRef {
+        unsafe {
+            llvm::LLVMGetBasicBlockParent(self.llbb())
+        }
+    }
+
+    pub fn llbb(&self) -> BasicBlockRef {
+        unsafe {
+            llvm::LLVMGetInsertBlock(self.llbuilder)
+        }
+    }
+
     fn count_insn(&self, category: &str) {
         if self.ccx.sess().trans_stats() {
             self.ccx.stats().n_llvm_insns.set(self.ccx.stats().n_llvm_insns.get() + 1);
@@ -435,6 +478,19 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         }
     }
 
+    pub fn alloca(&self, ty: Type, name: &str) -> ValueRef {
+        let builder = Builder::with_ccx(self.ccx);
+        builder.position_at_start(unsafe {
+            llvm::LLVMGetFirstBasicBlock(self.llfn())
+        });
+        builder.dynamic_alloca(ty, name)
+    }
+
+    pub fn alloca_ty(&self, ty: Ty<'tcx>, name: &str) -> ValueRef {
+        assert!(!ty.has_param_types());
+        self.alloca(type_of::type_of(self.ccx, ty), name)
+    }
+
     pub fn dynamic_alloca(&self, ty: Type, name: &str) -> ValueRef {
         self.count_insn("alloca");
         unsafe {
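
A usage sketch of the new `Builder` entry points (illustrative; `ccx` and `llfn` are assumed to be in scope): blocks are created directly from a function value, and `alloca` positions itself at the start of the function's first block instead of going through a `FunctionContext` insertion point.

    let bcx = Builder::new_block(ccx, llfn, "entry-block");
    let slot = bcx.alloca(Type::i8p(ccx), "slot");  // lands at the top of the entry block
    let next = bcx.build_sibling_block("next");     // another block in the same llfn
    bcx.br(next.llbb());
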
diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs
index 1abe25ea6073e..257d6c01e4a65 100644
--- a/src/librustc_trans/callee.rs
+++ b/src/librustc_trans/callee.rs
@@ -23,11 +23,10 @@ use rustc::traits;
 use abi::{Abi, FnType};
 use attributes;
 use base;
-use base::*;
-use common::{
-    self, CrateContext, FunctionContext, SharedCrateContext
-};
-use adt::MaybeSizedValue;
+use builder::Builder;
+use common::{self, CrateContext, SharedCrateContext};
+use cleanup::CleanupScope;
+use mir::lvalue::LvalueRef;
 use consts;
 use declare;
 use value::Value;
@@ -330,8 +329,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
     attributes::set_frame_pointer_elimination(ccx, lloncefn);
 
     let orig_fn_ty = fn_ty;
-    let fcx = FunctionContext::new(ccx, lloncefn);
-    let mut bcx = fcx.get_entry_block();
+    let mut bcx = Builder::new_block(ccx, lloncefn, "entry-block");
 
     let callee = Callee {
         data: Fn(llreffn),
@@ -340,7 +338,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
 
     // the first argument (`self`) will be the (by value) closure env.
 
-    let mut llargs = get_params(fcx.llfn);
+    let mut llargs = get_params(lloncefn);
     let fn_ret = callee.ty.fn_ret();
     let fn_ty = callee.direct_fn_type(bcx.ccx, &[]);
     let self_idx = fn_ty.ret.is_indirect() as usize;
@@ -348,7 +346,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
     let llenv = if env_arg.is_indirect() {
         llargs[self_idx]
     } else {
-        let scratch = alloc_ty(&bcx, closure_ty, "self");
+        let scratch = bcx.alloca_ty(closure_ty, "self");
         let mut llarg_idx = self_idx;
         env_arg.store_fn_arg(&bcx, &mut llarg_idx, scratch);
         scratch
@@ -365,12 +363,14 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
 
     // Call the by-ref closure body with `self` in a cleanup scope,
     // to drop `self` when the body returns, or in case it unwinds.
-    let self_scope = fcx.schedule_drop_mem(MaybeSizedValue::sized(llenv), closure_ty);
+    let self_scope = CleanupScope::schedule_drop_mem(
+        &bcx, LvalueRef::new_sized_ty(llenv, closure_ty)
+    );
 
     let llfn = callee.reify(bcx.ccx);
     let llret;
     if let Some(landing_pad) = self_scope.landing_pad {
-        let normal_bcx = bcx.fcx().build_new_block("normal-return");
+        let normal_bcx = bcx.build_sibling_block("normal-return");
         llret = bcx.invoke(llfn, &llargs[..], normal_bcx.llbb(), landing_pad, None);
         bcx = normal_bcx;
     } else {
@@ -489,10 +489,9 @@ fn trans_fn_pointer_shim<'a, 'tcx>(
     let llfn = declare::define_internal_fn(ccx, &function_name, tuple_fn_ty);
     attributes::set_frame_pointer_elimination(ccx, llfn);
     //
-    let fcx = FunctionContext::new(ccx, llfn);
-    let bcx = fcx.get_entry_block();
+    let bcx = Builder::new_block(ccx, llfn, "entry-block");
 
-    let mut llargs = get_params(fcx.llfn);
+    let mut llargs = get_params(llfn);
 
     let self_arg = llargs.remove(fn_ty.ret.is_indirect() as usize);
 
     let llfnpointer = llfnpointer.unwrap_or_else(|| {
diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs
index 4e59ea3f6c5ed..5d89a67d3fd80 100644
--- a/src/librustc_trans/cleanup.rs
+++ b/src/librustc_trans/cleanup.rs
@@ -20,11 +20,12 @@
 use llvm::BasicBlockRef;
 
 use base;
-use adt::MaybeSizedValue;
-use common::{BlockAndBuilder, FunctionContext, Funclet};
+use mir::lvalue::LvalueRef;
+use rustc::mir::tcx::LvalueTy;
+use builder::Builder;
+use common::Funclet;
 use glue;
 use type_::Type;
-use rustc::ty::Ty;
 
 pub struct CleanupScope<'tcx> {
     // Cleanup to run upon scope exit.
@@ -36,14 +37,13 @@ pub struct CleanupScope<'tcx> {
 
 #[derive(Copy, Clone)]
 pub struct DropValue<'tcx> {
-    val: MaybeSizedValue,
-    ty: Ty<'tcx>,
+    val: LvalueRef<'tcx>,
     skip_dtor: bool,
 }
 
 impl<'tcx> DropValue<'tcx> {
-    fn trans<'a>(&self, funclet: Option<&'a Funclet>, bcx: &BlockAndBuilder<'a, 'tcx>) {
-        glue::call_drop_glue(bcx, self.val, self.ty, self.skip_dtor, funclet)
+    fn trans<'a>(&self, funclet: Option<&'a Funclet>, bcx: &Builder<'a, 'tcx>) {
+        glue::call_drop_glue(bcx, self.val, self.skip_dtor, funclet)
     }
 
     /// Creates a landing pad for the top scope. The landing pad will perform all cleanups necessary
@@ -52,13 +52,13 @@ impl<'tcx> DropValue<'tcx> {
     ///     landing_pad -> ... cleanups ... -> [resume]
     ///
     /// This should only be called once per function, as it creates an alloca for the landingpad.
-    fn get_landing_pad<'a>(&self, fcx: &FunctionContext<'a, 'tcx>) -> BasicBlockRef {
+    fn get_landing_pad<'a>(&self, bcx: &Builder<'a, 'tcx>) -> BasicBlockRef {
         debug!("get_landing_pad");
-        let bcx = fcx.build_new_block("cleanup_unwind");
+        let bcx = bcx.build_sibling_block("cleanup_unwind");
         let llpersonality = bcx.ccx.eh_personality();
         bcx.set_personality_fn(llpersonality);
 
-        if base::wants_msvc_seh(fcx.ccx.sess()) {
+        if base::wants_msvc_seh(bcx.sess()) {
             let pad = bcx.cleanup_pad(None, &[]);
             let funclet = Some(Funclet::new(pad));
             self.trans(funclet.as_ref(), &bcx);
@@ -68,10 +68,10 @@ impl<'tcx> DropValue<'tcx> {
             // The landing pad return type (the type being propagated). Not sure
             // what this represents but it's determined by the personality
             // function and this is what the EH proposal example uses.
-            let llretty = Type::struct_(fcx.ccx, &[Type::i8p(fcx.ccx), Type::i32(fcx.ccx)], false);
+            let llretty = Type::struct_(bcx.ccx, &[Type::i8p(bcx.ccx), Type::i32(bcx.ccx)], false);
 
             // The only landing pad clause will be 'cleanup'
-            let llretval = bcx.landing_pad(llretty, llpersonality, 1, bcx.fcx().llfn);
+            let llretval = bcx.landing_pad(llretty, llpersonality, 1, bcx.llfn());
 
             // The landing pad block is a cleanup
             bcx.set_cleanup(llretval);
@@ -92,17 +92,23 @@ impl<'tcx> DropValue<'tcx> {
     }
 }
 
-impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
+impl<'a, 'tcx> CleanupScope<'tcx> {
     /// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty`
-    pub fn schedule_drop_mem(&self, val: MaybeSizedValue, ty: Ty<'tcx>) -> CleanupScope<'tcx> {
-        if !self.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); }
+    pub fn schedule_drop_mem(
+        bcx: &Builder<'a, 'tcx>, val: LvalueRef<'tcx>
+    ) -> CleanupScope<'tcx> {
+        if let LvalueTy::Downcast { .. } = val.ty {
+            bug!("Cannot drop downcast ty yet");
+        }
+        if !bcx.ccx.shared().type_needs_drop(val.ty.to_ty(bcx.tcx())) {
+            return CleanupScope::noop();
+        }
         let drop = DropValue {
             val: val,
-            ty: ty,
             skip_dtor: false,
         };
-        CleanupScope::new(self, drop)
+        CleanupScope::new(bcx, drop)
     }
 
     /// Issue #23611: Schedules a (deep) drop of the contents of
@@ -110,28 +116,31 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
     /// `ty`. The scheduled code handles extracting the discriminant
     /// and dropping the contents associated with that variant
     /// *without* executing any associated drop implementation.
-    pub fn schedule_drop_adt_contents(&self, val: MaybeSizedValue, ty: Ty<'tcx>)
-        -> CleanupScope<'tcx> {
+    pub fn schedule_drop_adt_contents(
+        bcx: &Builder<'a, 'tcx>, val: LvalueRef<'tcx>
+    ) -> CleanupScope<'tcx> {
+        if let LvalueTy::Downcast { .. } = val.ty {
+            bug!("Cannot drop downcast ty yet");
+        }
         // `if` below could be "!contents_needs_drop"; skipping drop
         // is just an optimization, so sound to be conservative.
-        if !self.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); }
+        if !bcx.ccx.shared().type_needs_drop(val.ty.to_ty(bcx.tcx())) {
+            return CleanupScope::noop();
+        }
 
         let drop = DropValue {
             val: val,
-            ty: ty,
             skip_dtor: true,
         };
-        CleanupScope::new(self, drop)
+        CleanupScope::new(bcx, drop)
     }
-}
 
-impl<'tcx> CleanupScope<'tcx> {
-    fn new<'a>(fcx: &FunctionContext<'a, 'tcx>, drop_val: DropValue<'tcx>) -> CleanupScope<'tcx> {
+    fn new(bcx: &Builder<'a, 'tcx>, drop_val: DropValue<'tcx>) -> CleanupScope<'tcx> {
         CleanupScope {
             cleanup: Some(drop_val),
-            landing_pad: if !fcx.ccx.sess().no_landing_pads() {
-                Some(drop_val.get_landing_pad(fcx))
+            landing_pad: if !bcx.sess().no_landing_pads() {
+                Some(drop_val.get_landing_pad(bcx))
             } else {
                 None
             },
@@ -145,7 +154,7 @@ impl<'tcx> CleanupScope<'tcx> {
         }
     }
 
-    pub fn trans<'a>(self, bcx: &'a BlockAndBuilder<'a, 'tcx>) {
+    pub fn trans(self, bcx: &'a Builder<'a, 'tcx>) {
         if let Some(cleanup) = self.cleanup {
             cleanup.trans(None, &bcx);
         }
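
A sketch of the relocated cleanup API in use, mirroring the fn-once shim earlier in this patch (`llenv`, `closure_ty`, `llfn`, `llargs` are assumed bindings, not new API):

    // Drop `self` when the body returns normally or unwinds.
    let self_scope = CleanupScope::schedule_drop_mem(
        &bcx, LvalueRef::new_sized_ty(llenv, closure_ty));
    let llret = if let Some(landing_pad) = self_scope.landing_pad {
        let normal_bcx = bcx.build_sibling_block("normal-return");
        let r = bcx.invoke(llfn, &llargs[..], normal_bcx.llbb(), landing_pad, None);
        bcx = normal_bcx;
        r
    } else {
        bcx.call(llfn, &llargs[..], None)   // no landing pads: plain call
    };
    self_scope.trans(&bcx);                 // run the scheduled drop on the normal path
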
diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs
index 7e7bd15dc6e5a..13163518f941e 100644
--- a/src/librustc_trans/common.rs
+++ b/src/librustc_trans/common.rs
@@ -12,11 +12,9 @@
 
 //! Code that is useful in various trans modules.
 
-use session::Session;
 use llvm;
-use llvm::{ValueRef, BasicBlockRef, ContextRef, TypeKind};
+use llvm::{ValueRef, ContextRef, TypeKind};
 use llvm::{True, False, Bool, OperandBundleDef};
-use rustc::hir::def::Def;
 use rustc::hir::def_id::DefId;
 use rustc::hir::map::DefPathData;
 use rustc::util::common::MemoizationMap;
@@ -37,11 +35,9 @@
 use rustc::hir;
 
 use libc::{c_uint, c_char};
 use std::borrow::Cow;
 use std::iter;
-use std::ops::Deref;
-use std::ffi::CString;
 
 use syntax::ast;
-use syntax::symbol::{Symbol, InternedString};
+use syntax::symbol::InternedString;
 use syntax_pos::Span;
 
 use rustc_i128::u128;
@@ -172,191 +168,6 @@ pub fn type_is_zero_size<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -
  *
  */
 
-use Disr;
-
-/// The concrete version of ty::FieldDef. The name is the field index if
-/// the field is numeric.
-pub struct Field<'tcx>(pub ast::Name, pub Ty<'tcx>);
-
-/// The concrete version of ty::VariantDef
-pub struct VariantInfo<'tcx> {
-    pub discr: Disr,
-    pub fields: Vec<Field<'tcx>>
-}
-
-impl<'a, 'tcx> VariantInfo<'tcx> {
-    pub fn from_ty(tcx: TyCtxt<'a, 'tcx, 'tcx>,
-                   ty: Ty<'tcx>,
-                   opt_def: Option<Def>)
-                   -> Self
-    {
-        match ty.sty {
-            ty::TyAdt(adt, substs) => {
-                let variant = match opt_def {
-                    None => adt.struct_variant(),
-                    Some(def) => adt.variant_of_def(def)
-                };
-
-                VariantInfo {
-                    discr: Disr::from(variant.disr_val),
-                    fields: variant.fields.iter().map(|f| {
-                        Field(f.name, monomorphize::field_ty(tcx, substs, f))
-                    }).collect()
-                }
-            }
-
-            ty::TyTuple(ref v) => {
-                VariantInfo {
-                    discr: Disr(0),
-                    fields: v.iter().enumerate().map(|(i, &t)| {
-                        Field(Symbol::intern(&i.to_string()), t)
-                    }).collect()
-                }
-            }
-
-            _ => {
-                bug!("cannot get field types from the type {:?}", ty);
-            }
-        }
-    }
-}
-
-// Function context. Every LLVM function we create will have one of these.
-pub struct FunctionContext<'a, 'tcx: 'a> {
-    // The ValueRef returned from a call to llvm::LLVMAddFunction; the
-    // address of the first instruction in the sequence of
-    // instructions for this function that will go in the .text
-    // section of the executable we're generating.
-    pub llfn: ValueRef,
-
-    // A marker for the place where we want to insert the function's static
-    // allocas, so that LLVM will coalesce them into a single alloca call.
-    alloca_insert_pt: Option<ValueRef>,
-
-    // This function's enclosing crate context.
-    pub ccx: &'a CrateContext<'a, 'tcx>,
-
-    alloca_builder: Builder<'a, 'tcx>,
-}
-
-impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
-    /// Create a function context for the given function.
-    /// Call FunctionContext::get_entry_block for the first entry block.
-    pub fn new(ccx: &'a CrateContext<'a, 'tcx>, llfndecl: ValueRef) -> FunctionContext<'a, 'tcx> {
-        let mut fcx = FunctionContext {
-            llfn: llfndecl,
-            alloca_insert_pt: None,
-            ccx: ccx,
-            alloca_builder: Builder::with_ccx(ccx),
-        };
-
-        let val = {
-            let entry_bcx = fcx.build_new_block("entry-block");
-            let val = entry_bcx.load(C_null(Type::i8p(ccx)));
-            fcx.alloca_builder.position_at_start(entry_bcx.llbb());
-            val
-        };
-
-        // Use a dummy instruction as the insertion point for all allocas.
-        // This is later removed in the drop of FunctionContext.
-        fcx.alloca_insert_pt = Some(val);
-
-        fcx
-    }
-
-    pub fn get_entry_block(&'a self) -> BlockAndBuilder<'a, 'tcx> {
-        BlockAndBuilder::new(unsafe {
-            llvm::LLVMGetFirstBasicBlock(self.llfn)
-        }, self)
-    }
-
-    pub fn new_block(&'a self, name: &str) -> BasicBlockRef {
-        unsafe {
-            let name = CString::new(name).unwrap();
-            llvm::LLVMAppendBasicBlockInContext(
-                self.ccx.llcx(),
-                self.llfn,
-                name.as_ptr()
-            )
-        }
-    }
-
-    pub fn build_new_block(&'a self, name: &str) -> BlockAndBuilder<'a, 'tcx> {
-        BlockAndBuilder::new(self.new_block(name), self)
-    }
-
-    pub fn alloca(&self, ty: Type, name: &str) -> ValueRef {
-        self.alloca_builder.dynamic_alloca(ty, name)
-    }
-}
-
-impl<'a, 'tcx> Drop for FunctionContext<'a, 'tcx> {
-    fn drop(&mut self) {
-        unsafe {
-            llvm::LLVMInstructionEraseFromParent(self.alloca_insert_pt.unwrap());
-        }
-    }
-}
-
-#[must_use]
-pub struct BlockAndBuilder<'a, 'tcx: 'a> {
-    // The BasicBlockRef returned from a call to
-    // llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic
-    // block to the function pointed to by llfn. We insert
-    // instructions into that block by way of this block context.
-    // The block pointing to this one in the function's digraph.
-    llbb: BasicBlockRef,
-
-    // The function context for the function to which this block is
-    // attached.
-    fcx: &'a FunctionContext<'a, 'tcx>,
-
-    builder: Builder<'a, 'tcx>,
-}
-
-impl<'a, 'tcx> BlockAndBuilder<'a, 'tcx> {
-    pub fn new(llbb: BasicBlockRef, fcx: &'a FunctionContext<'a, 'tcx>) -> Self {
-        let builder = Builder::with_ccx(fcx.ccx);
-        // Set the builder's position to this block's end.
-        builder.position_at_end(llbb);
-        BlockAndBuilder {
-            llbb: llbb,
-            fcx: fcx,
-            builder: builder,
-        }
-    }
-
-    pub fn at_start<F, R>(&self, f: F) -> R
-        where F: FnOnce(&BlockAndBuilder<'a, 'tcx>) -> R
-    {
-        self.position_at_start(self.llbb);
-        let r = f(self);
-        self.position_at_end(self.llbb);
-        r
-    }
-
-    pub fn fcx(&self) -> &'a FunctionContext<'a, 'tcx> {
-        self.fcx
-    }
-    pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> {
-        self.ccx.tcx()
-    }
-    pub fn sess(&self) -> &'a Session {
-        self.ccx.sess()
-    }
-
-    pub fn llbb(&self) -> BasicBlockRef {
-        self.llbb
-    }
-}
-
-impl<'a, 'tcx> Deref for BlockAndBuilder<'a, 'tcx> {
-    type Target = Builder<'a, 'tcx>;
-    fn deref(&self) -> &Self::Target {
-        &self.builder
-    }
-}
-
 /// A structure representing an active landing pad for the duration of a basic
 /// block.
 ///
@@ -725,7 +536,7 @@ pub fn langcall(tcx: TyCtxt,
 // of Java. (See related discussion on #1877 and #10183.)
 pub fn build_unchecked_lshift<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>,
+    bcx: &Builder<'a, 'tcx>,
     lhs: ValueRef,
     rhs: ValueRef
 ) -> ValueRef {
@@ -736,7 +547,7 @@ pub fn build_unchecked_lshift<'a, 'tcx>(
 }
 
 pub fn build_unchecked_rshift<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>, lhs_t: Ty<'tcx>, lhs: ValueRef, rhs: ValueRef
+    bcx: &Builder<'a, 'tcx>, lhs_t: Ty<'tcx>, lhs: ValueRef, rhs: ValueRef
 ) -> ValueRef {
     let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShr, lhs, rhs);
     // #1877, #10183: Ensure that input is always valid
@@ -749,13 +560,13 @@ pub fn build_unchecked_rshift<'a, 'tcx>(
     }
 }
 
-fn shift_mask_rhs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, rhs: ValueRef) -> ValueRef {
+fn shift_mask_rhs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, rhs: ValueRef) -> ValueRef {
     let rhs_llty = val_ty(rhs);
     bcx.and(rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false))
 }
 
 pub fn shift_mask_val<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>,
+    bcx: &Builder<'a, 'tcx>,
     llty: Type,
     mask_llty: Type,
     invert: bool
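
Why the masking above matters (illustrative, plain Rust): LLVM's `shl`/`lshr`/`ashr` are undefined when the shift amount is not below the bit width, so `shift_mask_rhs` ANDs the amount with `bits - 1` first.

    fn mask_shift_amount(rhs: u64, bits: u64) -> u64 {
        debug_assert!(bits.is_power_of_two());
        rhs & (bits - 1)   // the mask shift_mask_val builds for invert = false
    }
    // mask_shift_amount(67, 64) == 3
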
diff --git a/src/librustc_trans/debuginfo/create_scope_map.rs b/src/librustc_trans/debuginfo/create_scope_map.rs
index f5a8eeacf38ad..c6f8ba7b6dc78 100644
--- a/src/librustc_trans/debuginfo/create_scope_map.rs
+++ b/src/librustc_trans/debuginfo/create_scope_map.rs
@@ -14,7 +14,7 @@ use super::utils::{DIB, span_start};
 
 use llvm;
 use llvm::debuginfo::{DIScope, DISubprogram};
-use common::{CrateContext, FunctionContext};
+use common::CrateContext;
 use rustc::mir::{Mir, VisibilityScope};
 
 use libc::c_uint;
@@ -44,7 +44,7 @@ impl MirDebugScope {
 
 /// Produce DIScope DIEs for each MIR Scope which has variables defined in it.
 /// If debuginfo is disabled, the returned vector is empty.
-pub fn create_mir_scopes(fcx: &FunctionContext, mir: &Mir, debug_context: &FunctionDebugContext)
+pub fn create_mir_scopes(ccx: &CrateContext, mir: &Mir, debug_context: &FunctionDebugContext)
     -> IndexVec<VisibilityScope, MirDebugScope> {
     let null_scope = MirDebugScope {
         scope_metadata: ptr::null_mut(),
@@ -71,7 +71,7 @@ pub fn create_mir_scopes(fcx: &FunctionContext, mir: &Mir, debug_context: &Funct
     // Instantiate all scopes.
     for idx in 0..mir.visibility_scopes.len() {
         let scope = VisibilityScope::new(idx);
-        make_mir_scope(fcx.ccx, &mir, &has_variables, fn_metadata, scope, &mut scopes);
+        make_mir_scope(ccx, &mir, &has_variables, fn_metadata, scope, &mut scopes);
     }
 
     scopes
diff --git a/src/librustc_trans/debuginfo/doc.rs b/src/librustc_trans/debuginfo/doc.rs
index bcf5eb9920076..7a739071506db 100644
--- a/src/librustc_trans/debuginfo/doc.rs
+++ b/src/librustc_trans/debuginfo/doc.rs
@@ -45,7 +45,7 @@
 //!
 //! All private state used by the module is stored within either the
 //! CrateDebugContext struct (owned by the CrateContext) or the
-//! FunctionDebugContext (owned by the FunctionContext).
+//! FunctionDebugContext (owned by the MirContext).
 //!
 //! This file consists of three conceptual sections:
 //! 1. The public interface of the module
diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs
index 86099d241df68..9117f49cf3ea5 100644
--- a/src/librustc_trans/debuginfo/mod.rs
+++ b/src/librustc_trans/debuginfo/mod.rs
@@ -27,7 +27,8 @@ use rustc::hir::def_id::DefId;
 use rustc::ty::subst::Substs;
 
 use abi::Abi;
-use common::{CrateContext, BlockAndBuilder};
+use common::CrateContext;
+use builder::Builder;
 use monomorphize::{self, Instance};
 use rustc::ty::{self, Ty};
 use rustc::mir;
@@ -423,7 +424,7 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
     }
 }
 
-pub fn declare_local<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
+pub fn declare_local<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                                dbg_context: &FunctionDebugContext,
                                variable_name: ast::Name,
                                variable_type: Ty<'tcx>,
diff --git a/src/librustc_trans/debuginfo/source_loc.rs b/src/librustc_trans/debuginfo/source_loc.rs
index e02c8be19a2f4..e99e26261a3a1 100644
--- a/src/librustc_trans/debuginfo/source_loc.rs
+++ b/src/librustc_trans/debuginfo/source_loc.rs
@@ -38,7 +38,7 @@ pub fn set_source_location(
     };
 
     let dbg_loc = if function_debug_context.source_locations_enabled.get() {
-        debug!("set_source_location: {}", builder.ccx.sess().codemap().span_to_string(span));
+        debug!("set_source_location: {}", builder.sess().codemap().span_to_string(span));
         let loc = span_start(builder.ccx, span);
         InternalDebugLocation::new(scope, loc.line, loc.col.to_usize())
     } else {
diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs
index 62141369caec1..4fe07c9b86abf 100644
--- a/src/librustc_trans/glue.rs
+++ b/src/librustc_trans/glue.rs
@@ -13,6 +13,7 @@
 //  Code relating to drop glue.
 
 use std;
+use std::ptr;
 use std::iter;
 
 use llvm;
@@ -20,11 +21,14 @@ use llvm::{ValueRef, get_param};
 use middle::lang_items::BoxFreeFnLangItem;
 use rustc::ty::subst::{Substs};
 use rustc::traits;
-use rustc::ty::{self, AdtKind, Ty, TypeFoldable};
+use rustc::ty::{self, layout, AdtDef, AdtKind, Ty, TypeFoldable};
 use rustc::ty::subst::Kind;
-use adt::{self, MaybeSizedValue};
+use rustc::mir::tcx::LvalueTy;
+use mir::lvalue::LvalueRef;
+use adt;
 use base::*;
 use callee::Callee;
+use cleanup::CleanupScope;
 use common::*;
 use machine::*;
 use monomorphize;
@@ -34,15 +38,12 @@ use type_of::{type_of, sizing_type_of, align_of};
 use type_::Type;
 use value::Value;
 use Disr;
-use cleanup::CleanupScope;
+use builder::Builder;
 
 use syntax_pos::DUMMY_SP;
 
-pub fn trans_exchange_free_ty<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>,
-    ptr: MaybeSizedValue,
-    content_ty: Ty<'tcx>
-) {
+pub fn trans_exchange_free_ty<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, ptr: LvalueRef<'tcx>) {
+    let content_ty = ptr.ty.to_ty(bcx.tcx());
     let def_id = langcall(bcx.tcx(), None, "", BoxFreeFnLangItem);
     let substs = bcx.tcx().mk_substs(iter::once(Kind::from(content_ty)));
     let callee = Callee::def(bcx.ccx, def_id, substs);
@@ -50,7 +51,7 @@ pub fn trans_exchange_free_ty<'a, 'tcx>(
     let fn_ty = callee.direct_fn_type(bcx.ccx, &[]);
 
     let llret = bcx.call(callee.reify(bcx.ccx),
-                         &[ptr.value, ptr.meta][..1 + ptr.has_meta() as usize], None);
+                         &[ptr.llval, ptr.llextra][..1 + ptr.has_extra() as usize], None);
     fn_ty.apply_attrs_callsite(llret);
 }
 
@@ -93,17 +94,17 @@ pub fn get_drop_glue_type<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, t: Ty<'t
     }
 }
 
-fn drop_ty<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, args: MaybeSizedValue, t: Ty<'tcx>) {
-    call_drop_glue(bcx, args, t, false, None)
+fn drop_ty<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, args: LvalueRef<'tcx>) {
+    call_drop_glue(bcx, args, false, None)
 }
 
 pub fn call_drop_glue<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>,
-    mut args: MaybeSizedValue,
-    t: Ty<'tcx>,
+    bcx: &Builder<'a, 'tcx>,
+    mut args: LvalueRef<'tcx>,
     skip_dtor: bool,
     funclet: Option<&'a Funclet>,
 ) {
+    let t = args.ty.to_ty(bcx.tcx());
     // NB: v is an *alias* of type t here, not a direct value.
     debug!("call_drop_glue(t={:?}, skip_dtor={})", t, skip_dtor);
     if bcx.ccx.shared().type_needs_drop(t) {
@@ -116,11 +117,11 @@ pub fn call_drop_glue<'a, 'tcx>(
         let glue = get_drop_glue_core(ccx, g);
         let glue_type = get_drop_glue_type(ccx.shared(), t);
         if glue_type != t {
-            args.value = bcx.pointercast(args.value, type_of(ccx, glue_type).ptr_to());
+            args.llval = bcx.pointercast(args.llval, type_of(ccx, glue_type).ptr_to());
         }
 
         // No drop-hint ==> call standard drop glue
-        bcx.call(glue, &[args.value, args.meta][..1 + args.has_meta() as usize],
+        bcx.call(glue, &[args.llval, args.llextra][..1 + args.has_extra() as usize],
                  funclet.map(|b| b.bundle()));
     }
 }
@@ -173,8 +174,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
     assert_eq!(g.ty(), get_drop_glue_type(ccx.shared(), g.ty()));
     let (llfn, _) = ccx.drop_glues().borrow().get(&g).unwrap().clone();
 
-    let fcx = FunctionContext::new(ccx, llfn);
-    let mut bcx = fcx.get_entry_block();
+    let mut bcx = Builder::new_block(ccx, llfn, "entry-block");
 
     ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1);
     // All glue functions take values passed *by alias*; this is a
@@ -194,9 +194,9 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
 
     let value = get_param(llfn, 0);
     let ptr = if ccx.shared().type_is_sized(t) {
-        MaybeSizedValue::sized(value)
+        LvalueRef::new_sized_ty(value, t)
     } else {
-        MaybeSizedValue::unsized_(value, get_param(llfn, 1))
+        LvalueRef::new_unsized_ty(value, get_param(llfn, 1), t)
     };
 
     let skip_dtor = match g {
@@ -211,14 +211,14 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
             // a safe-guard, assert TyBox not used with TyContents.
             assert!(!skip_dtor);
             let ptr = if !bcx.ccx.shared().type_is_sized(content_ty) {
-                let llbox = bcx.load(get_dataptr(&bcx, ptr.value));
-                let info = bcx.load(get_meta(&bcx, ptr.value));
-                MaybeSizedValue::unsized_(llbox, info)
+                let llbox = bcx.load(get_dataptr(&bcx, ptr.llval));
+                let info = bcx.load(get_meta(&bcx, ptr.llval));
+                LvalueRef::new_unsized_ty(llbox, info, content_ty)
             } else {
-                MaybeSizedValue::sized(bcx.load(ptr.value))
+                LvalueRef::new_sized_ty(bcx.load(ptr.llval), content_ty)
             };
-            drop_ty(&bcx, ptr, content_ty);
-            trans_exchange_free_ty(&bcx, ptr, content_ty);
+            drop_ty(&bcx, ptr);
+            trans_exchange_free_ty(&bcx, ptr);
             bcx
         }
         ty::TyDynamic(..) => {
@@ -226,8 +226,8 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
             // versus without calling Drop::drop. Assert caller is
             // okay with always calling the Drop impl, if any.
             assert!(!skip_dtor);
-            let dtor = bcx.load(ptr.meta);
-            bcx.call(dtor, &[ptr.value], None);
+            let dtor = bcx.load(ptr.llextra);
+            bcx.call(dtor, &[ptr.llval], None);
             bcx
         }
         ty::TyAdt(def, ..) if def.dtor_kind().is_present() && !skip_dtor => {
@@ -245,7 +245,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
 
             // Issue #23611: schedule cleanup of contents, re-inspecting the
             // discriminant (if any) in case of variant swap in drop code.
             let contents_scope = if !shallow_drop {
-                bcx.fcx().schedule_drop_adt_contents(ptr, t)
+                CleanupScope::schedule_drop_adt_contents(&bcx, ptr)
             } else {
                 CleanupScope::noop()
             };
@@ -262,9 +262,9 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
             let callee = Callee::def(bcx.ccx, dtor_did, vtbl.substs);
             let fn_ty = callee.direct_fn_type(bcx.ccx, &[]);
             let llret;
-            let args = &[ptr.value, ptr.meta][..1 + ptr.has_meta() as usize];
+            let args = &[ptr.llval, ptr.llextra][..1 + ptr.has_extra() as usize];
             if let Some(landing_pad) = contents_scope.landing_pad {
-                let normal_bcx = bcx.fcx().build_new_block("normal-return");
+                let normal_bcx = bcx.build_sibling_block("normal-return");
                 llret = bcx.invoke(callee.reify(ccx), args, normal_bcx.llbb(), landing_pad, None);
                 bcx = normal_bcx;
             } else {
@@ -279,7 +279,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
         }
         _ => {
             if bcx.ccx.shared().type_needs_drop(t) {
-                drop_structural_ty(bcx, ptr, t)
+                drop_structural_ty(bcx, ptr)
             } else {
                 bcx
             }
@@ -288,8 +288,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
     bcx.ret_void();
 }
 
-pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
-                                       t: Ty<'tcx>, info: ValueRef)
+pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, info: ValueRef)
                                        -> (ValueRef, ValueRef) {
     debug!("calculate size of DST: {}; with lost info: {:?}",
            t, Value(info));
@@ -397,60 +396,64 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
 }
 
 // Iterates through the elements of a structural type, dropping them.
-fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>,
-                                ptr: MaybeSizedValue,
-                                t: Ty<'tcx>)
-                                -> BlockAndBuilder<'a, 'tcx> {
-    fn iter_variant<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>,
-                              t: Ty<'tcx>,
-                              av: adt::MaybeSizedValue,
-                              variant: &'tcx ty::VariantDef,
-                              substs: &Substs<'tcx>) {
+fn drop_structural_ty<'a, 'tcx>(
+    cx: Builder<'a, 'tcx>,
+    mut ptr: LvalueRef<'tcx>
+) -> Builder<'a, 'tcx> {
+    fn iter_variant_fields<'a, 'tcx>(
+        cx: &'a Builder<'a, 'tcx>,
+        av: LvalueRef<'tcx>,
+        adt_def: &'tcx AdtDef,
+        variant_index: usize,
+        substs: &'tcx Substs<'tcx>
+    ) {
+        let variant = &adt_def.variants[variant_index];
         let tcx = cx.tcx();
         for (i, field) in variant.fields.iter().enumerate() {
             let arg = monomorphize::field_ty(tcx, substs, field);
-            let field_ptr = adt::trans_field_ptr(&cx, t, av, Disr::from(variant.disr_val), i);
-            drop_ty(&cx, MaybeSizedValue::sized(field_ptr), arg);
+            let field_ptr = av.trans_field_ptr(&cx, i);
+            drop_ty(&cx, LvalueRef::new_sized_ty(field_ptr, arg));
         }
     }
 
     let mut cx = cx;
+    let t = ptr.ty.to_ty(cx.tcx());
     match t.sty {
         ty::TyClosure(def_id, substs) => {
             for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() {
-                let llupvar = adt::trans_field_ptr(&cx, t, ptr, Disr(0), i);
-                drop_ty(&cx, MaybeSizedValue::sized(llupvar), upvar_ty);
+                let llupvar = ptr.trans_field_ptr(&cx, i);
+                drop_ty(&cx, LvalueRef::new_sized_ty(llupvar, upvar_ty));
             }
         }
         ty::TyArray(_, n) => {
-            let base = get_dataptr(&cx, ptr.value);
+            let base = get_dataptr(&cx, ptr.llval);
             let len = C_uint(cx.ccx, n);
             let unit_ty = t.sequence_element_type(cx.tcx());
             cx = tvec::slice_for_each(&cx, base, unit_ty, len,
-                |bb, vv| drop_ty(bb, MaybeSizedValue::sized(vv), unit_ty));
+                |bb, vv| drop_ty(bb, LvalueRef::new_sized_ty(vv, unit_ty)));
         }
         ty::TySlice(_) | ty::TyStr => {
             let unit_ty = t.sequence_element_type(cx.tcx());
-            cx = tvec::slice_for_each(&cx, ptr.value, unit_ty, ptr.meta,
-                |bb, vv| drop_ty(bb, MaybeSizedValue::sized(vv), unit_ty));
+            cx = tvec::slice_for_each(&cx, ptr.llval, unit_ty, ptr.llextra,
+                |bb, vv| drop_ty(bb, LvalueRef::new_sized_ty(vv, unit_ty)));
         }
         ty::TyTuple(ref args) => {
             for (i, arg) in args.iter().enumerate() {
-                let llfld_a = adt::trans_field_ptr(&cx, t, ptr, Disr(0), i);
-                drop_ty(&cx, MaybeSizedValue::sized(llfld_a), *arg);
+                let llfld_a = ptr.trans_field_ptr(&cx, i);
+                drop_ty(&cx, LvalueRef::new_sized_ty(llfld_a, *arg));
             }
         }
         ty::TyAdt(adt, substs) => match adt.adt_kind() {
             AdtKind::Struct => {
-                let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None);
-                for (i, &Field(_, field_ty)) in fields.iter().enumerate() {
-                    let llfld_a = adt::trans_field_ptr(&cx, t, ptr, Disr::from(discr), i);
-                    let ptr = if cx.ccx.shared().type_is_sized(field_ty) {
-                        MaybeSizedValue::sized(llfld_a)
-                    } else {
-                        MaybeSizedValue::unsized_(llfld_a, ptr.meta)
-                    };
-                    drop_ty(&cx, ptr, field_ty);
+                for (i, field) in adt.variants[0].fields.iter().enumerate() {
+                    let field_ty = monomorphize::field_ty(cx.tcx(), substs, field);
+                    let mut field_ptr = ptr.clone();
+                    field_ptr.llval = ptr.trans_field_ptr(&cx, i);
+                    field_ptr.ty = LvalueTy::from_ty(field_ty);
+                    if cx.ccx.shared().type_is_sized(field_ty) {
+                        field_ptr.llextra = ptr::null_mut();
+                    }
+                    drop_ty(&cx, field_ptr);
                 }
             }
             AdtKind::Union => {
@@ -462,16 +465,29 @@ fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>,
 
                 // NB: we must hit the discriminant first so that structural
                 // comparison know not to proceed when the discriminants differ.
-                match adt::trans_switch(&cx, t, ptr.value, false) {
-                    (adt::BranchKind::Single, None) => {
+                // Obtain a representation of the discriminant sufficient to translate
+                // destructuring; this may or may not involve the actual discriminant.
+                let l = cx.ccx.layout_of(t);
+                match *l {
+                    layout::Univariant { .. } |
+                    layout::UntaggedUnion { .. } => {
                         if n_variants != 0 {
                             assert!(n_variants == 1);
-                            iter_variant(&cx, t, ptr, &adt.variants[0], substs);
+                            ptr.ty = LvalueTy::Downcast {
+                                adt_def: adt,
+                                substs: substs,
+                                variant_index: 0,
+                            };
+                            iter_variant_fields(&cx, ptr, &adt, 0, substs);
                         }
                     }
-                    (adt::BranchKind::Switch, Some(lldiscrim_a)) => {
+                    layout::CEnum { .. } |
+                    layout::General { .. } |
+                    layout::RawNullablePointer { .. } |
+                    layout::StructWrappedNullablePointer { .. } => {
+                        let lldiscrim_a = adt::trans_get_discr(&cx, t, ptr.llval, None, false);
                         let tcx = cx.tcx();
-                        drop_ty(&cx, MaybeSizedValue::sized(lldiscrim_a), tcx.types.isize);
+                        drop_ty(&cx, LvalueRef::new_sized_ty(lldiscrim_a, tcx.types.isize));
 
                         // Create a fall-through basic block for the "else" case of
                         // the switch instruction we're about to generate. Note that
@@ -486,23 +502,28 @@ fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>,
                         // from the outer function, and any other use case will only
                         // call this for an already-valid enum in which case the `ret
                         // void` will never be hit.
- let ret_void_cx = cx.fcx().build_new_block("enum-iter-ret-void"); + let ret_void_cx = cx.build_sibling_block("enum-iter-ret-void"); ret_void_cx.ret_void(); let llswitch = cx.switch(lldiscrim_a, ret_void_cx.llbb(), n_variants); - let next_cx = cx.fcx().build_new_block("enum-iter-next"); + let next_cx = cx.build_sibling_block("enum-iter-next"); - for variant in &adt.variants { + for (i, variant) in adt.variants.iter().enumerate() { let variant_cx_name = format!("enum-iter-variant-{}", &variant.disr_val.to_string()); - let variant_cx = cx.fcx().build_new_block(&variant_cx_name); + let variant_cx = cx.build_sibling_block(&variant_cx_name); let case_val = adt::trans_case(&cx, t, Disr::from(variant.disr_val)); variant_cx.add_case(llswitch, case_val, variant_cx.llbb()); - iter_variant(&variant_cx, t, ptr, variant, substs); + ptr.ty = LvalueTy::Downcast { + adt_def: adt, + substs: substs, + variant_index: i, + }; + iter_variant_fields(&variant_cx, ptr, &adt, i, substs); variant_cx.br(next_cx.llbb()); } cx = next_cx; } - _ => cx.ccx.sess().unimpl("value from adt::trans_switch in drop_structural_ty"), + _ => bug!("{} is not an enum.", t), } } }, diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 7c026cb153037..842a21e98db46 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -16,6 +16,7 @@ use llvm; use llvm::{ValueRef}; use abi::{Abi, FnType}; use adt; +use mir::lvalue::LvalueRef; use base::*; use common::*; use declare; @@ -24,10 +25,10 @@ use type_of; use machine; use type_::Type; use rustc::ty::{self, Ty}; -use Disr; use rustc::hir; use syntax::ast; use syntax::symbol::Symbol; +use builder::Builder; use rustc::session::Session; use syntax_pos::Span; @@ -87,14 +88,14 @@ fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> { /// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs, /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics, /// add them to librustc_trans/trans/context.rs -pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, +pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, callee_ty: Ty<'tcx>, fn_ty: &FnType, llargs: &[ValueRef], llresult: ValueRef, span: Span) { let ccx = bcx.ccx; - let tcx = bcx.tcx(); + let tcx = ccx.tcx(); let (def_id, substs, fty) = match callee_ty.sty { ty::TyFnDef(def_id, substs, ref fty) => (def_id, substs, fty), @@ -125,7 +126,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, bcx.call(expect, &[llargs[0], C_bool(ccx, false)], None) } "try" => { - try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult); + try_intrinsic(bcx, ccx, llargs[0], llargs[1], llargs[2], llresult); C_nil(ccx) } "breakpoint" => { @@ -533,7 +534,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, // qux` to be converted into `foo, bar, baz, qux`, integer // arguments to be truncated as needed and pointers to be // cast. - fn modify_as_needed<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, + fn modify_as_needed<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: &intrinsics::Type, arg_type: Ty<'tcx>, llarg: ValueRef) @@ -548,12 +549,8 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, // destructors, and the contents are SIMD // etc.
assert!(!bcx.ccx.shared().type_needs_drop(arg_type)); - let arg = adt::MaybeSizedValue::sized(llarg); - (0..contents.len()) - .map(|i| { - bcx.load(adt::trans_field_ptr(bcx, arg_type, arg, Disr(0), i)) - }) - .collect() + let arg = LvalueRef::new_sized_ty(llarg, arg_type); + (0..contents.len()).map(|i| bcx.load(arg.trans_field_ptr(bcx, i))).collect() } intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => { let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem, &mut false)); @@ -634,7 +631,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, } } -fn copy_intrinsic<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, +fn copy_intrinsic<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, allow_overlap: bool, volatile: bool, tp_ty: Ty<'tcx>, @@ -670,7 +667,7 @@ fn copy_intrinsic<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, } fn memset_intrinsic<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, volatile: bool, ty: Ty<'tcx>, dst: ValueRef, @@ -686,7 +683,8 @@ fn memset_intrinsic<'a, 'tcx>( } fn try_intrinsic<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, + ccx: &CrateContext, func: ValueRef, data: ValueRef, local_ptr: ValueRef, @@ -696,9 +694,9 @@ fn try_intrinsic<'a, 'tcx>( bcx.call(func, &[data], None); bcx.store(C_null(Type::i8p(&bcx.ccx)), dest, None); } else if wants_msvc_seh(bcx.sess()) { - trans_msvc_try(bcx, func, data, local_ptr, dest); + trans_msvc_try(bcx, ccx, func, data, local_ptr, dest); } else { - trans_gnu_try(bcx, func, data, local_ptr, dest); + trans_gnu_try(bcx, ccx, func, data, local_ptr, dest); } } @@ -709,24 +707,25 @@ fn try_intrinsic<'a, 'tcx>( // instructions are meant to work for all targets, as of the time of this // writing, however, LLVM does not recommend the usage of these new instructions // as the old ones are still more optimized. -fn trans_msvc_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, +fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, + ccx: &CrateContext, func: ValueRef, data: ValueRef, local_ptr: ValueRef, dest: ValueRef) { - let llfn = get_rust_try_fn(bcx.fcx(), &mut |bcx| { + let llfn = get_rust_try_fn(ccx, &mut |bcx| { let ccx = bcx.ccx; bcx.set_personality_fn(bcx.ccx.eh_personality()); - let normal = bcx.fcx().build_new_block("normal"); - let catchswitch = bcx.fcx().build_new_block("catchswitch"); - let catchpad = bcx.fcx().build_new_block("catchpad"); - let caught = bcx.fcx().build_new_block("caught"); + let normal = bcx.build_sibling_block("normal"); + let catchswitch = bcx.build_sibling_block("catchswitch"); + let catchpad = bcx.build_sibling_block("catchpad"); + let caught = bcx.build_sibling_block("caught"); - let func = llvm::get_param(bcx.fcx().llfn, 0); - let data = llvm::get_param(bcx.fcx().llfn, 1); - let local_ptr = llvm::get_param(bcx.fcx().llfn, 2); + let func = llvm::get_param(bcx.llfn(), 0); + let data = llvm::get_param(bcx.llfn(), 1); + let local_ptr = llvm::get_param(bcx.llfn(), 2); // We're generating an IR snippet that looks like: // @@ -768,7 +767,7 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, // // More information can be found in libstd's seh.rs implementation. let i64p = Type::i64(ccx).ptr_to(); - let slot = bcx.fcx().alloca(i64p, "slot"); + let slot = bcx.alloca(i64p, "slot"); bcx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None); @@ -812,12 +811,13 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, // function calling it, and that function may already have other personality // functions in play. 
By calling a shim we're guaranteed that our shim will have // the right personality function. -fn trans_gnu_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, +fn trans_gnu_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, + ccx: &CrateContext, func: ValueRef, data: ValueRef, local_ptr: ValueRef, dest: ValueRef) { - let llfn = get_rust_try_fn(bcx.fcx(), &mut |bcx| { + let llfn = get_rust_try_fn(ccx, &mut |bcx| { let ccx = bcx.ccx; // Translates the shims described above: @@ -837,12 +837,12 @@ fn trans_gnu_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, // expected to be `*mut *mut u8` for this to actually work, but that's // managed by the standard library. - let then = bcx.fcx().build_new_block("then"); - let catch = bcx.fcx().build_new_block("catch"); + let then = bcx.build_sibling_block("then"); + let catch = bcx.build_sibling_block("catch"); - let func = llvm::get_param(bcx.fcx().llfn, 0); - let data = llvm::get_param(bcx.fcx().llfn, 1); - let local_ptr = llvm::get_param(bcx.fcx().llfn, 2); + let func = llvm::get_param(bcx.llfn(), 0); + let data = llvm::get_param(bcx.llfn(), 1); + let local_ptr = llvm::get_param(bcx.llfn(), 2); bcx.invoke(func, &[data], then.llbb(), catch.llbb(), None); then.ret(C_i32(ccx, 0)); @@ -854,7 +854,7 @@ fn trans_gnu_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, // rust_try ignores the selector. let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); - let vals = catch.landing_pad(lpad_ty, bcx.ccx.eh_personality(), 1, catch.fcx().llfn); + let vals = catch.landing_pad(lpad_ty, bcx.ccx.eh_personality(), 1, catch.llfn()); catch.add_clause(vals, C_null(Type::i8p(ccx))); let ptr = catch.extract_value(vals, 0); catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(ccx).ptr_to()), None); @@ -869,13 +869,12 @@ fn trans_gnu_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, // Helper function to give a Block to a closure to translate a shim function. // This is currently primarily used for the `try` intrinsic functions above. -fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, +fn gen_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &str, inputs: Vec<Ty<'tcx>>, output: Ty<'tcx>, - trans: &mut for<'b> FnMut(BlockAndBuilder<'b, 'tcx>)) + trans: &mut for<'b> FnMut(Builder<'b, 'tcx>)) -> ValueRef { - let ccx = fcx.ccx; let sig = ccx.tcx().mk_fn_sig(inputs.into_iter(), output, false); let rust_fn_ty = ccx.tcx().mk_fn_ptr(ccx.tcx().mk_bare_fn(ty::BareFnTy { @@ -884,8 +883,8 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, sig: ty::Binder(sig) })); let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty); - let fcx = FunctionContext::new(ccx, llfn); - trans(fcx.get_entry_block()); + let bcx = Builder::new_block(ccx, llfn, "entry-block"); + trans(bcx); llfn } @@ -893,10 +892,9 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, // catch exceptions. // // This function is only generated once and is then cached.
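Annotation: gen_fn builds the shim, and get_rust_try_fn (next hunk) memoizes it through a Cell on the CrateContext, matching the rust_try_fn().get()/set() calls visible below. The pattern in miniature (a sketch with made-up types; the real cache stores a ValueRef):

    use std::cell::Cell;

    // Stand-in for the CrateContext cache slot.
    struct Ctx {
        rust_try_fn: Cell<Option<u64>>,
    }

    impl Ctx {
        // Generate on first use; return the cached handle afterwards.
        fn get_rust_try_fn(&self, gen: impl FnOnce() -> u64) -> u64 {
            if let Some(f) = self.rust_try_fn.get() {
                return f;
            }
            let f = gen();
            self.rust_try_fn.set(Some(f));
            f
        }
    }

    fn main() {
        let cx = Ctx { rust_try_fn: Cell::new(None) };
        assert_eq!(cx.get_rust_try_fn(|| 42), 42);
        assert_eq!(cx.get_rust_try_fn(|| 7), 42); // second call hits the cache
    }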
-fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, - trans: &mut for<'b> FnMut(BlockAndBuilder<'b, 'tcx>)) +fn get_rust_try_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + trans: &mut for<'b> FnMut(Builder<'b, 'tcx>)) -> ValueRef { - let ccx = fcx.ccx; if let Some(llfn) = ccx.rust_try_fn().get() { return llfn; } @@ -910,7 +908,7 @@ fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, sig: ty::Binder(tcx.mk_fn_sig(iter::once(i8p), tcx.mk_nil(), false)), })); let output = tcx.types.i32; - let rust_try = gen_fn(fcx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans); + let rust_try = gen_fn(ccx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans); ccx.rust_try_fn().set(Some(rust_try)); return rust_try } @@ -920,7 +918,7 @@ fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) { } fn generic_simd_intrinsic<'a, 'tcx>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, name: &str, callee_ty: Ty<'tcx>, llargs: &[ValueRef], diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs index cf50e7be2afb5..aecba2f57e52c 100644 --- a/src/librustc_trans/meth.rs +++ b/src/librustc_trans/meth.rs @@ -13,6 +13,7 @@ use llvm::{ValueRef, get_params}; use rustc::traits; use callee::{Callee, CalleeData}; use common::*; +use builder::Builder; use consts; use declare; use glue; @@ -27,7 +28,7 @@ use rustc::ty; const VTABLE_OFFSET: usize = 3; /// Extracts a method from a trait object's vtable, at the specified index. -pub fn get_virtual_method<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, +pub fn get_virtual_method<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, llvtable: ValueRef, vtable_index: usize) -> ValueRef { @@ -75,10 +76,9 @@ pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, let llfn = declare::define_internal_fn(ccx, &function_name, callee.ty); attributes::set_frame_pointer_elimination(ccx, llfn); - let fcx = FunctionContext::new(ccx, llfn); - let bcx = fcx.get_entry_block(); + let bcx = Builder::new_block(ccx, llfn, "entry-block"); - let mut llargs = get_params(fcx.llfn); + let mut llargs = get_params(llfn); let fn_ret = callee.ty.fn_ret(); let fn_ty = callee.direct_fn_type(ccx, &[]); diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index 0321417b153aa..6d92cd99fbeb9 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -14,10 +14,11 @@ use rustc::middle::lang_items; use rustc::ty::{self, layout}; use rustc::mir; use abi::{Abi, FnType, ArgType}; -use adt::{self, MaybeSizedValue}; +use adt; use base::{self, Lifetime}; use callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual}; -use common::{self, BlockAndBuilder, Funclet}; +use builder::Builder; +use common::{self, Funclet}; use common::{C_bool, C_str_slice, C_struct, C_u32, C_undef}; use consts; use Disr; @@ -36,14 +37,14 @@ use std::cmp; use super::{MirContext, LocalRef}; use super::analyze::CleanupKind; use super::constant::Const; -use super::lvalue::{LvalueRef}; +use super::lvalue::LvalueRef; use super::operand::OperandRef; use super::operand::OperandValue::{Pair, Ref, Immediate}; impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_block(&mut self, bb: mir::BasicBlock, funclets: &IndexVec<mir::BasicBlock, Option<Funclet>>) { - let mut bcx = self.build_block(bb); + let mut bcx = self.get_builder(bb); let data = &self.mir[bb]; debug!("trans_block({:?}={:?})", bb, data); @@ -57,7 +58,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let cleanup_pad = funclet.map(|lp| lp.cleanuppad()); let cleanup_bundle = funclet.map(|l| l.bundle()); - let funclet_br =
|this: &Self, bcx: BlockAndBuilder, bb: mir::BasicBlock| { + let funclet_br = |this: &Self, bcx: Builder, bb: mir::BasicBlock| { let lltarget = this.blocks[bb]; if let Some(cp) = cleanup_pad { match this.cleanup_kinds[bb] { @@ -84,7 +85,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { debug!("llblock: creating cleanup trampoline for {:?}", target); let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target); - let trampoline = this.fcx.build_new_block(name); + let trampoline = this.new_block(name); trampoline.cleanup_ret(cp, Some(lltarget)); trampoline.llbb() } @@ -208,7 +209,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { }; let llslot = match op.val { Immediate(_) | Pair(..) => { - let llscratch = bcx.fcx().alloca(ret.original_ty, "ret"); + let llscratch = bcx.alloca(ret.original_ty, "ret"); self.store_operand(&bcx, llscratch, op, None); llscratch } @@ -241,20 +242,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { return; } - let lvalue = self.trans_lvalue(&bcx, location); + let mut lvalue = self.trans_lvalue(&bcx, location); let drop_fn = glue::get_drop_glue(bcx.ccx, ty); let drop_ty = glue::get_drop_glue_type(bcx.ccx.shared(), ty); - let ptr = if bcx.ccx.shared().type_is_sized(ty) { - let value = if drop_ty != ty { - bcx.pointercast(lvalue.llval, type_of::type_of(bcx.ccx, drop_ty).ptr_to()) - } else { - lvalue.llval - }; - MaybeSizedValue::sized(value) - } else { - MaybeSizedValue::unsized_(lvalue.llval, lvalue.llextra) - }; - let args = &[ptr.value, ptr.meta][..1 + ptr.has_meta() as usize]; + if bcx.ccx.shared().type_is_sized(ty) && drop_ty != ty { + lvalue.llval = bcx.pointercast( + lvalue.llval, type_of::type_of(bcx.ccx, drop_ty).ptr_to()); + } + let args = &[lvalue.llval, lvalue.llextra][..1 + lvalue.has_extra() as usize]; if let Some(unwind) = unwind { bcx.invoke( drop_fn, @@ -301,7 +296,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Create the failure block and the conditional branch to it. let lltarget = llblock(self, target); - let panic_block = self.fcx.build_new_block("panic"); + let panic_block = self.new_block("panic"); if expected { bcx.cond_br(cond, lltarget, panic_block.llbb()); } else { @@ -584,15 +579,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { fn_ty.apply_attrs_callsite(invokeret); if let Some((_, target)) = *destination { - let ret_bcx = self.build_block(target); - ret_bcx.at_start(|ret_bcx| { - self.set_debug_loc(&ret_bcx, terminator.source_info); - let op = OperandRef { - val: Immediate(invokeret), - ty: sig.output(), - }; - self.store_return(&ret_bcx, ret_dest, fn_ty.ret, op); - }); + let ret_bcx = self.get_builder(target); + self.set_debug_loc(&ret_bcx, terminator.source_info); + let op = OperandRef { + val: Immediate(invokeret), + ty: sig.output(), + }; + self.store_return(&ret_bcx, ret_dest, fn_ty.ret, op); } } else { let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle); @@ -613,7 +606,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } fn trans_argument(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, op: OperandRef<'tcx>, llargs: &mut Vec<ValueRef>, fn_ty: &FnType, @@ -658,7 +651,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let (mut llval, by_ref) = match op.val { Immediate(_) | Pair(..)
=> { if arg.is_indirect() || arg.cast.is_some() { - let llscratch = bcx.fcx().alloca(arg.original_ty, "arg"); + let llscratch = bcx.alloca(arg.original_ty, "arg"); self.store_operand(bcx, llscratch, op, None); (llscratch, true) } else { @@ -689,7 +682,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } fn trans_arguments_untupled(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, operand: &mir::Operand<'tcx>, llargs: &mut Vec<ValueRef>, fn_ty: &FnType, @@ -706,9 +699,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Handle both by-ref and immediate tuples. match tuple.val { Ref(llval) => { - let base = adt::MaybeSizedValue::sized(llval); for (n, &ty) in arg_types.iter().enumerate() { - let ptr = adt::trans_field_ptr(bcx, tuple.ty, base, Disr(0), n); + let ptr = LvalueRef::new_sized_ty(llval, tuple.ty); + let ptr = ptr.trans_field_ptr(bcx, n); let val = if common::type_is_fat_ptr(bcx.ccx, ty) { let (lldata, llextra) = base::load_fat_ptr(bcx, ptr, ty); Pair(lldata, llextra) @@ -765,13 +758,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } - fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>) -> ValueRef { + fn get_personality_slot(&mut self, bcx: &Builder<'a, 'tcx>) -> ValueRef { let ccx = bcx.ccx; if let Some(slot) = self.llpersonalityslot { slot } else { let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); - let slot = bcx.fcx().alloca(llretty, "personalityslot"); + let slot = bcx.alloca(llretty, "personalityslot"); self.llpersonalityslot = Some(slot); Lifetime::Start.call(bcx, slot); slot @@ -790,15 +783,15 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { return self.blocks[target_bb]; } - let target = self.build_block(target_bb); + let target = self.get_builder(target_bb); - let bcx = self.fcx.build_new_block("cleanup"); + let bcx = self.new_block("cleanup"); self.landing_pads[target_bb] = Some(bcx.llbb()); let ccx = bcx.ccx; let llpersonality = self.ccx.eh_personality(); let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); - let llretval = bcx.landing_pad(llretty, llpersonality, 1, self.fcx.llfn); + let llretval = bcx.landing_pad(llretty, llpersonality, 1, self.llfn); bcx.set_cleanup(llretval); let slot = self.get_personality_slot(&bcx); bcx.store(llretval, slot, None); @@ -808,18 +801,24 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { fn unreachable_block(&mut self) -> BasicBlockRef { self.unreachable_block.unwrap_or_else(|| { - let bl = self.fcx.build_new_block("unreachable"); + let bl = self.new_block("unreachable"); bl.unreachable(); self.unreachable_block = Some(bl.llbb()); bl.llbb() }) } - pub fn build_block(&self, bb: mir::BasicBlock) -> BlockAndBuilder<'a, 'tcx> { - BlockAndBuilder::new(self.blocks[bb], self.fcx) + pub fn new_block(&self, name: &str) -> Builder<'a, 'tcx> { + Builder::new_block(self.ccx, self.llfn, name) + } + + pub fn get_builder(&self, bb: mir::BasicBlock) -> Builder<'a, 'tcx> { + let builder = Builder::with_ccx(self.ccx); + builder.position_at_end(self.blocks[bb]); + builder } - fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>, + fn make_return_dest(&mut self, bcx: &Builder<'a, 'tcx>, dest: &mir::Lvalue<'tcx>, fn_ret_ty: &ArgType, llargs: &mut Vec<ValueRef>, is_intrinsic: bool) -> ReturnDest { // If the return is ignored, we can just return a do-nothing ReturnDest @@ -836,14 +835,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { return if fn_ret_ty.is_indirect() { // Odd, but possible, case: we have an operand temporary, // but the calling convention has an indirect return.
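Annotation: what "the calling convention has an indirect return" means, sketched at the source level (hypothetical types; the real decision lives in the ABI code): for a large return value the caller provides the destination and passes a pointer to it as a hidden argument, which is exactly what pushing the temporary onto llargs models in the next hunk.

    // Hand-written analogue of an sret-style call.
    #[repr(C)]
    #[derive(Default)]
    struct Big {
        a: [u64; 8],
    }

    // The callee writes through the hidden out-pointer instead of
    // returning the aggregate by value.
    fn produce_into(out: &mut Big) {
        out.a[0] = 1;
    }

    fn main() {
        let mut tmp = Big::default(); // plays the role of the "tmp_ret" alloca
        produce_into(&mut tmp);
        assert_eq!(tmp.a[0], 1);
    }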
- let tmp = base::alloc_ty(bcx, ret_ty, "tmp_ret"); + let tmp = bcx.alloca_ty(ret_ty, "tmp_ret"); llargs.push(tmp); ReturnDest::IndirectOperand(tmp, index) } else if is_intrinsic { // Currently, intrinsics always need a location to store // the result, so we create a temporary alloca for the // result - let tmp = base::alloc_ty(bcx, ret_ty, "tmp_ret"); + let tmp = bcx.alloca_ty(ret_ty, "tmp_ret"); ReturnDest::IndirectOperand(tmp, index) } else { ReturnDest::DirectOperand(index) @@ -864,7 +863,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } } - fn trans_transmute(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>, + fn trans_transmute(&mut self, bcx: &Builder<'a, 'tcx>, src: &mir::Operand<'tcx>, dst: LvalueRef<'tcx>) { let mut val = self.trans_operand(bcx, src); if let ty::TyFnDef(def_id, substs, _) = val.ty.sty { @@ -895,7 +894,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Stores the return value of a function call into its final location. fn store_return(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, dest: ReturnDest, ret_ty: ArgType, op: OperandRef<'tcx>) { @@ -911,7 +910,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { DirectOperand(index) => { // If there is a cast, we have to store and reload. let op = if ret_ty.cast.is_some() { - let tmp = base::alloc_ty(bcx, op.ty, "tmp_ret"); + let tmp = bcx.alloca_ty(op.ty, "tmp_ret"); ret_ty.store(bcx, op.immediate(), tmp); self.trans_load(bcx, tmp, op.ty) } else { diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 700894c255da6..13e659a5ae0e8 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -18,16 +18,17 @@ use rustc::hir::def_id::DefId; use rustc::infer::TransNormalize; use rustc::mir; use rustc::mir::tcx::LvalueTy; -use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; +use rustc::ty::{self, layout, Ty, TyCtxt, TypeFoldable}; use rustc::ty::cast::{CastTy, IntTy}; use rustc::ty::subst::Substs; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use {abi, adt, base, Disr, machine}; use callee::Callee; -use common::{self, BlockAndBuilder, CrateContext, const_get_elt, val_ty}; +use builder::Builder; +use common::{self, CrateContext, const_get_elt, val_ty}; use common::{C_array, C_bool, C_bytes, C_floating_f64, C_integral, C_big_integral}; -use common::{C_null, C_struct, C_str_slice, C_undef, C_uint}; -use common::{const_to_opt_u128}; +use common::{C_null, C_struct, C_str_slice, C_undef, C_uint, C_vector, is_undef}; +use common::const_to_opt_u128; use consts; use monomorphize::{self, Instance}; use type_of; @@ -548,16 +549,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { mir::AggregateKind::Adt(..) | mir::AggregateKind::Closure(..)
| mir::AggregateKind::Tuple => { - let disr = match *kind { - mir::AggregateKind::Adt(adt_def, index, _, _) => { - Disr::from(adt_def.variants[index].disr_val) - } - _ => Disr(0) - }; - Const::new( - adt::trans_const(self.ccx, dest_ty, disr, &fields), - dest_ty - ) + Const::new(trans_const(self.ccx, dest_ty, kind, &fields), dest_ty) } } } @@ -900,7 +892,7 @@ pub fn const_scalar_checked_binop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_constant(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, constant: &mir::Constant<'tcx>) -> Const<'tcx> { @@ -945,3 +937,159 @@ pub fn trans_static_initializer(ccx: &CrateContext, def_id: DefId) let instance = Instance::mono(ccx.shared(), def_id); MirConstContext::trans_def(ccx, instance, IndexVec::new()).map(|c| c.llval) } + +/// Construct a constant value, suitable for initializing a +/// GlobalVariable, given a case and constant values for its fields. +/// Note that this may have a different LLVM type (and different +/// alignment!) from the representation's `type_of`, so it needs a +/// pointer cast before use. +/// +/// The LLVM type system does not directly support unions, and only +/// pointers can be bitcast, so a constant (and, by extension, the +/// GlobalVariable initialized by it) will have a type that can vary +/// depending on which case of an enum it is. +/// +/// To understand the alignment situation, consider `enum E { V64(u64), +/// V32(u32, u32) }` on Windows. The type has 8-byte alignment to +/// accommodate the u64, but `V32(x, y)` would have LLVM type `{i32, +/// i32, i32}`, which is 4-byte aligned. +/// +/// Currently the returned value has the same size as the type, but +/// this could be changed in the future to avoid allocating unnecessary +/// space after values of shorter-than-maximum cases. +fn trans_const<'a, 'tcx>( + ccx: &CrateContext<'a, 'tcx>, + t: Ty<'tcx>, + kind: &mir::AggregateKind, + vals: &[ValueRef] +) -> ValueRef { + let l = ccx.layout_of(t); + let dl = &ccx.tcx().data_layout; + let variant_index = match *kind { + mir::AggregateKind::Adt(_, index, _, _) => index, + _ => 0, + }; + match *l { + layout::CEnum { discr: d, min, max, .. } => { + let discr = match *kind { + mir::AggregateKind::Adt(adt_def, _, _, _) => { + Disr::from(adt_def.variants[variant_index].disr_val) + }, + _ => Disr(0), + }; + assert_eq!(vals.len(), 0); + adt::assert_discr_in_range(Disr(min), Disr(max), discr); + C_integral(Type::from_integer(ccx, d), discr.0, true) + } + layout::General { discr: d, ref variants, .. } => { + let variant = &variants[variant_index]; + let lldiscr = C_integral(Type::from_integer(ccx, d), variant_index as u64, true); + let mut vals_with_discr = vec![lldiscr]; + vals_with_discr.extend_from_slice(vals); + let mut contents = build_const_struct(ccx, &variant, &vals_with_discr[..]); + let needed_padding = l.size(dl).bytes() - variant.stride().bytes(); + if needed_padding > 0 { + contents.push(padding(ccx, needed_padding)); + } + C_struct(ccx, &contents[..], false) + } + layout::UntaggedUnion { ref variants, .. }=> { + assert_eq!(variant_index, 0); + let contents = build_const_union(ccx, variants, vals[0]); + C_struct(ccx, &contents, variants.packed) + } + layout::Univariant { ref variant, .. } => { + assert_eq!(variant_index, 0); + let contents = build_const_struct(ccx, &variant, vals); + C_struct(ccx, &contents[..], variant.packed) + } + layout::Vector { .. } => { + C_vector(vals) + } + layout::RawNullablePointer { nndiscr, .. 
} => { + let nnty = adt::compute_fields(ccx, t, nndiscr as usize, false)[0]; + if variant_index as u64 == nndiscr { + assert_eq!(vals.len(), 1); + vals[0] + } else { + C_null(type_of::sizing_type_of(ccx, nnty)) + } + } + layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { + if variant_index as u64 == nndiscr { + C_struct(ccx, &build_const_struct(ccx, &nonnull, vals), false) + } else { + let fields = adt::compute_fields(ccx, t, nndiscr as usize, false); + let vals = fields.iter().map(|&ty| { + // Always use null even if it's not the `discrfield`th + // field; see #8506. + C_null(type_of::sizing_type_of(ccx, ty)) + }).collect::<Vec<_>>(); + C_struct(ccx, &build_const_struct(ccx, &nonnull, &vals[..]), false) + } + } + _ => bug!("trans_const: cannot handle type {} represented as {:#?}", t, l) + } +} + +/// Building structs is a little complicated, because we might need to +/// insert padding if a field's value is less aligned than its type. +/// +/// Continuing the example from `trans_const`, a value of type `(u32, +/// E)` should have the `E` at offset 8, but if that field's +/// initializer is 4-byte aligned then simply translating the tuple as +/// a two-element struct will locate it at offset 4, and accesses to it +/// will read the wrong memory. +fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + st: &layout::Struct, + vals: &[ValueRef]) + -> Vec<ValueRef> { + assert_eq!(vals.len(), st.offsets.len()); + + if vals.len() == 0 { + return Vec::new(); + } + + // offset of current value + let mut offset = 0; + let mut cfields = Vec::new(); + cfields.reserve(st.offsets.len()*2); + + let parts = st.field_index_by_increasing_offset().map(|i| { + (&vals[i], st.offsets[i].bytes()) + }); + for (&val, target_offset) in parts { + if offset < target_offset { + cfields.push(padding(ccx, target_offset - offset)); + offset = target_offset; + } + assert!(!is_undef(val)); + cfields.push(val); + offset += machine::llsize_of_alloc(ccx, val_ty(val)); + } + + if offset < st.stride().bytes() { + cfields.push(padding(ccx, st.stride().bytes() - offset)); + } + + cfields +} + +fn build_const_union<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + un: &layout::Union, + field_val: ValueRef) + -> Vec<ValueRef> { + let mut cfields = vec![field_val]; + + let offset = machine::llsize_of_alloc(ccx, val_ty(field_val)); + let size = un.stride().bytes(); + if offset != size { + cfields.push(padding(ccx, size - offset)); + } + + cfields +} + +fn padding(ccx: &CrateContext, size: u64) -> ValueRef { + C_undef(Type::array(&Type::i8(ccx), size)) +} diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 0cd7f007c5df9..bd6e70639bba5 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -9,18 +9,20 @@ // except according to those terms.
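Annotation, before the lvalue.rs hunks continue: a minimal model of the padding walk in build_const_struct above, using plain integers instead of LLVM values (Ok(n) stands for an n-byte field, Err(n) for n bytes of undef padding; the names are mine, not the compiler's):

    fn layout_with_padding(fields: &[(u64, u64)], stride: u64) -> Vec<Result<u64, u64>> {
        // Each field is (size, target offset), already in increasing-offset order.
        let mut out = Vec::new();
        let mut offset = 0;
        for &(size, target) in fields {
            if offset < target {
                out.push(Err(target - offset)); // pad up to the field's offset
                offset = target;
            }
            out.push(Ok(size));
            offset += size;
        }
        if offset < stride {
            out.push(Err(stride - offset)); // tail padding up to the stride
        }
        out
    }

    fn main() {
        // (u32, u64) with 8-byte alignment: 4 bytes of padding after the u32.
        assert_eq!(
            layout_with_padding(&[(4, 0), (8, 8)], 16),
            vec![Ok(4), Err(4), Ok(8)]
        );
    }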
use llvm::ValueRef; -use rustc::ty::{self, Ty, TypeFoldable}; +use rustc::ty::{self, layout, Ty, TypeFoldable}; use rustc::mir; use rustc::mir::tcx::LvalueTy; use rustc_data_structures::indexed_vec::Idx; use adt; -use base; -use common::{self, BlockAndBuilder, CrateContext, C_uint, C_undef}; +use builder::Builder; +use common::{self, CrateContext, C_uint, C_undef}; use consts; use machine; use type_of::type_of; use type_of; -use Disr; +use type_::Type; +use value::Value; +use glue; use std::ptr; @@ -39,22 +41,24 @@ pub struct LvalueRef<'tcx> { pub ty: LvalueTy<'tcx>, } -impl<'tcx> LvalueRef<'tcx> { +impl<'a, 'tcx> LvalueRef<'tcx> { pub fn new_sized(llval: ValueRef, lvalue_ty: LvalueTy<'tcx>) -> LvalueRef<'tcx> { LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty } } - pub fn alloca<'a>(bcx: &BlockAndBuilder<'a, 'tcx>, - ty: Ty<'tcx>, - name: &str) - -> LvalueRef<'tcx> - { - assert!(!ty.has_erasable_regions()); - let lltemp = base::alloc_ty(bcx, ty, name); - LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty)) + pub fn new_sized_ty(llval: ValueRef, ty: Ty<'tcx>) -> LvalueRef<'tcx> { + LvalueRef::new_sized(llval, LvalueTy::from_ty(ty)) + } + + pub fn new_unsized_ty(llval: ValueRef, llextra: ValueRef, ty: Ty<'tcx>) -> LvalueRef<'tcx> { + LvalueRef { + llval: llval, + llextra: llextra, + ty: LvalueTy::from_ty(ty), + } } - pub fn len<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef { + pub fn len(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef { let ty = self.ty.to_ty(ccx.tcx()); match ty.sty { ty::TyArray(_, n) => common::C_uint(ccx, n), @@ -65,17 +69,170 @@ impl<'tcx> LvalueRef<'tcx> { _ => bug!("unexpected type `{}` in LvalueRef::len", ty) } } + + pub fn has_extra(&self) -> bool { + !self.llextra.is_null() + } + + fn struct_field_ptr( + self, + bcx: &Builder<'a, 'tcx>, + st: &layout::Struct, + fields: &Vec<Ty<'tcx>>, + ix: usize, + needs_cast: bool + ) -> ValueRef { + let fty = fields[ix]; + let ccx = bcx.ccx; + + let ptr_val = if needs_cast { + let fields = st.field_index_by_increasing_offset().map(|i| { + type_of::in_memory_type_of(ccx, fields[i]) + }).collect::<Vec<_>>(); + let real_ty = Type::struct_(ccx, &fields[..], st.packed); + bcx.pointercast(self.llval, real_ty.ptr_to()) + } else { + self.llval + }; + + // Simple case - we can just GEP the field + // * First field - Always aligned properly + // * Packed struct - There is no alignment padding + // * Field is sized - pointer is properly aligned already + if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed || + bcx.ccx.shared().type_is_sized(fty) { + return bcx.struct_gep(ptr_val, st.memory_index[ix] as usize); + } + + // If the type of the last field is [T] or str, then we don't need to do + // any adjustments + match fty.sty { + ty::TySlice(..) | ty::TyStr => { + return bcx.struct_gep(ptr_val, st.memory_index[ix] as usize); + } + _ => () + } + + // There's no metadata available, log the case and just do the GEP. + if !self.has_extra() { + debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment", + ix, Value(ptr_val)); + return bcx.struct_gep(ptr_val, ix); + } + + // We need to get the pointer manually now. + // We do this by casting to a *i8, then offsetting it by the appropriate amount. + // We do this instead of, say, simply adjusting the pointer from the result of a GEP + // because the field may have an arbitrary alignment in the LLVM representation + // anyway.
+ // + // To demonstrate: + // struct Foo<T: ?Sized> { + // x: u16, + // y: T + // } + // + // The type Foo<Foo<Trait>> is represented in LLVM as { u16, { u16, u8 }}, meaning that + // the `y` field has 16-bit alignment. + + let meta = self.llextra; + + + let offset = st.offsets[ix].bytes(); + let unaligned_offset = C_uint(bcx.ccx, offset); + + // Get the alignment of the field + let (_, align) = glue::size_and_align_of_dst(bcx, fty, meta); + + // Bump the unaligned offset up to the appropriate alignment using the + // following expression: + // + // (unaligned offset + (align - 1)) & -align + + // Calculate offset + let align_sub_1 = bcx.sub(align, C_uint(bcx.ccx, 1u64)); + let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1), + bcx.neg(align)); + + debug!("struct_field_ptr: DST field offset: {:?}", Value(offset)); + + // Cast and adjust pointer + let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(bcx.ccx)); + let byte_ptr = bcx.gep(byte_ptr, &[offset]); + + // Finally, cast back to the type expected + let ll_fty = type_of::in_memory_type_of(bcx.ccx, fty); + debug!("struct_field_ptr: Field type is {:?}", ll_fty); + bcx.pointercast(byte_ptr, ll_fty.ptr_to()) + } + + /// Access a field, at a point when the value's case is known. + pub fn trans_field_ptr(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> ValueRef { + let discr = match self.ty { + LvalueTy::Ty { .. } => 0, + LvalueTy::Downcast { variant_index, .. } => variant_index, + }; + let t = self.ty.to_ty(bcx.tcx()); + let l = bcx.ccx.layout_of(t); + // Note: if this ever needs to generate conditionals (e.g., if we + // decide to do some kind of cdr-coding-like non-unique repr + // someday), it will need to return a possibly-new bcx as well. + match *l { + layout::Univariant { ref variant, .. } => { + assert_eq!(discr, 0); + self.struct_field_ptr(bcx, &variant, + &adt::compute_fields(bcx.ccx, t, 0, false), ix, false) + } + layout::Vector { count, .. } => { + assert_eq!(discr, 0); + assert!((ix as u64) < count); + bcx.struct_gep(self.llval, ix) + } + layout::General { discr: d, ref variants, .. } => { + let mut fields = adt::compute_fields(bcx.ccx, t, discr, false); + fields.insert(0, d.to_ty(&bcx.tcx(), false)); + self.struct_field_ptr(bcx, &variants[discr], &fields, ix + 1, true) + } + layout::UntaggedUnion { .. } => { + let fields = adt::compute_fields(bcx.ccx, t, 0, false); + let ty = type_of::in_memory_type_of(bcx.ccx, fields[ix]); + bcx.pointercast(self.llval, ty.ptr_to()) + } + layout::RawNullablePointer { nndiscr, .. } | + layout::StructWrappedNullablePointer { nndiscr, .. } if discr as u64 != nndiscr => { + let nullfields = adt::compute_fields(bcx.ccx, t, (1-nndiscr) as usize, false); + // The unit-like case might have a nonzero number of unit-like fields. + // (e.g., Result of Either with (), as one side.) + let ty = type_of::type_of(bcx.ccx, nullfields[ix]); + assert_eq!(machine::llsize_of_alloc(bcx.ccx, ty), 0); + bcx.pointercast(self.llval, ty.ptr_to()) + } + layout::RawNullablePointer { nndiscr, .. } => { + let nnty = adt::compute_fields(bcx.ccx, t, nndiscr as usize, false)[0]; + assert_eq!(ix, 0); + assert_eq!(discr as u64, nndiscr); + let ty = type_of::type_of(bcx.ccx, nnty); + bcx.pointercast(self.llval, ty.ptr_to()) + } + layout::StructWrappedNullablePointer { ref nonnull, nndiscr, ..
} => { + assert_eq!(discr as u64, nndiscr); + self.struct_field_ptr(bcx, &nonnull, + &adt::compute_fields(bcx.ccx, t, discr, false), ix, false) + } + _ => bug!("element access in type without elements: {} represented as {:#?}", t, l) + } + } } impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_lvalue(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, lvalue: &mir::Lvalue<'tcx>) -> LvalueRef<'tcx> { debug!("trans_lvalue(lvalue={:?})", lvalue); let ccx = bcx.ccx; - let tcx = bcx.tcx(); + let tcx = ccx.tcx(); if let mir::Lvalue::Local(index) = *lvalue { match self.locals[index] { @@ -134,26 +291,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let (llprojected, llextra) = match projection.elem { mir::ProjectionElem::Deref => bug!(), mir::ProjectionElem::Field(ref field, _) => { - let base_ty = tr_base.ty.to_ty(tcx); - let discr = match tr_base.ty { - LvalueTy::Ty { .. } => 0, - LvalueTy::Downcast { adt_def: _, substs: _, variant_index: v } => v, - }; - let discr = discr as u64; - let is_sized = self.ccx.shared().type_is_sized(projected_ty.to_ty(tcx)); - let base = if is_sized { - adt::MaybeSizedValue::sized(tr_base.llval) - } else { - adt::MaybeSizedValue::unsized_(tr_base.llval, tr_base.llextra) - }; - let llprojected = adt::trans_field_ptr(bcx, base_ty, base, Disr(discr), - field.index()); - let llextra = if is_sized { + let llextra = if self.ccx.shared().type_is_sized(projected_ty.to_ty(tcx)) { ptr::null_mut() } else { tr_base.llextra }; - (llprojected, llextra) + (tr_base.trans_field_ptr(bcx, field.index()), llextra) } mir::ProjectionElem::Index(ref index) => { let index = self.trans_operand(bcx, index); @@ -214,7 +357,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Perform an action using the given Lvalue. // If the Lvalue is an empty LocalRef::Operand, then a temporary stack slot // is created first, then used as an operand to update the Lvalue. - pub fn with_lvalue_ref<F, U>(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>, + pub fn with_lvalue_ref<F, U>(&mut self, bcx: &Builder<'a, 'tcx>, lvalue: &mir::Lvalue<'tcx>, f: F) -> U where F: FnOnce(&mut Self, LvalueRef<'tcx>) -> U { @@ -223,9 +366,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { LocalRef::Lvalue(lvalue) => f(self, lvalue), LocalRef::Operand(None) => { let lvalue_ty = self.monomorphized_lvalue_ty(lvalue); - let lvalue = LvalueRef::alloca(bcx, - lvalue_ty, - "lvalue_temp"); + assert!(!lvalue_ty.has_erasable_regions()); + let lltemp = bcx.alloca_ty(lvalue_ty, "lvalue_temp"); + let lvalue = LvalueRef::new_sized(lltemp, LvalueTy::from_ty(lvalue_ty)); let ret = f(self, lvalue); let op = self.trans_load(bcx, lvalue.llval, lvalue_ty); self.locals[index] = LocalRef::Operand(Some(op)); @@ -254,18 +397,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { /// than we are. /// /// nmatsakis: is this still necessary? Not sure.
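Annotation: the DST field access in struct_field_ptr above rounds the unaligned offset up with (offset + (align - 1)) & -align. The same computation on plain integers, for anyone who wants to convince themselves it works (alignments must be powers of two):

    fn align_up(offset: u64, align: u64) -> u64 {
        debug_assert!(align.is_power_of_two());
        // -align in two's complement clears the low bits below the alignment.
        (offset + (align - 1)) & align.wrapping_neg()
    }

    fn main() {
        assert_eq!(align_up(10, 4), 12); // bumped to the next multiple of 4
        assert_eq!(align_up(12, 4), 12); // already aligned, unchanged
        assert_eq!(align_up(1, 8), 8);
    }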
- fn prepare_index(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, - llindex: ValueRef) - -> ValueRef - { - let ccx = bcx.ccx; + fn prepare_index(&mut self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef) -> ValueRef { let index_size = machine::llbitsize_of_real(bcx.ccx, common::val_ty(llindex)); - let int_size = machine::llbitsize_of_real(bcx.ccx, ccx.int_type()); + let int_size = machine::llbitsize_of_real(bcx.ccx, bcx.ccx.int_type()); if index_size < int_size { - bcx.zext(llindex, ccx.int_type()) + bcx.zext(llindex, bcx.ccx.int_type()) } else if index_size > int_size { - bcx.trunc(llindex, ccx.int_type()) + bcx.trunc(llindex, bcx.ccx.int_type()) } else { llindex } diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index dc8c6e89df9a4..eedd7956805b6 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -19,7 +19,8 @@ use rustc::infer::TransNormalize; use rustc::ty::TypeFoldable; use session::config::FullDebugInfo; use base; -use common::{self, BlockAndBuilder, CrateContext, FunctionContext, C_null, Funclet}; +use builder::Builder; +use common::{self, CrateContext, C_null, Funclet}; use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext}; use monomorphize::{self, Instance}; use abi::FnType; @@ -37,7 +38,7 @@ use rustc_data_structures::indexed_vec::{IndexVec, Idx}; pub use self::constant::trans_static_initializer; use self::analyze::CleanupKind; -use self::lvalue::{LvalueRef}; +use self::lvalue::LvalueRef; use rustc::mir::traversal; use self::operand::{OperandRef, OperandValue}; @@ -48,7 +49,7 @@ pub struct MirContext<'a, 'tcx:'a> { debug_context: debuginfo::FunctionDebugContext, - fcx: &'a common::FunctionContext<'a, 'tcx>, + llfn: ValueRef, ccx: &'a CrateContext<'a, 'tcx>, @@ -106,7 +107,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { monomorphize::apply_param_substs(self.ccx.shared(), self.param_substs, value) } - pub fn set_debug_loc(&mut self, bcx: &BlockAndBuilder, source_info: mir::SourceInfo) { + pub fn set_debug_loc(&mut self, bcx: &Builder, source_info: mir::SourceInfo) { let (scope, span) = self.debug_loc(source_info); debuginfo::set_source_location(&self.debug_context, bcx, scope, span); } @@ -198,7 +199,8 @@ impl<'tcx> LocalRef<'tcx> { /////////////////////////////////////////////////////////////////////////// pub fn trans_mir<'a, 'tcx: 'a>( - fcx: &'a FunctionContext<'a, 'tcx>, + ccx: &'a CrateContext<'a, 'tcx>, + llfn: ValueRef, fn_ty: FnType, mir: &'a Mir<'tcx>, instance: Instance<'tcx>, @@ -207,8 +209,8 @@ pub fn trans_mir<'a, 'tcx: 'a>( ) { debug!("fn_ty: {:?}", fn_ty); let debug_context = - debuginfo::create_function_debug_context(fcx.ccx, instance, sig, abi, fcx.llfn, mir); - let bcx = fcx.get_entry_block(); + debuginfo::create_function_debug_context(ccx, instance, sig, abi, llfn, mir); + let bcx = Builder::new_block(ccx, llfn, "entry-block"); let cleanup_kinds = analyze::cleanup_kinds(&mir); @@ -216,20 +218,20 @@ pub fn trans_mir<'a, 'tcx: 'a>( let block_bcxs: IndexVec<mir::BasicBlock, BasicBlockRef> = mir.basic_blocks().indices().map(|bb| { if bb == mir::START_BLOCK { - fcx.new_block("start") + bcx.build_sibling_block("start").llbb() } else { - fcx.new_block(&format!("{:?}", bb)) + bcx.build_sibling_block(&format!("{:?}", bb)).llbb() } }).collect(); // Compute debuginfo scopes from MIR scopes.
- let scopes = debuginfo::create_mir_scopes(fcx, mir, &debug_context); + let scopes = debuginfo::create_mir_scopes(ccx, mir, &debug_context); let mut mircx = MirContext { mir: mir, - fcx: fcx, + llfn: llfn, fn_ty: fn_ty, - ccx: fcx.ccx, + ccx: ccx, llpersonalityslot: None, blocks: block_bcxs, unreachable_block: None, @@ -266,7 +268,9 @@ pub fn trans_mir<'a, 'tcx: 'a>( } debug!("alloc: {:?} ({}) -> lvalue", local, name); - let lvalue = LvalueRef::alloca(&bcx, ty, &name.as_str()); + assert!(!ty.has_erasable_regions()); + let lltemp = bcx.alloca_ty(ty, &name.as_str()); + let lvalue = LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty)); if dbg { let (scope, span) = mircx.debug_loc(source_info); declare_local(&bcx, &mircx.debug_context, name, ty, scope, @@ -278,11 +282,13 @@ pub fn trans_mir<'a, 'tcx: 'a>( // Temporary or return pointer if local == mir::RETURN_POINTER && mircx.fn_ty.ret.is_indirect() { debug!("alloc: {:?} (return pointer) -> lvalue", local); - let llretptr = llvm::get_param(fcx.llfn, 0); + let llretptr = llvm::get_param(llfn, 0); LocalRef::Lvalue(LvalueRef::new_sized(llretptr, LvalueTy::from_ty(ty))) } else if lvalue_locals.contains(local.index()) { debug!("alloc: {:?} -> lvalue", local); - LocalRef::Lvalue(LvalueRef::alloca(&bcx, ty, &format!("{:?}", local))) + assert!(!ty.has_erasable_regions()); + let lltemp = bcx.alloca_ty(ty, &format!("{:?}", local)); + LocalRef::Lvalue(LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty))) } else { // If this is an immediate local, we do not create an // alloca in advance. Instead we wait until we see the @@ -312,9 +318,9 @@ pub fn trans_mir<'a, 'tcx: 'a>( let funclets: IndexVec<mir::BasicBlock, Option<Funclet>> = mircx.cleanup_kinds.iter_enumerated().map(|(bb, cleanup_kind)| { if let CleanupKind::Funclet = *cleanup_kind { - let bcx = mircx.build_block(bb); + let bcx = mircx.get_builder(bb); bcx.set_personality_fn(mircx.ccx.eh_personality()); - if base::wants_msvc_seh(fcx.ccx.sess()) { + if base::wants_msvc_seh(ccx.sess()) { return Some(Funclet::new(bcx.cleanup_pad(None, &[]))); } } @@ -347,13 +353,12 @@ pub fn trans_mir<'a, 'tcx: 'a>( /// Produce, for each argument, a `ValueRef` pointing at the /// argument's value. As arguments are lvalues, these are always /// indirect.
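Annotation: the "spread argument" handled at the top of arg_local_refs below comes from the rust-call ABI, where a closure's arguments arrive packed in one tuple and are unpacked into per-field slots. At the source level the transformation looks like this (illustrative sketch only):

    // The tuple is spread into individual arguments before the call.
    fn call_like(f: impl Fn(u8, u16) -> u32, args: (u8, u16)) -> u32 {
        f(args.0, args.1)
    }

    fn main() {
        assert_eq!(call_like(|a, b| a as u32 + b as u32, (1, 2)), 3);
    }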
-fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, +fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, mircx: &MirContext<'a, 'tcx>, scopes: &IndexVec<mir::VisibilityScope, debuginfo::MirDebugScope>, lvalue_locals: &BitVector) -> Vec<LocalRef<'tcx>> { let mir = mircx.mir; - let fcx = bcx.fcx(); let tcx = bcx.tcx(); let mut idx = 0; let mut llarg_idx = mircx.fn_ty.ret.is_indirect() as usize; @@ -381,7 +386,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, _ => bug!("spread argument isn't a tuple?!") }; - let lltemp = base::alloc_ty(&bcx, arg_ty, &format!("arg{}", arg_index)); + let lltemp = bcx.alloca_ty(arg_ty, &format!("arg{}", arg_index)); for (i, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() { let dst = bcx.struct_gep(lltemp, i); let arg = &mircx.fn_ty.args[idx]; @@ -428,7 +433,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, if arg.pad.is_some() { llarg_idx += 1; } - let llarg = llvm::get_param(fcx.llfn, llarg_idx as c_uint); + let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); llarg_idx += 1; llarg } else if !lvalue_locals.contains(local.index()) && @@ -444,13 +449,13 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, if arg.pad.is_some() { llarg_idx += 1; } - let llarg = llvm::get_param(fcx.llfn, llarg_idx as c_uint); + let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); llarg_idx += 1; let val = if common::type_is_fat_ptr(bcx.ccx, arg_ty) { let meta = &mircx.fn_ty.args[idx]; idx += 1; assert_eq!((meta.cast, meta.pad), (None, None)); - let llmeta = llvm::get_param(fcx.llfn, llarg_idx as c_uint); + let llmeta = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); llarg_idx += 1; OperandValue::Pair(llarg, llmeta) } else { @@ -462,7 +467,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, }; return LocalRef::Operand(Some(operand.unpack_if_pair(bcx))); } else { - let lltemp = base::alloc_ty(&bcx, arg_ty, &format!("arg{}", arg_index)); + let lltemp = bcx.alloca_ty(arg_ty, &format!("arg{}", arg_index)); if common::type_is_fat_ptr(bcx.ccx, arg_ty) { // we pass fat pointers as two words, but we want to // represent them internally as a pointer to two words, @@ -514,7 +519,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, // doesn't actually strip the offset when splitting the closure // environment into its components so it ends up out of bounds. let env_ptr = if !env_ref { - let alloc = bcx.fcx().alloca(common::val_ty(llval), "__debuginfo_env_ptr"); + let alloc = bcx.alloca(common::val_ty(llval), "__debuginfo_env_ptr"); bcx.store(llval, alloc, None); alloc } else { @@ -573,7 +578,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, mod analyze; mod block; mod constant; -mod lvalue; +pub mod lvalue; mod operand; mod rvalue; mod statement; diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index a4af5f9e22cc4..28a247ee612a9 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -14,7 +14,8 @@ use rustc::mir; use rustc_data_structures::indexed_vec::Idx; use base; -use common::{self, BlockAndBuilder}; +use common; +use builder::Builder; use value::Value; use type_of; use type_::Type; @@ -85,8 +86,7 @@ impl<'a, 'tcx> OperandRef<'tcx> { /// If this operand is a Pair, we return an /// Immediate aggregate with the two values.
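Annotation: both the OperandValue::Pair handling in arg_local_refs above and pack_if_pair/unpack_if_pair below rest on the same fact: a fat pointer is exactly two words, a data pointer plus metadata. Observable in plain Rust:

    fn main() {
        let s: &[u8] = &[1, 2, 3];
        // A slice reference decomposes into (data pointer, length).
        let (data, len): (*const u8, usize) = (s.as_ptr(), s.len());
        assert_eq!(len, 3);
        unsafe {
            assert_eq!(*data, 1);
        }
    }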
- pub fn pack_if_pair(mut self, bcx: &BlockAndBuilder<'a, 'tcx>) - -> OperandRef<'tcx> { + pub fn pack_if_pair(mut self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> { if let OperandValue::Pair(a, b) = self.val { // Reconstruct the immediate aggregate. let llty = type_of::type_of(bcx.ccx, self.ty); @@ -107,8 +107,7 @@ impl<'a, 'tcx> OperandRef<'tcx> { /// If this operand is a pair in an Immediate, /// we return a Pair with the two halves. - pub fn unpack_if_pair(mut self, bcx: &BlockAndBuilder<'a, 'tcx>) - -> OperandRef<'tcx> { + pub fn unpack_if_pair(mut self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> { if let OperandValue::Immediate(llval) = self.val { // Deconstruct the immediate aggregate. if common::type_is_imm_pair(bcx.ccx, self.ty) { @@ -136,7 +135,7 @@ impl<'a, 'tcx> OperandRef<'tcx> { impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_load(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, llval: ValueRef, ty: Ty<'tcx>) -> OperandRef<'tcx> @@ -165,7 +164,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } pub fn trans_consume(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, lvalue: &mir::Lvalue<'tcx>) -> OperandRef<'tcx> { @@ -217,7 +216,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } pub fn trans_operand(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, operand: &mir::Operand<'tcx>) -> OperandRef<'tcx> { @@ -242,7 +241,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } pub fn store_operand(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, lldest: ValueRef, operand: OperandRef<'tcx>, align: Option) { diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index dac81468be950..67fb8cf576d62 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -12,13 +12,15 @@ use llvm::{self, ValueRef}; use rustc::ty::{self, Ty}; use rustc::ty::cast::{CastTy, IntTy}; use rustc::ty::layout::Layout; +use rustc::mir::tcx::LvalueTy; use rustc::mir; use middle::lang_items::ExchangeMallocFnLangItem; use asm; use base; +use builder::Builder; use callee::Callee; -use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder}; +use common::{self, val_ty, C_bool, C_null, C_uint}; use common::{C_integral}; use adt; use machine; @@ -35,10 +37,10 @@ use super::lvalue::{LvalueRef}; impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_rvalue(&mut self, - bcx: BlockAndBuilder<'a, 'tcx>, + bcx: Builder<'a, 'tcx>, dest: LvalueRef<'tcx>, rvalue: &mir::Rvalue<'tcx>) - -> BlockAndBuilder<'a, 'tcx> + -> Builder<'a, 'tcx> { debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})", Value(dest.llval), rvalue); @@ -79,7 +81,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // index into the struct, and this case isn't // important enough for it. 
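Annotation: the "ugly alloca" created in the next hunk exists to give an immediate operand a memory address so an unsizing coercion can build a fat pointer to it. The source-level operation being compiled is simply:

    fn main() {
        let arr = [1u8, 2, 3, 4]; // sized immediate value
        let slice: &[u8] = &arr;  // unsize: spill to memory, pair pointer with length
        assert_eq!(slice.len(), 4);
    }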
debug!("trans_rvalue: creating ugly alloca"); - let lltemp = base::alloc_ty(&bcx, operand.ty, "__unsize_temp"); + let lltemp = bcx.alloca_ty(operand.ty, "__unsize_temp"); base::store_ty(&bcx, llval, lltemp, operand.ty); lltemp } @@ -101,7 +103,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::Aggregate(ref kind, ref operands) => { match *kind { - mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => { + mir::AggregateKind::Adt(adt_def, variant_index, substs, active_field_index) => { let disr = Disr::from(adt_def.variants[variant_index].disr_val); let dest_ty = dest.ty.to_ty(bcx.tcx()); adt::trans_set_discr(&bcx, dest_ty, dest.llval, Disr::from(disr)); @@ -109,10 +111,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let op = self.trans_operand(&bcx, operand); // Do not generate stores and GEPis for zero-sized fields. if !common::type_is_zero_size(bcx.ccx, op.ty) { - let val = adt::MaybeSizedValue::sized(dest.llval); + let mut val = LvalueRef::new_sized(dest.llval, dest.ty); let field_index = active_field_index.unwrap_or(i); - let lldest_i = adt::trans_field_ptr(&bcx, dest_ty, val, disr, - field_index); + val.ty = LvalueTy::Downcast { + adt_def: adt_def, + substs: self.monomorphize(&substs), + variant_index: disr.0 as usize, + }; + let lldest_i = val.trans_field_ptr(&bcx, field_index); self.store_operand(&bcx, lldest_i, op, None); } } @@ -170,9 +176,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } pub fn trans_rvalue_operand(&mut self, - bcx: BlockAndBuilder<'a, 'tcx>, + bcx: Builder<'a, 'tcx>, rvalue: &mir::Rvalue<'tcx>) - -> (BlockAndBuilder<'a, 'tcx>, OperandRef<'tcx>) + -> (Builder<'a, 'tcx>, OperandRef<'tcx>) { assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue); @@ -477,7 +483,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } pub fn trans_scalar_binop(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, op: mir::BinOp, lhs: ValueRef, rhs: ValueRef, @@ -552,7 +558,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } pub fn trans_fat_ptr_binop(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, op: mir::BinOp, lhs_addr: ValueRef, lhs_extra: ValueRef, @@ -599,7 +605,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } pub fn trans_scalar_checked_binop(&mut self, - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, op: mir::BinOp, lhs: ValueRef, rhs: ValueRef, @@ -681,7 +687,7 @@ enum OverflowOp { Add, Sub, Mul } -fn get_overflow_intrinsic(oop: OverflowOp, bcx: &BlockAndBuilder, ty: Ty) -> ValueRef { +fn get_overflow_intrinsic(oop: OverflowOp, bcx: &Builder, ty: Ty) -> ValueRef { use syntax::ast::IntTy::*; use syntax::ast::UintTy::*; use rustc::ty::{TyInt, TyUint}; diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs index cc85f68c197ec..48fc9720e4b83 100644 --- a/src/librustc_trans/mir/statement.rs +++ b/src/librustc_trans/mir/statement.rs @@ -11,7 +11,8 @@ use rustc::mir; use base; -use common::{self, BlockAndBuilder}; +use common; +use builder::Builder; use super::MirContext; use super::LocalRef; @@ -20,9 +21,9 @@ use super::super::disr::Disr; impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_statement(&mut self, - bcx: BlockAndBuilder<'a, 'tcx>, + bcx: Builder<'a, 'tcx>, statement: &mir::Statement<'tcx>) - -> BlockAndBuilder<'a, 'tcx> { + -> Builder<'a, 'tcx> { debug!("trans_statement(statement={:?})", statement); self.set_debug_loc(&bcx, statement.source_info); @@ -77,10 +78,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } fn trans_storage_liveness(&self, - bcx: 
BlockAndBuilder<'a, 'tcx>, + bcx: Builder<'a, 'tcx>, lvalue: &mir::Lvalue<'tcx>, intrinsic: base::Lifetime) - -> BlockAndBuilder<'a, 'tcx> { + -> Builder<'a, 'tcx> { if let mir::Lvalue::Local(index) = *lvalue { if let LocalRef::Lvalue(tr_lval) = self.locals[index] { intrinsic.call(&bcx, tr_lval.llval); diff --git a/src/librustc_trans/tvec.rs b/src/librustc_trans/tvec.rs index c09726fda0810..cbcbb02bdc890 100644 --- a/src/librustc_trans/tvec.rs +++ b/src/librustc_trans/tvec.rs @@ -9,28 +9,29 @@ // except according to those terms. use llvm; +use builder::Builder; use llvm::ValueRef; use common::*; use rustc::ty::Ty; pub fn slice_for_each<'a, 'tcx, F>( - bcx: &BlockAndBuilder<'a, 'tcx>, + bcx: &Builder<'a, 'tcx>, data_ptr: ValueRef, unit_ty: Ty<'tcx>, len: ValueRef, f: F -) -> BlockAndBuilder<'a, 'tcx> where F: FnOnce(&BlockAndBuilder<'a, 'tcx>, ValueRef) { +) -> Builder<'a, 'tcx> where F: FnOnce(&Builder<'a, 'tcx>, ValueRef) { // Special-case vectors with elements of size 0 so they don't go out of bounds (#9890) let zst = type_is_zero_size(bcx.ccx, unit_ty); - let add = |bcx: &BlockAndBuilder, a, b| if zst { + let add = |bcx: &Builder, a, b| if zst { bcx.add(a, b) } else { bcx.inbounds_gep(a, &[b]) }; - let body_bcx = bcx.fcx().build_new_block("slice_loop_body"); - let next_bcx = bcx.fcx().build_new_block("slice_loop_next"); - let header_bcx = bcx.fcx().build_new_block("slice_loop_header"); + let body_bcx = bcx.build_sibling_block("slice_loop_body"); + let next_bcx = bcx.build_sibling_block("slice_loop_next"); + let header_bcx = bcx.build_sibling_block("slice_loop_header"); let start = if zst { C_uint(bcx.ccx, 0usize) diff --git a/src/test/codegen/stores.rs b/src/test/codegen/stores.rs index 9141b7245e35a..6135f49eb711b 100644 --- a/src/test/codegen/stores.rs +++ b/src/test/codegen/stores.rs @@ -24,8 +24,8 @@ pub struct Bytes { // dependent alignment #[no_mangle] pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) { -// CHECK: %arg1 = alloca [4 x i8] // CHECK: [[TMP:%.+]] = alloca i32 +// CHECK: %arg1 = alloca [4 x i8] // CHECK: store i32 %1, i32* [[TMP]] // CHECK: [[Y8:%[0-9]+]] = bitcast [4 x i8]* %arg1 to i8* // CHECK: [[TMP8:%[0-9]+]] = bitcast i32* [[TMP]] to i8* @@ -38,8 +38,8 @@ pub fn small_array_alignment(x: &mut [i8; 4], y: [i8; 4]) { // dependent alignment #[no_mangle] pub fn small_struct_alignment(x: &mut Bytes, y: Bytes) { -// CHECK: %arg1 = alloca %Bytes // CHECK: [[TMP:%.+]] = alloca i32 +// CHECK: %arg1 = alloca %Bytes // CHECK: store i32 %1, i32* [[TMP]] // CHECK: [[Y8:%[0-9]+]] = bitcast %Bytes* %arg1 to i8* // CHECK: [[TMP8:%[0-9]+]] = bitcast i32* [[TMP]] to i8*