From d9df914e32d72a453fe43ce77498cc0229f99d7a Mon Sep 17 00:00:00 2001 From: gitoleg Date: Mon, 14 Aug 2023 17:13:52 +0300 Subject: [PATCH 01/15] [CIR][CIRGen] adds bitfields support. Fixes #13 --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 23 ++ clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 229 +++++++++++++++++- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 139 +++++------ clang/lib/CIR/CodeGen/CIRGenFunction.h | 15 +- clang/lib/CIR/CodeGen/CIRGenRecordLayout.h | 10 + clang/lib/CIR/CodeGen/CIRGenValue.h | 34 +++ .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 6 +- 7 files changed, 375 insertions(+), 81 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index c372cffb7d75..3b6fd8d7de23 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -377,6 +377,11 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return getConstInt( loc, t, isSigned ? intVal.getSExtValue() : intVal.getZExtValue()); } + mlir::Value getConstAPInt(mlir::Location loc, mlir::Type typ, + const llvm::APInt &val) { + return create(loc, typ, + getAttr(typ, val)); + } mlir::cir::ConstantOp getBool(bool state, mlir::Location loc) { return create(loc, getBoolTy(), getCIRBoolAttr(state)); @@ -625,6 +630,24 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::Value createBitcast(mlir::Value src, mlir::Type newTy) { return createCast(mlir::cir::CastKind::bitcast, src, newTy); } + + //===--------------------------------------------------------------------===// + // Misc + //===--------------------------------------------------------------------===// + + mlir::Value createBinop(mlir::Value lhs, mlir::cir::BinOpKind kind, + const llvm::APInt &rhs) { + return create( + lhs.getLoc(), lhs.getType(), kind, lhs, + getConstAPInt(lhs.getLoc(), lhs.getType(), rhs)); + } + + mlir::Value createShift(mlir::Value lhs, const llvm::APInt &rhs, + bool isShiftLeft) { + return create( + lhs.getLoc(), lhs.getType(), lhs, + getConstAPInt(lhs.getLoc(), lhs.getType(), rhs), isShiftLeft); + } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index e68f919f6b91..c9f62bec6c4b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -22,6 +22,7 @@ #include "clang/AST/GlobalDecl.h" #include "clang/Basic/Builtins.h" #include "clang/CIR/Dialect/IR/CIRDialect.h" +#include "clang/CIR/Dialect/IR/CIROpsEnums.h" #include "clang/CIR/Dialect/IR/CIRTypes.h" #include "llvm/Support/Casting.h" #include "llvm/Support/ErrorHandling.h" @@ -134,7 +135,19 @@ static Address buildPointerWithAlignment(const Expr *E, *BaseInfo = InnerBaseInfo; if (isa(CE)) { - llvm_unreachable("NYI"); + LValueBaseInfo TargetTypeBaseInfo; + + CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment( + E->getType(), &TargetTypeBaseInfo); + + // If the source l-value is opaque, honor the alignment of the + // casted-to type. 
+ if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) { + if (BaseInfo) + BaseInfo->mergeForCast(TargetTypeBaseInfo); + Addr = Address(Addr.getPointer(), Addr.getElementType(), Align, + IsKnownNonNull); + } } if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) && @@ -197,12 +210,64 @@ static Address buildPointerWithAlignment(const Expr *E, return Address(CGF.buildScalarExpr(E), Align); } +/// Helper method to check if the underlying ABI is AAPCS +static bool isAAPCS(const TargetInfo &TargetInfo) { + return TargetInfo.getABI().startswith("aapcs"); +} + LValue CIRGenFunction::buildLValueForField(LValue base, const FieldDecl *field) { LValueBaseInfo BaseInfo = base.getBaseInfo(); if (field->isBitField()) { - llvm_unreachable("NYI"); + const CIRGenRecordLayout &RL = + CGM.getTypes().getCIRGenRecordLayout(field->getParent()); + const CIRGenBitFieldInfo &Info = RL.getBitFieldInfo(field); + const bool UseVolatile = isAAPCS(CGM.getTarget()) && + CGM.getCodeGenOpts().AAPCSBitfieldWidth && + Info.VolatileStorageSize != 0 && + field->getType() + .withCVRQualifiers(base.getVRQualifiers()) + .isVolatileQualified(); + Address Addr = base.getAddress(); + unsigned Idx = RL.getCIRFieldNo(field); + const RecordDecl *rec = field->getParent(); + if (!UseVolatile) { + if (!IsInPreservedAIRegion && + (!getDebugInfo() || !rec->hasAttr())) { + if (Idx != 0) { + auto loc = getLoc(field->getLocation()); + auto fieldType = convertType(field->getType()); + auto fieldPtr = + mlir::cir::PointerType::get(getBuilder().getContext(), fieldType); + auto sea = getBuilder().create( + loc, fieldPtr, base.getPointer(), field->getName(), Idx); + + Addr = Address(sea->getResult(0), CharUnits::One()); + } + } else { + llvm_unreachable("NYI"); + } + } + const unsigned SS = + UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; + + // Get the access type. + mlir::Type FieldIntTy = + mlir::cir::IntType::get(builder.getContext(), SS, false); + + auto loc = getLoc(field->getLocation()); + if (Addr.getElementType() != FieldIntTy) + Addr = builder.createElementBitCast(loc, Addr, FieldIntTy); + if (UseVolatile) { + llvm_unreachable("NYI"); + } + + QualType fieldType = + field->getType().withCVRQualifiers(base.getVRQualifiers()); + // TODO: Support TBAA for bit fields. + LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource()); + return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo); } // Fields of may-alias structures are may-alais themselves. @@ -354,8 +419,7 @@ static CIRGenCallee buildDirectCallee(CIRGenModule &CGM, GlobalDecl GD) { // When directing calling an inline builtin, call it through it's mangled // name to make it clear it's not the actual builtin. auto Fn = cast(CGF.CurFn); - if (Fn.getName() != FDInlineName && - onlyHasInlineBuiltinDeclaration(FD)) { + if (Fn.getName() != FDInlineName && onlyHasInlineBuiltinDeclaration(FD)) { assert(0 && "NYI"); } @@ -500,12 +564,58 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value value, LValue lvalue, /// method emits the address of the lvalue, then loads the result as an rvalue, /// returning the rvalue. RValue CIRGenFunction::buildLoadOfLValue(LValue LV, SourceLocation Loc) { - assert(LV.isSimple() && "not implemented"); assert(!LV.getType()->isFunctionType()); assert(!(LV.getType()->isConstantMatrixType()) && "not implemented"); - // Everything needs a load. 
- return RValue::get(buildLoadOfScalar(LV, Loc)); + if (LV.isBitField()) + return buildLoadOfBitfieldLValue(LV, Loc); + + if (LV.isSimple()) + return RValue::get(buildLoadOfScalar(LV, Loc)); + llvm_unreachable("NYI"); +} + +RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV, + SourceLocation Loc) { + const CIRGenBitFieldInfo &Info = LV.getBitFieldInfo(); + + // Get the output type. + mlir::Type ResLTy = convertType(LV.getType()); + Address Ptr = LV.getBitFieldAddress(); + mlir::Value Val = builder.createLoad(getLoc(Loc), Ptr); + auto ValWidth = Val.getType().cast().getWidth(); + + bool UseVolatile = LV.isVolatileQualified() && + Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget()); + const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset; + const unsigned StorageSize = + UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; + + if (Info.IsSigned) { + assert(static_cast(Offset + Info.Size) <= StorageSize); + + mlir::Type typ = + mlir::cir::IntType::get(builder.getContext(), ValWidth, true); + Val = builder.createIntCast(Val, typ); + + unsigned HighBits = StorageSize - Offset - Info.Size; + if (HighBits) + Val = builder.createShift(Val, llvm::APInt(ValWidth, HighBits), true); + if (Offset + HighBits) + Val = builder.createShift(Val, llvm::APInt(ValWidth, Offset + HighBits), + false); + } else { + if (Offset) + Val = builder.createShift(Val, llvm::APInt(ValWidth, Offset), false); + + if (static_cast(Offset) + Info.Size < StorageSize) + Val = + builder.createBinop(Val, mlir::cir::BinOpKind::And, + llvm::APInt::getLowBitsSet(ValWidth, Info.Size)); + } + Val = builder.createIntCast(Val, ResLTy); + // EmitScalarRangeCheck(Val, LV.getType(), Loc); //FIXME: TODO + return RValue::get(Val); } void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) { @@ -528,6 +638,94 @@ void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) { buildStoreOfScalar(Src.getScalarVal(), Dst); } +void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, + mlir::Value *Result) { + const CIRGenBitFieldInfo &Info = Dst.getBitFieldInfo(); + mlir::Type ResLTy = getTypes().convertTypeForMem(Dst.getType()); + Address Ptr = Dst.getBitFieldAddress(); + + // Get the source value, truncated to the width of the bit-field. + mlir::Value SrcVal = Src.getScalarVal(); + + // Cast the source to the storage type and shift it into place. + SrcVal = builder.createIntCast(SrcVal, Ptr.getElementType()); + auto SrcWidth = SrcVal.getType().cast().getWidth(); + mlir::Value MaskedVal = SrcVal; + + const bool UseVolatile = + CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() && + Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget()); + const unsigned StorageSize = + UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; + const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset; + // See if there are other bits in the bitfield's storage we'll need to load + // and mask together with source before storing. + if (StorageSize != Info.Size) { + assert(StorageSize > Info.Size && "Invalid bitfield size."); + + mlir::Value Val = buildLoadOfScalar(Dst, Dst.getPointer().getLoc()); + + mlir::Type Ty = SrcVal.getType(); + // Mask the source value as needed. 
+ if (!hasBooleanRepresentation(Dst.getType())) + SrcVal = + builder.createBinop(SrcVal, mlir::cir::BinOpKind::And, + llvm::APInt::getLowBitsSet(SrcWidth, Info.Size)); + + MaskedVal = SrcVal; + if (Offset) + SrcVal = builder.createShift(SrcVal, llvm::APInt(SrcWidth, Offset), true); + + // Mask out the original value. + Val = builder.createBinop( + Val, mlir::cir::BinOpKind::And, + ~llvm::APInt::getBitsSet(SrcWidth, Offset, Offset + Info.Size)); + + // Or together the unchanged values and the source value. + SrcVal = builder.create( + Val.getLoc(), Ty, mlir::cir::BinOpKind::Or, Val, SrcVal); + } else { + assert(Offset == 0); + + // According to the AACPS: + // When a volatile bit-field is written, and its container does not overlap + // with any non-bit-field member, its container must be read exactly once + // and written exactly once using the access width appropriate to the type + // of the container. The two accesses are not atomic. + if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) && + CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad) + builder.createLoad(Dst.getPointer().getLoc(), Ptr); + } + + // Write the new value back out. + // TODO: constant matrix type, volatile, no init, non temporal, TBAA + buildStoreOfScalar(SrcVal, Ptr, Dst.isVolatileQualified(), Dst.getType(), + Dst.getBaseInfo(), false, false); + + // Return the new value of the bit-field, if requested. + if (Result) { + mlir::Value ResultVal = MaskedVal; + ResultVal = builder.createIntCast(ResultVal, ResLTy); + auto bitwidth = ResultVal.getType().cast().getWidth(); + + // Sign extend the value if needed. + if (Info.IsSigned) { + assert(Info.Size <= StorageSize); + unsigned HighBits = StorageSize - Info.Size; + + if (HighBits) { + ResultVal = builder.createShift(ResultVal, + llvm::APInt(bitwidth, HighBits), true); + + ResultVal = builder.createShift(ResultVal, + llvm::APInt(bitwidth, HighBits), false); + } + } + + *Result = buildFromMemory(ResultVal, Dst.getType()); + } +} + static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E, const VarDecl *VD) { QualType T = E->getType(); @@ -2180,6 +2378,13 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, QualType ty, mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue, SourceLocation Loc) { + return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(), + lvalue.getType(), getLoc(Loc), lvalue.getBaseInfo(), + lvalue.isNontemporal()); +} + +mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue, + mlir::Location Loc) { return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(), lvalue.getType(), Loc, lvalue.getBaseInfo(), lvalue.isNontemporal()); @@ -2198,6 +2403,14 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, LValueBaseInfo BaseInfo, bool isNontemporal) { + return buildLoadOfScalar(Addr, Volatile, Ty, getLoc(Loc), BaseInfo, + isNontemporal); +} + +mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, + QualType Ty, mlir::Location Loc, + LValueBaseInfo BaseInfo, + bool isNontemporal) { if (!CGM.getCodeGenOpts().PreserveVec3Type) { if (Ty->isVectorType()) { llvm_unreachable("NYI"); @@ -2211,7 +2424,7 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, } mlir::cir::LoadOp Load = builder.create( - getLoc(Loc), Addr.getElementType(), Addr.getPointer()); + Loc, Addr.getElementType(), Addr.getPointer()); if (isNontemporal) { llvm_unreachable("NYI"); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp 
b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 8569b8ed7d84..3bc99a8ddc96 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -212,11 +212,8 @@ class ScalarExprEmitter : public StmtVisitor { /// Emits the address of the l-value, then loads and returns the result. mlir::Value buildLoadOfLValue(const Expr *E) { LValue LV = CGF.buildLValue(E); - auto load = Builder.create(CGF.getLoc(E->getExprLoc()), - CGF.getCIRType(E->getType()), - LV.getPointer()); // FIXME: add some akin to EmitLValueAlignmentAssumption(E, V); - return load; + return CGF.buildLoadOfLValue(LV, E->getExprLoc()).getScalarVal(); } mlir::Value buildLoadOfLValue(LValue LV, SourceLocation Loc) { @@ -1863,7 +1860,7 @@ mlir::Value ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { // 'An assignment expression has the value of the left operand after the // assignment...'. if (LHS.isBitField()) { - llvm_unreachable("NYI"); + CGF.buildStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS); } else { CGF.buildNullabilityCheck(LHS, RHS, E->getExprLoc()); CIRGenFunction::SourceLocRAIIObject loc{CGF, @@ -1964,25 +1961,27 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( auto condV = CGF.evaluateExprAsBool(condExpr); assert(!UnimplementedFeature::incrementProfileCounter()); - return builder.create( - loc, condV, /*thenBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - auto lhs = Visit(lhsExpr); - if (!lhs) { - lhs = builder.getNullValue(CGF.VoidTy, loc); - lhsIsVoid = true; - } - builder.create(loc, lhs); - }, - /*elseBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - auto rhs = Visit(rhsExpr); - if (lhsIsVoid) { - assert(!rhs && "lhs and rhs types must match"); - rhs = builder.getNullValue(CGF.VoidTy, loc); - } - builder.create(loc, rhs); - }).getResult(); + return builder + .create( + loc, condV, /*thenBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto lhs = Visit(lhsExpr); + if (!lhs) { + lhs = builder.getNullValue(CGF.VoidTy, loc); + lhsIsVoid = true; + } + builder.create(loc, lhs); + }, + /*elseBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + auto rhs = Visit(rhsExpr); + if (lhsIsVoid) { + assert(!rhs && "lhs and rhs types must match"); + rhs = builder.getNullValue(CGF.VoidTy, loc); + } + builder.create(loc, rhs); + }) + .getResult(); } mlir::Value condV = CGF.buildOpOnBoolExpr(condExpr, loc, lhsExpr, rhsExpr); @@ -2012,51 +2011,53 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( } }; - return builder.create( - loc, condV, /*trueBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - CIRGenFunction::LexicalScopeContext lexScope{loc, - b.getInsertionBlock()}; - CIRGenFunction::LexicalScopeGuard lexThenGuard{CGF, &lexScope}; - CGF.currLexScope->setAsTernary(); - - assert(!UnimplementedFeature::incrementProfileCounter()); - eval.begin(CGF); - auto lhs = Visit(lhsExpr); - eval.end(CGF); - - if (lhs) { - yieldTy = lhs.getType(); - b.create(loc, lhs); - return; - } - // If LHS or RHS is a throw or void expression we need to patch arms - // as to properly match yield types. 
- insertPoints.push_back(b.saveInsertionPoint()); - }, - /*falseBuilder=*/ - [&](mlir::OpBuilder &b, mlir::Location loc) { - CIRGenFunction::LexicalScopeContext lexScope{loc, - b.getInsertionBlock()}; - CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; - CGF.currLexScope->setAsTernary(); - - assert(!UnimplementedFeature::incrementProfileCounter()); - eval.begin(CGF); - auto rhs = Visit(rhsExpr); - eval.end(CGF); - - if (rhs) { - yieldTy = rhs.getType(); - b.create(loc, rhs); - } else { - // If LHS or RHS is a throw or void expression we need to patch arms - // as to properly match yield types. - insertPoints.push_back(b.saveInsertionPoint()); - } - - patchVoidOrThrowSites(); - }).getResult(); + return builder + .create( + loc, condV, /*trueBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + CIRGenFunction::LexicalScopeContext lexScope{loc, + b.getInsertionBlock()}; + CIRGenFunction::LexicalScopeGuard lexThenGuard{CGF, &lexScope}; + CGF.currLexScope->setAsTernary(); + + assert(!UnimplementedFeature::incrementProfileCounter()); + eval.begin(CGF); + auto lhs = Visit(lhsExpr); + eval.end(CGF); + + if (lhs) { + yieldTy = lhs.getType(); + b.create(loc, lhs); + return; + } + // If LHS or RHS is a throw or void expression we need to patch arms + // as to properly match yield types. + insertPoints.push_back(b.saveInsertionPoint()); + }, + /*falseBuilder=*/ + [&](mlir::OpBuilder &b, mlir::Location loc) { + CIRGenFunction::LexicalScopeContext lexScope{loc, + b.getInsertionBlock()}; + CIRGenFunction::LexicalScopeGuard lexElseGuard{CGF, &lexScope}; + CGF.currLexScope->setAsTernary(); + + assert(!UnimplementedFeature::incrementProfileCounter()); + eval.begin(CGF); + auto rhs = Visit(rhsExpr); + eval.end(CGF); + + if (rhs) { + yieldTy = rhs.getType(); + b.create(loc, rhs); + } else { + // If LHS or RHS is a throw or void expression we need to patch + // arms as to properly match yield types. + insertPoints.push_back(b.saveInsertionPoint()); + } + + patchVoidOrThrowSites(); + }) + .getResult(); } mlir::Value CIRGenFunction::buildScalarPrePostIncDec(const UnaryOperator *E, diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 1459dff6f121..e426cab16331 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -577,7 +577,8 @@ class CIRGenFunction : public CIRGenTypeCache { const CIRGenFunctionInfo *CurFnInfo; clang::QualType FnRetTy; - /// This is the current function or global initializer that is generated code for. + /// This is the current function or global initializer that is generated code + /// for. mlir::Operation *CurFn = nullptr; /// Save Parameter Decl for coroutine. @@ -593,7 +594,7 @@ class CIRGenFunction : public CIRGenTypeCache { CIRGenModule &getCIRGenModule() { return CGM; } - mlir::Block* getCurFunctionEntryBlock() { + mlir::Block *getCurFunctionEntryBlock() { auto Fn = dyn_cast(CurFn); assert(Fn && "other callables NYI"); return &Fn.getRegion().front(); @@ -868,6 +869,12 @@ class CIRGenFunction : public CIRGenTypeCache { clang::SourceLocation Loc, LValueBaseInfo BaseInfo, bool isNontemporal = false); + mlir::Value buildLoadOfScalar(Address Addr, bool Volatile, clang::QualType Ty, + mlir::Location Loc, LValueBaseInfo BaseInfo, + bool isNontemporal = false); + + RValue buildLoadOfBitfieldLValue(LValue LV, SourceLocation Loc); + /// Load a scalar value from an address, taking care to appropriately convert /// from the memory representation to CIR value representation. 
mlir::Value buildLoadOfScalar(Address Addr, bool Volatile, clang::QualType Ty, @@ -882,6 +889,7 @@ class CIRGenFunction : public CIRGenTypeCache { /// form the memory representation to the CIR value representation. The /// l-value must be a simple l-value. mlir::Value buildLoadOfScalar(LValue lvalue, clang::SourceLocation Loc); + mlir::Value buildLoadOfScalar(LValue lvalue, mlir::Location Loc); Address buildLoadOfReference(LValue RefLVal, mlir::Location Loc, LValueBaseInfo *PointeeBaseInfo = nullptr); @@ -1199,6 +1207,9 @@ class CIRGenFunction : public CIRGenTypeCache { /// is 'Ty'. void buildStoreThroughLValue(RValue Src, LValue Dst); + void buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, + mlir::Value *Result); + mlir::cir::BrOp buildBranchThroughCleanup(mlir::Location Loc, JumpDest Dest); /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h index b1ded0017d59..0a686181db61 100644 --- a/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h +++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayout.h @@ -187,6 +187,16 @@ class CIRGenRecordLayout { /// Check whether this struct can be C++ zero-initialized with a /// zeroinitializer. bool isZeroInitializable() const { return IsZeroInitializable; } + + /// Return the BitFieldInfo that corresponds to the field FD. + const CIRGenBitFieldInfo &getBitFieldInfo(const clang::FieldDecl *FD) const { + FD = FD->getCanonicalDecl(); + assert(FD->isBitField() && "Invalid call for non-bit-field decl!"); + llvm::DenseMap::const_iterator + it = BitFields.find(FD); + assert(it != BitFields.end() && "Unable to find bitfield info"); + return it->second; + } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h index f84c20c4b136..c6edeb4d4fe4 100644 --- a/clang/lib/CIR/CodeGen/CIRGenValue.h +++ b/clang/lib/CIR/CodeGen/CIRGenValue.h @@ -15,6 +15,7 @@ #define LLVM_CLANG_LIB_CIR_CIRGENVALUE_H #include "Address.h" +#include "CIRGenRecordLayout.h" #include "clang/AST/ASTContext.h" #include "clang/AST/CharUnits.h" @@ -207,6 +208,7 @@ class LValue { mlir::Value V; mlir::Type ElementType; LValueBaseInfo BaseInfo; + const CIRGenBitFieldInfo *BitFieldInfo{0}; public: bool isSimple() const { return LVType == Simple; } @@ -298,6 +300,38 @@ class LValue { const clang::Qualifiers &getQuals() const { return Quals; } clang::Qualifiers &getQuals() { return Quals; } + + // bitfield lvalue + Address getBitFieldAddress() const { + return Address(getBitFieldPointer(), ElementType, getAlignment()); + } + + mlir::Value getBitFieldPointer() const { + assert(isBitField()); + return V; + } + + const CIRGenBitFieldInfo &getBitFieldInfo() const { + assert(isBitField()); + return *BitFieldInfo; + } + + /// Create a new object to represent a bit-field access. + /// + /// \param Addr - The base address of the bit-field sequence this + /// bit-field refers to. + /// \param Info - The information describing how to perform the bit-field + /// access. + static LValue MakeBitfield(Address Addr, const CIRGenBitFieldInfo &Info, + clang::QualType type, LValueBaseInfo BaseInfo) { + LValue R; + R.LVType = BitField; + R.V = Addr.getPointer(); + R.ElementType = Addr.getElementType(); + R.BitFieldInfo = &Info; + R.Initialize(type, type.getQualifiers(), Addr.getAlignment(), BaseInfo); + return R; + } }; /// An aggregate value slot. 
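
The bit-field load and store paths added in CIRGenExpr.cpp follow the usual shift-and-mask lowering over the field's storage unit. A rough C++ sketch of that arithmetic (illustrative only, not part of the patch; `offset` and `size` correspond to the CIRGenBitFieldInfo Offset/Size fields, and a 32-bit storage unit is assumed):

    #include <cstdint>

    // Load a signed field of `size` bits starting at bit `offset`: shift the
    // field's top bit up to bit 31, then arithmetic-shift back down so the
    // result comes out sign-extended (the sequence buildLoadOfBitfieldLValue
    // emits for signed fields).
    int32_t loadSignedField(uint32_t storage, unsigned offset, unsigned size) {
      unsigned highBits = 32u - offset - size;
      int32_t v = static_cast<int32_t>(storage << highBits);
      return v >> (offset + highBits);
    }

    // Store `value` into the same field while preserving its neighbours:
    // truncate the source to `size` bits, shift it into place, clear the old
    // bits, then OR the two halves (the shape of buildStoreThroughBitfieldLValue).
    uint32_t storeField(uint32_t storage, uint32_t value, unsigned offset,
                        unsigned size) {
      uint32_t low = (size < 32u) ? ((1u << size) - 1u) : ~0u;
      uint32_t mask = low << offset;
      return (storage & ~mask) | ((value << offset) & mask);
    }

The unsigned load is the simpler variant: shift right by `offset` and, when the field does not fill the unit, mask with the low `size` bits — the cir.shift / cir.binop(and, ...) sequence the tests below check for.
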
diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index ea170233da4b..6559f42b507f 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -214,8 +214,8 @@ CIRRecordLowering::CIRRecordLowering(CIRGenTypes &cirGenTypes, cxxRecordDecl{llvm::dyn_cast(recordDecl)}, astRecordLayout{cirGenTypes.getContext().getASTRecordLayout(recordDecl)}, dataLayout{cirGenTypes.getModule().getModule()}, - IsZeroInitializable(true), IsZeroInitializableAsBase(true), - isPacked{isPacked} {} + IsZeroInitializable(true), + IsZeroInitializableAsBase(true), isPacked{isPacked} {} void CIRRecordLowering::setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset, @@ -474,6 +474,8 @@ void CIRRecordLowering::accumulateBitFields( // with lower cost. auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord, uint64_t StartBitOffset) { + if (OffsetInRecord >= 64) // See IntType::verify + return true; if (!cirGenTypes.getModule().getCodeGenOpts().FineGrainedBitfieldAccesses) return false; llvm_unreachable("NYI"); From b717c07cd967b248da771a9742e5f38778bdaf93 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 23 Aug 2023 12:53:04 +0300 Subject: [PATCH 02/15] adds two simple tests --- clang/test/CIR/CodeGen/bitfields.cpp | 51 +++++++++++++++++++++++++++- 1 file changed, 50 insertions(+), 1 deletion(-) diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp index e06b89191985..a7f7529b1a53 100644 --- a/clang/test/CIR/CodeGen/bitfields.cpp +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -15,4 +15,53 @@ void m() { } // CHECK: !ty_22struct2Eanon22 = !cir.struct<"struct.anon", !u32i, #cir.recdecl.ast> -// CHECK: !ty_22struct2E__long22 = !cir.struct<"struct.__long", !ty_22struct2Eanon22, !u32i, !cir.ptr> \ No newline at end of file +// CHECK: !ty_22struct2E__long22 = !cir.struct<"struct.__long", !ty_22struct2Eanon22, !u32i, !cir.ptr> + + +struct S { + int a : 4; + int b : 27; + int c : 17; + int d : 2; + int e : 15; +}; // 65 bits in total, i.e. 
mote than 64 + +void store_field() { + S s; + s.a = 3; +} + +// CHECK: cir.func @_Z11store_field +// CHECK: %1 = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: %2 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr +// CHECK: %3 = cir.cast(integral, %1 : !s32i), !u32i +// CHECK: %4 = cir.load %2 : cir.ptr , !u32i +// CHECK: %5 = cir.const(#cir.int<15> : !u32i) : !u32i +// CHECK: %6 = cir.binop(and, %3, %5) : !u32i +// CHECK: %7 = cir.const(#cir.int<4294967280> : !u32i) : !u32i +// CHECK: %8 = cir.binop(and, %4, %7) : !u32i +// CHECK: %9 = cir.binop(or, %8, %6) : !u32i +// CHECK: cir.store %9, %2 : !u32i, cir.ptr + +void store_neg_field() { + S s; + s.d = -1; +} + +// CHECK: cir.func @_Z15store_neg_field +// CHECK: %1 = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: %2 = cir.unary(minus, %1) : !s32i, !s32i +// CHECK: %3 = "cir.struct_element_addr"(%0) {member_index = 1 : index, member_name = "d"} : (!cir.ptr) -> !cir.ptr +// CHECK: %4 = cir.cast(bitcast, %3 : !cir.ptr), !cir.ptr +// CHECK: %5 = cir.cast(integral, %2 : !s32i), !u24i +// CHECK: %6 = cir.load %4 : cir.ptr , !u24i +// CHECK: %7 = cir.const(#cir.int<3> : !u24i) : !u24i +// CHECK: %8 = cir.binop(and, %5, %7) : !u24i +// CHECK: %9 = cir.const(#cir.int<17> : !u24i) : !u24i +// CHECK: %10 = cir.shift(left, %8 : !u24i, %9 : !u24i) -> !u24i +// CHECK: %11 = cir.const(#cir.int<16383999> : !u24i) : !u24i +// CHECK: %12 = cir.binop(and, %6, %11) : !u24i +// CHECK: %13 = cir.binop(or, %12, %10) : !u24i +// CHECK: cir.store %13, %4 : !u24i, cir.ptr + +// 16383999 decimal = 1111 1001 1111 1111 1111 1111 binary \ No newline at end of file From 6d1bd164d25cc3e9a3cb764557e8a8919c567e09 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 24 Aug 2023 10:41:13 +0300 Subject: [PATCH 03/15] refactoring, added helpers --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 81 ++++++++++--- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 134 +++++++++++---------- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 4 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 2 + clang/test/CIR/CodeGen/bitfields.cpp | 8 +- 5 files changed, 146 insertions(+), 83 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 3b6fd8d7de23..e611cb109050 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -266,6 +266,11 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::cir::IntType getUInt32Ty() { return typeCache.UInt32Ty; } mlir::cir::IntType getUInt64Ty() { return typeCache.UInt64Ty; } + + mlir::cir::IntType getCustomIntTy(unsigned size, bool isSigned) { + return mlir::cir::IntType::get(getContext(), size, isSigned); + } + bool isInt8Ty(mlir::Type i) { return i == typeCache.UInt8Ty || i == typeCache.SInt8Ty; } @@ -581,6 +586,65 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::cir::UnaryOpKind::Not, value); } + mlir::Value createBinop(mlir::Value lhs, mlir::cir::BinOpKind kind, + const llvm::APInt &rhs) { + return create( + lhs.getLoc(), lhs.getType(), kind, lhs, + getConstAPInt(lhs.getLoc(), lhs.getType(), rhs)); + } + + mlir::Value createBinop(mlir::Value lhs, + mlir::cir::BinOpKind kind, + mlir::Value rhs) { + return create( + lhs.getLoc(), lhs.getType(), kind, lhs, rhs); + } + + mlir::Value createShift(mlir::Value lhs, const llvm::APInt &rhs, + bool isShiftLeft) { + return create( + lhs.getLoc(), lhs.getType(), lhs, + getConstAPInt(lhs.getLoc(), lhs.getType(), rhs), isShiftLeft); + } + + mlir::Value createShift(mlir::Value lhs, unsigned bits, bool isShiftLeft) { + auto width = 
lhs.getType().dyn_cast().getWidth(); + auto shift = llvm::APInt(width, bits); + return createShift(lhs, shift, isShiftLeft); + } + + mlir::Value createShiftLeft(mlir::Value lhs, unsigned bits) { + return createShift(lhs, bits, true); + } + + mlir::Value createShiftRight(mlir::Value lhs, unsigned bits) { + return createShift(lhs, bits, false); + } + + mlir::Value createLowBitsSet(mlir::Location loc, unsigned size, unsigned bits) { + auto val = llvm::APInt::getLowBitsSet(size, bits); + auto typ = mlir::cir::IntType::get(getContext(), size, false); + return getConstAPInt(loc, typ, val); + } + + mlir::Value createAnd(mlir::Value lhs, llvm::APInt rhs) { + auto val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs); + return createBinop(lhs, mlir::cir::BinOpKind::And, val); + } + + mlir::Value createAnd(mlir::Value lhs, mlir::Value rhs) { + return createBinop(lhs, mlir::cir::BinOpKind::And, rhs); + } + + mlir::Value createOr(mlir::Value lhs, llvm::APInt rhs) { + auto val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs); + return createBinop(lhs, mlir::cir::BinOpKind::Or, val); + } + + mlir::Value createOr(mlir::Value lhs, mlir::Value rhs) { + return createBinop(lhs, mlir::cir::BinOpKind::Or, rhs); + } + //===--------------------------------------------------------------------===// // Cast/Conversion Operators //===--------------------------------------------------------------------===// @@ -631,23 +695,6 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return createCast(mlir::cir::CastKind::bitcast, src, newTy); } - //===--------------------------------------------------------------------===// - // Misc - //===--------------------------------------------------------------------===// - - mlir::Value createBinop(mlir::Value lhs, mlir::cir::BinOpKind kind, - const llvm::APInt &rhs) { - return create( - lhs.getLoc(), lhs.getType(), kind, lhs, - getConstAPInt(lhs.getLoc(), lhs.getType(), rhs)); - } - - mlir::Value createShift(mlir::Value lhs, const llvm::APInt &rhs, - bool isShiftLeft) { - return create( - lhs.getLoc(), lhs.getType(), lhs, - getConstAPInt(lhs.getLoc(), lhs.getType(), rhs), isShiftLeft); - } }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index c9f62bec6c4b..c507062b2d5b 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -9,6 +9,7 @@ // This contains code to emit Expr nodes as CIR code. // //===----------------------------------------------------------------------===// +#include #include "CIRGenBuilder.h" #include "CIRGenCXXABI.h" @@ -128,6 +129,7 @@ static Address buildPointerWithAlignment(const Expr *E, if (PtrTy->getPointeeType()->isVoidType()) break; assert(!UnimplementedFeature::tbaa()); + LValueBaseInfo InnerBaseInfo; Address Addr = CGF.buildPointerWithAlignment( CE->getSubExpr(), &InnerBaseInfo, IsKnownNonNull); @@ -135,19 +137,24 @@ static Address buildPointerWithAlignment(const Expr *E, *BaseInfo = InnerBaseInfo; if (isa(CE)) { - LValueBaseInfo TargetTypeBaseInfo; - - CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment( - E->getType(), &TargetTypeBaseInfo); - - // If the source l-value is opaque, honor the alignment of the - // casted-to type. 
- if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) { - if (BaseInfo) - BaseInfo->mergeForCast(TargetTypeBaseInfo); - Addr = Address(Addr.getPointer(), Addr.getElementType(), Align, - IsKnownNonNull); - } + assert(0 && "not implemented"); + + + //TODO: remove it later + // LValueBaseInfo TargetTypeBaseInfo; + + // CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment( + // E->getType(), &TargetTypeBaseInfo); + // assert(!UnimplementedFeature::tbaa()); + + // // If the source l-value is opaque, honor the alignment of the + // // casted-to type. + // if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) { + // if (BaseInfo) + // BaseInfo->mergeForCast(TargetTypeBaseInfo); + // Addr = Address(Addr.getPointer(), Addr.getElementType(), Align, + // IsKnownNonNull); + // } } if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) && @@ -215,10 +222,24 @@ static bool isAAPCS(const TargetInfo &TargetInfo) { return TargetInfo.getABI().startswith("aapcs"); } +Address CIRGenFunction::getAddrOfField(LValue base, const FieldDecl *field, unsigned index) { + if (index == 0) + return base.getAddress(); + + auto loc = getLoc(field->getLocation()); + auto fieldType = convertType(field->getType()); + auto fieldPtr = + mlir::cir::PointerType::get(getBuilder().getContext(), fieldType); + auto sea = getBuilder().create( + loc, fieldPtr, base.getPointer(), field->getName(), index); + + return Address(sea->getResult(0), CharUnits::One()); +} + LValue CIRGenFunction::buildLValueForField(LValue base, const FieldDecl *field) { LValueBaseInfo BaseInfo = base.getBaseInfo(); - + if (field->isBitField()) { const CIRGenRecordLayout &RL = CGM.getTypes().getCIRGenRecordLayout(field->getParent()); @@ -234,17 +255,9 @@ LValue CIRGenFunction::buildLValueForField(LValue base, const RecordDecl *rec = field->getParent(); if (!UseVolatile) { if (!IsInPreservedAIRegion && - (!getDebugInfo() || !rec->hasAttr())) { - if (Idx != 0) { - auto loc = getLoc(field->getLocation()); - auto fieldType = convertType(field->getType()); - auto fieldPtr = - mlir::cir::PointerType::get(getBuilder().getContext(), fieldType); - auto sea = getBuilder().create( - loc, fieldPtr, base.getPointer(), field->getName(), Idx); - - Addr = Address(sea->getResult(0), CharUnits::One()); - } + (!getDebugInfo() || !rec->hasAttr())) { + if (Idx != 0) + Addr = getAddrOfField(base, field, Idx); } else { llvm_unreachable("NYI"); } @@ -253,12 +266,12 @@ LValue CIRGenFunction::buildLValueForField(LValue base, UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; // Get the access type. 
- mlir::Type FieldIntTy = - mlir::cir::IntType::get(builder.getContext(), SS, false); - + mlir::Type FieldIntTy = builder.getCustomIntTy(SS, false); + auto loc = getLoc(field->getLocation()); - if (Addr.getElementType() != FieldIntTy) + if (Addr.getElementType() != FieldIntTy) { Addr = builder.createElementBitCast(loc, Addr, FieldIntTy); + } if (UseVolatile) { llvm_unreachable("NYI"); } @@ -594,8 +607,7 @@ RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV, if (Info.IsSigned) { assert(static_cast(Offset + Info.Size) <= StorageSize); - mlir::Type typ = - mlir::cir::IntType::get(builder.getContext(), ValWidth, true); + mlir::Type typ = builder.getCustomIntTy(ValWidth, true); Val = builder.createIntCast(Val, typ); unsigned HighBits = StorageSize - Offset - Info.Size; @@ -665,36 +677,34 @@ void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, mlir::Value Val = buildLoadOfScalar(Dst, Dst.getPointer().getLoc()); - mlir::Type Ty = SrcVal.getType(); // Mask the source value as needed. if (!hasBooleanRepresentation(Dst.getType())) - SrcVal = - builder.createBinop(SrcVal, mlir::cir::BinOpKind::And, - llvm::APInt::getLowBitsSet(SrcWidth, Info.Size)); - + SrcVal = builder.createAnd(SrcVal, llvm::APInt::getLowBitsSet(SrcWidth, Info.Size)); + MaskedVal = SrcVal; if (Offset) - SrcVal = builder.createShift(SrcVal, llvm::APInt(SrcWidth, Offset), true); + SrcVal = builder.createShiftLeft(SrcVal, Offset); // Mask out the original value. - Val = builder.createBinop( - Val, mlir::cir::BinOpKind::And, + Val = builder.createAnd(Val, ~llvm::APInt::getBitsSet(SrcWidth, Offset, Offset + Info.Size)); // Or together the unchanged values and the source value. - SrcVal = builder.create( - Val.getLoc(), Ty, mlir::cir::BinOpKind::Or, Val, SrcVal); + SrcVal = builder.createOr(Val, SrcVal); + } else { - assert(Offset == 0); + assert(0 && "not implemented"); + // assert(Offset == 0); - // According to the AACPS: - // When a volatile bit-field is written, and its container does not overlap - // with any non-bit-field member, its container must be read exactly once - // and written exactly once using the access width appropriate to the type - // of the container. The two accesses are not atomic. - if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) && - CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad) - builder.createLoad(Dst.getPointer().getLoc(), Ptr); + //TODO: remove it later + // // According to the AACPS: + // // When a volatile bit-field is written, and its container does not overlap + // // with any non-bit-field member, its container must be read exactly once + // // and written exactly once using the access width appropriate to the type + // // of the container. The two accesses are not atomic. + // if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) && + // CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad) + // builder.createLoad(Dst.getPointer().getLoc(), Ptr); } // Write the new value back out. @@ -705,8 +715,7 @@ void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, // Return the new value of the bit-field, if requested. if (Result) { mlir::Value ResultVal = MaskedVal; - ResultVal = builder.createIntCast(ResultVal, ResLTy); - auto bitwidth = ResultVal.getType().cast().getWidth(); + ResultVal = builder.createIntCast(ResultVal, ResLTy); // Sign extend the value if needed. 
if (Info.IsSigned) { @@ -714,11 +723,8 @@ void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, unsigned HighBits = StorageSize - Info.Size; if (HighBits) { - ResultVal = builder.createShift(ResultVal, - llvm::APInt(bitwidth, HighBits), true); - - ResultVal = builder.createShift(ResultVal, - llvm::APInt(bitwidth, HighBits), false); + ResultVal = builder.createShiftLeft(ResultVal, HighBits); + ResultVal = builder.createShiftRight(ResultVal, HighBits); } } @@ -949,7 +955,13 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { LValue LV = buildLValue(E->getLHS()); SourceLocRAIIObject Loc{*this, getLoc(E->getSourceRange())}; - buildStoreThroughLValue(RV, LV); + if (LV.isBitField()) { + mlir::Value *val{0}; + buildStoreThroughBitfieldLValue(RV, LV, val); + } else { + buildStoreThroughLValue(RV, LV); + } + assert(!getContext().getLangOpts().OpenMP && "last priv cond not implemented"); return LV; @@ -2392,9 +2404,9 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue, mlir::Value CIRGenFunction::buildFromMemory(mlir::Value Value, QualType Ty) { // Bool has a different representation in memory than in registers. - if (hasBooleanRepresentation(Ty)) { - llvm_unreachable("NYI"); - } + // if (hasBooleanRepresentation(Ty)) { + // llvm_unreachable("NYI"); + // } return Value; } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 3bc99a8ddc96..71e74f37e1a8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1057,9 +1057,7 @@ static mlir::Value buildPointerArithmetic(CIRGenFunction &CGF, std::swap(pointerOperand, indexOperand); } - bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType(); - - auto &DL = CGF.CGM.getDataLayout(); + bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType(); // Some versions of glibc and gcc use idioms (particularly in their malloc // routines) that add a pointer-sized integer (known to be a pointer value) diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index e426cab16331..9831a13aa112 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1514,6 +1514,8 @@ class CIRGenFunction : public CIRGenTypeCache { return it->second; } + Address getAddrOfField(LValue base, const clang::FieldDecl *field, unsigned index); + /// Given an opaque value expression, return its LValue mapping if it exists, /// otherwise create one. 
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e); diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp index a7f7529b1a53..66371e48ff47 100644 --- a/clang/test/CIR/CodeGen/bitfields.cpp +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -43,10 +43,14 @@ void store_field() { // CHECK: %9 = cir.binop(or, %8, %6) : !u32i // CHECK: cir.store %9, %2 : !u32i, cir.ptr -void store_neg_field() { +void store_neg_field() { S s; s.d = -1; -} +} + +void copy_s(S& s1, S& s2) { + s1.d = s2.d; +} // CHECK: cir.func @_Z15store_neg_field // CHECK: %1 = cir.const(#cir.int<1> : !s32i) : !s32i From f7fffd451e654acab1ee37a4b163e8b95acd38ba Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 24 Aug 2023 10:42:22 +0300 Subject: [PATCH 04/15] wip --- clang/test/CIR/CodeGen/bitfields.cpp | 7 ------- 1 file changed, 7 deletions(-) diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp index 66371e48ff47..bc135d2e1582 100644 --- a/clang/test/CIR/CodeGen/bitfields.cpp +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -47,11 +47,6 @@ void store_neg_field() { S s; s.d = -1; } - -void copy_s(S& s1, S& s2) { - s1.d = s2.d; -} - // CHECK: cir.func @_Z15store_neg_field // CHECK: %1 = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: %2 = cir.unary(minus, %1) : !s32i, !s32i @@ -67,5 +62,3 @@ void copy_s(S& s1, S& s2) { // CHECK: %12 = cir.binop(and, %6, %11) : !u24i // CHECK: %13 = cir.binop(or, %12, %10) : !u24i // CHECK: cir.store %13, %4 : !u24i, cir.ptr - -// 16383999 decimal = 1111 1001 1111 1111 1111 1111 binary \ No newline at end of file From 7fe5ae1b69efe4c5d00c4b8a10cdcdedb1a1121c Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 24 Aug 2023 10:46:19 +0300 Subject: [PATCH 05/15] removed unrelated code --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 28 ---------------------------- 1 file changed, 28 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index c507062b2d5b..5eaa59621a4c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -138,23 +138,6 @@ static Address buildPointerWithAlignment(const Expr *E, if (isa(CE)) { assert(0 && "not implemented"); - - - //TODO: remove it later - // LValueBaseInfo TargetTypeBaseInfo; - - // CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment( - // E->getType(), &TargetTypeBaseInfo); - // assert(!UnimplementedFeature::tbaa()); - - // // If the source l-value is opaque, honor the alignment of the - // // casted-to type. - // if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) { - // if (BaseInfo) - // BaseInfo->mergeForCast(TargetTypeBaseInfo); - // Addr = Address(Addr.getPointer(), Addr.getElementType(), Align, - // IsKnownNonNull); - // } } if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) && @@ -694,17 +677,6 @@ void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, } else { assert(0 && "not implemented"); - // assert(Offset == 0); - - //TODO: remove it later - // // According to the AACPS: - // // When a volatile bit-field is written, and its container does not overlap - // // with any non-bit-field member, its container must be read exactly once - // // and written exactly once using the access width appropriate to the type - // // of the container. The two accesses are not atomic. 
- // if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) && - // CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad) - // builder.createLoad(Dst.getPointer().getLoc(), Ptr); } // Write the new value back out. From ec3908a47872832822da9d7607061d5280ee615c Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 24 Aug 2023 10:49:09 +0300 Subject: [PATCH 06/15] clang format applied --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 20 +++++++--------- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 34 ++++++++++++++------------- 2 files changed, 27 insertions(+), 27 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index e611cb109050..6b4395cd694f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -266,7 +266,6 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::cir::IntType getUInt32Ty() { return typeCache.UInt32Ty; } mlir::cir::IntType getUInt64Ty() { return typeCache.UInt64Ty; } - mlir::cir::IntType getCustomIntTy(unsigned size, bool isSigned) { return mlir::cir::IntType::get(getContext(), size, isSigned); } @@ -593,11 +592,10 @@ class CIRGenBuilderTy : public mlir::OpBuilder { getConstAPInt(lhs.getLoc(), lhs.getType(), rhs)); } - mlir::Value createBinop(mlir::Value lhs, - mlir::cir::BinOpKind kind, + mlir::Value createBinop(mlir::Value lhs, mlir::cir::BinOpKind kind, mlir::Value rhs) { - return create( - lhs.getLoc(), lhs.getType(), kind, lhs, rhs); + return create(lhs.getLoc(), lhs.getType(), kind, lhs, + rhs); } mlir::Value createShift(mlir::Value lhs, const llvm::APInt &rhs, @@ -618,21 +616,22 @@ class CIRGenBuilderTy : public mlir::OpBuilder { } mlir::Value createShiftRight(mlir::Value lhs, unsigned bits) { - return createShift(lhs, bits, false); + return createShift(lhs, bits, false); } - mlir::Value createLowBitsSet(mlir::Location loc, unsigned size, unsigned bits) { + mlir::Value createLowBitsSet(mlir::Location loc, unsigned size, + unsigned bits) { auto val = llvm::APInt::getLowBitsSet(size, bits); auto typ = mlir::cir::IntType::get(getContext(), size, false); return getConstAPInt(loc, typ, val); } - + mlir::Value createAnd(mlir::Value lhs, llvm::APInt rhs) { auto val = getConstAPInt(lhs.getLoc(), lhs.getType(), rhs); return createBinop(lhs, mlir::cir::BinOpKind::And, val); } - mlir::Value createAnd(mlir::Value lhs, mlir::Value rhs) { + mlir::Value createAnd(mlir::Value lhs, mlir::Value rhs) { return createBinop(lhs, mlir::cir::BinOpKind::And, rhs); } @@ -641,7 +640,7 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return createBinop(lhs, mlir::cir::BinOpKind::Or, val); } - mlir::Value createOr(mlir::Value lhs, mlir::Value rhs) { + mlir::Value createOr(mlir::Value lhs, mlir::Value rhs) { return createBinop(lhs, mlir::cir::BinOpKind::Or, rhs); } @@ -694,7 +693,6 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::Value createBitcast(mlir::Value src, mlir::Type newTy) { return createCast(mlir::cir::CastKind::bitcast, src, newTy); } - }; } // namespace cir diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 5eaa59621a4c..0ce995b370be 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -205,16 +205,17 @@ static bool isAAPCS(const TargetInfo &TargetInfo) { return TargetInfo.getABI().startswith("aapcs"); } -Address CIRGenFunction::getAddrOfField(LValue base, const FieldDecl *field, unsigned index) { +Address CIRGenFunction::getAddrOfField(LValue base, const FieldDecl *field, + unsigned index) { if (index == 
0) return base.getAddress(); auto loc = getLoc(field->getLocation()); auto fieldType = convertType(field->getType()); auto fieldPtr = - mlir::cir::PointerType::get(getBuilder().getContext(), fieldType); + mlir::cir::PointerType::get(getBuilder().getContext(), fieldType); auto sea = getBuilder().create( - loc, fieldPtr, base.getPointer(), field->getName(), index); + loc, fieldPtr, base.getPointer(), field->getName(), index); return Address(sea->getResult(0), CharUnits::One()); } @@ -222,7 +223,7 @@ Address CIRGenFunction::getAddrOfField(LValue base, const FieldDecl *field, unsi LValue CIRGenFunction::buildLValueForField(LValue base, const FieldDecl *field) { LValueBaseInfo BaseInfo = base.getBaseInfo(); - + if (field->isBitField()) { const CIRGenRecordLayout &RL = CGM.getTypes().getCIRGenRecordLayout(field->getParent()); @@ -238,9 +239,9 @@ LValue CIRGenFunction::buildLValueForField(LValue base, const RecordDecl *rec = field->getParent(); if (!UseVolatile) { if (!IsInPreservedAIRegion && - (!getDebugInfo() || !rec->hasAttr())) { - if (Idx != 0) - Addr = getAddrOfField(base, field, Idx); + (!getDebugInfo() || !rec->hasAttr())) { + if (Idx != 0) + Addr = getAddrOfField(base, field, Idx); } else { llvm_unreachable("NYI"); } @@ -250,9 +251,9 @@ LValue CIRGenFunction::buildLValueForField(LValue base, // Get the access type. mlir::Type FieldIntTy = builder.getCustomIntTy(SS, false); - + auto loc = getLoc(field->getLocation()); - if (Addr.getElementType() != FieldIntTy) { + if (Addr.getElementType() != FieldIntTy) { Addr = builder.createElementBitCast(loc, Addr, FieldIntTy); } if (UseVolatile) { @@ -662,15 +663,16 @@ void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, // Mask the source value as needed. if (!hasBooleanRepresentation(Dst.getType())) - SrcVal = builder.createAnd(SrcVal, llvm::APInt::getLowBitsSet(SrcWidth, Info.Size)); - + SrcVal = builder.createAnd( + SrcVal, llvm::APInt::getLowBitsSet(SrcWidth, Info.Size)); + MaskedVal = SrcVal; if (Offset) SrcVal = builder.createShiftLeft(SrcVal, Offset); // Mask out the original value. - Val = builder.createAnd(Val, - ~llvm::APInt::getBitsSet(SrcWidth, Offset, Offset + Info.Size)); + Val = builder.createAnd( + Val, ~llvm::APInt::getBitsSet(SrcWidth, Offset, Offset + Info.Size)); // Or together the unchanged values and the source value. SrcVal = builder.createOr(Val, SrcVal); @@ -687,7 +689,7 @@ void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, // Return the new value of the bit-field, if requested. if (Result) { mlir::Value ResultVal = MaskedVal; - ResultVal = builder.createIntCast(ResultVal, ResLTy); + ResultVal = builder.createIntCast(ResultVal, ResLTy); // Sign extend the value if needed. 
if (Info.IsSigned) { @@ -695,7 +697,7 @@ void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, unsigned HighBits = StorageSize - Info.Size; if (HighBits) { - ResultVal = builder.createShiftLeft(ResultVal, HighBits); + ResultVal = builder.createShiftLeft(ResultVal, HighBits); ResultVal = builder.createShiftRight(ResultVal, HighBits); } } @@ -933,7 +935,7 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { } else { buildStoreThroughLValue(RV, LV); } - + assert(!getContext().getLangOpts().OpenMP && "last priv cond not implemented"); return LV; From 7dbf3c0d8fd6e13d734b27827c18418dd324ccbc Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 24 Aug 2023 14:14:33 +0300 Subject: [PATCH 07/15] minor fixes --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 1 - clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 101 +++++++++++++------------ clang/lib/CIR/CodeGen/CIRGenFunction.h | 3 +- clang/test/CIR/CodeGen/bitfields.cpp | 21 ++++- 4 files changed, 73 insertions(+), 53 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index 6b4395cd694f..96b5ea508ce0 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -694,7 +694,6 @@ class CIRGenBuilderTy : public mlir::OpBuilder { return createCast(mlir::cir::CastKind::bitcast, src, newTy); } }; - } // namespace cir #endif diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 0ce995b370be..7b474b3b7905 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -9,8 +9,6 @@ // This contains code to emit Expr nodes as CIR code. // //===----------------------------------------------------------------------===// -#include - #include "CIRGenBuilder.h" #include "CIRGenCXXABI.h" #include "CIRGenCall.h" @@ -137,7 +135,7 @@ static Address buildPointerWithAlignment(const Expr *E, *BaseInfo = InnerBaseInfo; if (isa(CE)) { - assert(0 && "not implemented"); + llvm_unreachable("NYI"); } if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) && @@ -220,52 +218,57 @@ Address CIRGenFunction::getAddrOfField(LValue base, const FieldDecl *field, return Address(sea->getResult(0), CharUnits::One()); } -LValue CIRGenFunction::buildLValueForField(LValue base, - const FieldDecl *field) { +static bool useVolatileForBitField(const CIRGenModule &cgm, LValue base, + const CIRGenBitFieldInfo &info, + const FieldDecl *field) { + return isAAPCS(cgm.getTarget()) && cgm.getCodeGenOpts().AAPCSBitfieldWidth && + info.VolatileStorageSize != 0 && + field->getType() + .withCVRQualifiers(base.getVRQualifiers()) + .isVolatileQualified(); +} + +LValue CIRGenFunction::buildLValueForBitField(LValue base, + const FieldDecl *field) { + LValueBaseInfo BaseInfo = base.getBaseInfo(); + const RecordDecl *rec = field->getParent(); + auto &layout = CGM.getTypes().getCIRGenRecordLayout(field->getParent()); + auto &info = layout.getBitFieldInfo(field); + auto useVolatile = useVolatileForBitField(CGM, base, info, field); + unsigned Idx = layout.getCIRFieldNo(field); + + if (useVolatile || + (IsInPreservedAIRegion || + (getDebugInfo() && rec->hasAttr()))) { + llvm_unreachable("NYI"); + } - if (field->isBitField()) { - const CIRGenRecordLayout &RL = - CGM.getTypes().getCIRGenRecordLayout(field->getParent()); - const CIRGenBitFieldInfo &Info = RL.getBitFieldInfo(field); - const bool UseVolatile = isAAPCS(CGM.getTarget()) && - CGM.getCodeGenOpts().AAPCSBitfieldWidth && - Info.VolatileStorageSize != 0 && - 
field->getType() - .withCVRQualifiers(base.getVRQualifiers()) - .isVolatileQualified(); - Address Addr = base.getAddress(); - unsigned Idx = RL.getCIRFieldNo(field); - const RecordDecl *rec = field->getParent(); - if (!UseVolatile) { - if (!IsInPreservedAIRegion && - (!getDebugInfo() || !rec->hasAttr())) { - if (Idx != 0) - Addr = getAddrOfField(base, field, Idx); - } else { - llvm_unreachable("NYI"); - } - } - const unsigned SS = - UseVolatile ? Info.VolatileStorageSize : Info.StorageSize; + Address Addr = getAddrOfField(base, field, Idx); - // Get the access type. - mlir::Type FieldIntTy = builder.getCustomIntTy(SS, false); + const unsigned SS = useVolatile ? info.VolatileStorageSize : info.StorageSize; - auto loc = getLoc(field->getLocation()); - if (Addr.getElementType() != FieldIntTy) { - Addr = builder.createElementBitCast(loc, Addr, FieldIntTy); - } - if (UseVolatile) { - llvm_unreachable("NYI"); - } + // Get the access type. + mlir::Type FieldIntTy = builder.getCustomIntTy(SS, false); - QualType fieldType = - field->getType().withCVRQualifiers(base.getVRQualifiers()); - // TODO: Support TBAA for bit fields. - LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource()); - return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo); - } + auto loc = getLoc(field->getLocation()); + if (Addr.getElementType() != FieldIntTy) + Addr = builder.createElementBitCast(loc, Addr, FieldIntTy); + + QualType fieldType = + field->getType().withCVRQualifiers(base.getVRQualifiers()); + // TODO: Support TBAA for bit fields. + LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource()); + return LValue::MakeBitfield(Addr, info, fieldType, FieldBaseInfo); +} + +LValue CIRGenFunction::buildLValueForField(LValue base, + const FieldDecl *field) { + + LValueBaseInfo BaseInfo = base.getBaseInfo(); + + if (field->isBitField()) + return buildLValueForBitField(base, field); // Fields of may-alias structures are may-alais themselves. // FIXME: this hould get propagated down through anonymous structs and unions. @@ -596,17 +599,15 @@ RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV, unsigned HighBits = StorageSize - Offset - Info.Size; if (HighBits) - Val = builder.createShift(Val, llvm::APInt(ValWidth, HighBits), true); + Val = builder.createShiftLeft(Val, HighBits); if (Offset + HighBits) - Val = builder.createShift(Val, llvm::APInt(ValWidth, Offset + HighBits), - false); + Val = builder.createShiftRight(Val, Offset + HighBits); } else { if (Offset) - Val = builder.createShift(Val, llvm::APInt(ValWidth, Offset), false); + Val = builder.createShiftRight(Val, Offset); if (static_cast(Offset) + Info.Size < StorageSize) - Val = - builder.createBinop(Val, mlir::cir::BinOpKind::And, + Val = builder.createAnd(Val, llvm::APInt::getLowBitsSet(ValWidth, Info.Size)); } Val = builder.createIntCast(Val, ResLTy); diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 9831a13aa112..8947f3f93578 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1487,7 +1487,8 @@ class CIRGenFunction : public CIRGenTypeCache { AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *FD); LValue buildLValueForField(LValue Base, const clang::FieldDecl *Field); - + LValue buildLValueForBitField(LValue base, const FieldDecl *field); + /// Like buildLValueForField, excpet that if the Field is a reference, this /// will return the address of the reference and not the address of the value /// stored in the reference. 
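
For the bitfields.cpp tests that follow, the expected packing of the S bit-fields, as inferred from the CHECK lines (the width of the trailing access unit is not spelled out by the tests), is roughly:

    struct S {
      int a : 4;   // access unit 0 (!u32i), bits 0-3
      int b : 27;  // access unit 0 (!u32i), bits 4-30
      int c : 17;  // access unit 1 (!u24i), bits 0-16
      int d : 2;   // access unit 1 (!u24i), bits 17-18
      int e : 15;  // a separate trailing unit
    };
    // 65 bits of fields in total, i.e. more than the 64-bit IntType limit
    // guarded against in CIRRecordLayoutBuilder.cpp, so the fields cannot
    // all land in a single access unit.
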
diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp index bc135d2e1582..bd014af279c3 100644 --- a/clang/test/CIR/CodeGen/bitfields.cpp +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -17,7 +17,6 @@ void m() { // CHECK: !ty_22struct2Eanon22 = !cir.struct<"struct.anon", !u32i, #cir.recdecl.ast> // CHECK: !ty_22struct2E__long22 = !cir.struct<"struct.__long", !ty_22struct2Eanon22, !u32i, !cir.ptr> - struct S { int a : 4; int b : 27; @@ -62,3 +61,23 @@ void store_neg_field() { // CHECK: %12 = cir.binop(and, %6, %11) : !u24i // CHECK: %13 = cir.binop(or, %12, %10) : !u24i // CHECK: cir.store %13, %4 : !u24i, cir.ptr + + +int load_field(S& s) { + return s.d; +} + +// CHECK: cir.func @_Z10load_fieldR1S +// CHECK: %2 = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: %3 = "cir.struct_element_addr"(%2) {member_index = 1 : index, member_name = "d"} : (!cir.ptr) -> !cir.ptr +// CHECK: %4 = cir.cast(bitcast, %3 : !cir.ptr), !cir.ptr +// CHECK: %5 = cir.load %4 : cir.ptr , !u24i +// CHECK: %6 = cir.cast(integral, %5 : !u24i), !s24i +// CHECK: %7 = cir.const(#cir.int<5> : !s24i) : !s24i +// CHECK: %8 = cir.shift(left, %6 : !s24i, %7 : !s24i) -> !s24i +// CHECK: %9 = cir.const(#cir.int<22> : !s24i) : !s24i +// CHECK: %10 = cir.shift( right, %8 : !s24i, %9 : !s24i) -> !s24i +// CHECK: %11 = cir.cast(integral, %10 : !s24i), !s32i +// CHECK: cir.store %11, %1 : !s32i, cir.ptr +// CHECK: %12 = cir.load %1 : cir.ptr , !s32i +// CHECK: cir.return %12 : !s32i \ No newline at end of file From df803b6ee29789de1b8ae076d171d34a37b86b9a Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 31 Aug 2023 10:24:12 +0300 Subject: [PATCH 08/15] use GetMemberOp --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 25d0c3c87a80..8e4d0204e198 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -212,10 +212,10 @@ Address CIRGenFunction::getAddrOfField(LValue base, const FieldDecl *field, auto fieldType = convertType(field->getType()); auto fieldPtr = mlir::cir::PointerType::get(getBuilder().getContext(), fieldType); - auto sea = getBuilder().create( - loc, fieldPtr, base.getPointer(), field->getName(), index); + auto sea = getBuilder().createGetMember( + loc, fieldPtr, base.getPointer(), field->getName(), index); - return Address(sea->getResult(0), CharUnits::One()); + return Address(sea, CharUnits::One()); } static bool useVolatileForBitField(const CIRGenModule &cgm, LValue base, From fba70cc0f581826ce28dd2d9937261a08e283502 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 31 Aug 2023 10:55:25 +0300 Subject: [PATCH 09/15] tests passed --- clang/test/CIR/CodeGen/bitfields.cpp | 78 ++++++++++++++-------------- 1 file changed, 38 insertions(+), 40 deletions(-) diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp index 9880657d6e1c..a4aacd854927 100644 --- a/clang/test/CIR/CodeGen/bitfields.cpp +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -14,8 +14,8 @@ void m() { __long l; } -// CHECK: !ty_22struct2Eanon22 = !cir.struct<"struct.anon", !u32i, #cir.recdecl.ast> -// CHECK: !ty_22struct2E__long22 = !cir.struct<"struct.__long", !ty_22struct2Eanon22, !u32i, !cir.ptr> +// CHECK: !ty_22anon22 = !cir.struct +// CHECK: !ty_22__long22 = !cir.struct}> struct S { int a : 4; @@ -31,53 +31,51 @@ void store_field() { } // CHECK: cir.func @_Z11store_field -// CHECK: %1 = cir.const(#cir.int<3> : 
!s32i) : !s32i -// CHECK: %2 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr -// CHECK: %3 = cir.cast(integral, %1 : !s32i), !u32i -// CHECK: %4 = cir.load %2 : cir.ptr , !u32i -// CHECK: %5 = cir.const(#cir.int<15> : !u32i) : !u32i -// CHECK: %6 = cir.binop(and, %3, %5) : !u32i -// CHECK: %7 = cir.const(#cir.int<4294967280> : !u32i) : !u32i -// CHECK: %8 = cir.binop(and, %4, %7) : !u32i -// CHECK: %9 = cir.binop(or, %8, %6) : !u32i -// CHECK: cir.store %9, %2 : !u32i, cir.ptr +// CHECK: %1 = cir.const(#cir.int<3> : !s32i) : !s32i loc(#loc9) +// CHECK: %2 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr loc(#loc10) +// CHECK: %3 = cir.cast(integral, %1 : !s32i), !u32i loc(#loc9) +// CHECK: %4 = cir.load %2 : cir.ptr , !u32i loc(#loc10) +// CHECK: %5 = cir.const(#cir.int<15> : !u32i) : !u32i loc(#loc9) +// CHECK: %6 = cir.binop(and, %3, %5) : !u32i loc(#loc9) +// CHECK: %7 = cir.const(#cir.int<4294967280> : !u32i) : !u32i loc(#loc10) +// CHECK: %8 = cir.binop(and, %4, %7) : !u32i loc(#loc10) +// CHECK: %9 = cir.binop(or, %8, %6) : !u32i loc(#loc10) +// CHECK: cir.store %9, %2 : !u32i, cir.ptr loc(#loc32) void store_neg_field() { S s; s.d = -1; } // CHECK: cir.func @_Z15store_neg_field -// CHECK: %1 = cir.const(#cir.int<1> : !s32i) : !s32i -// CHECK: %2 = cir.unary(minus, %1) : !s32i, !s32i -// CHECK: %3 = "cir.struct_element_addr"(%0) {member_index = 1 : index, member_name = "d"} : (!cir.ptr) -> !cir.ptr -// CHECK: %4 = cir.cast(bitcast, %3 : !cir.ptr), !cir.ptr -// CHECK: %5 = cir.cast(integral, %2 : !s32i), !u24i -// CHECK: %6 = cir.load %4 : cir.ptr , !u24i -// CHECK: %7 = cir.const(#cir.int<3> : !u24i) : !u24i -// CHECK: %8 = cir.binop(and, %5, %7) : !u24i -// CHECK: %9 = cir.const(#cir.int<17> : !u24i) : !u24i -// CHECK: %10 = cir.shift(left, %8 : !u24i, %9 : !u24i) -> !u24i -// CHECK: %11 = cir.const(#cir.int<16383999> : !u24i) : !u24i -// CHECK: %12 = cir.binop(and, %6, %11) : !u24i -// CHECK: %13 = cir.binop(or, %12, %10) : !u24i -// CHECK: cir.store %13, %4 : !u24i, cir.ptr - +// CHECK: %1 = cir.const(#cir.int<1> : !s32i) : !s32i loc(#loc16) +// CHECK: %2 = cir.unary(minus, %1) : !s32i, !s32i loc(#loc17) +// CHECK: %3 = cir.get_member %0[1] {name = "d"} : !cir.ptr -> !cir.ptr loc(#loc18) +// CHECK: %4 = cir.cast(bitcast, %3 : !cir.ptr), !cir.ptr loc(#loc18) +// CHECK: %5 = cir.cast(integral, %2 : !s32i), !u24i loc(#loc17) +// CHECK: %6 = cir.load %4 : cir.ptr , !u24i loc(#loc18) +// CHECK: %7 = cir.const(#cir.int<3> : !u24i) : !u24i loc(#loc17) +// CHECK: %8 = cir.binop(and, %5, %7) : !u24i loc(#loc17) +// CHECK: %9 = cir.const(#cir.int<17> : !u24i) : !u24i loc(#loc17) +// CHECK: %10 = cir.shift(left, %8 : !u24i, %9 : !u24i) -> !u24i loc(#loc17) +// CHECK: %11 = cir.const(#cir.int<16383999> : !u24i) : !u24i loc(#loc18) +// CHECK: %12 = cir.binop(and, %6, %11) : !u24i loc(#loc18) +// CHECK: %13 = cir.binop(or, %12, %10) : !u24i loc(#loc18) +// CHECK: cir.store %13, %4 : !u24i, cir.ptr loc(#loc35) int load_field(S& s) { return s.d; } // CHECK: cir.func @_Z10load_fieldR1S -// CHECK: %2 = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: %3 = "cir.struct_element_addr"(%2) {member_index = 1 : index, member_name = "d"} : (!cir.ptr) -> !cir.ptr -// CHECK: %4 = cir.cast(bitcast, %3 : !cir.ptr), !cir.ptr -// CHECK: %5 = cir.load %4 : cir.ptr , !u24i -// CHECK: %6 = cir.cast(integral, %5 : !u24i), !s24i -// CHECK: %7 = cir.const(#cir.int<5> : !s24i) : !s24i -// CHECK: %8 = cir.shift(left, %6 : !s24i, %7 : !s24i) -> !s24i -// CHECK: %9 = cir.const(#cir.int<22> : !s24i) : !s24i -// CHECK: %10 = cir.shift( 
right, %8 : !s24i, %9 : !s24i) -> !s24i -// CHECK: %11 = cir.cast(integral, %10 : !s24i), !s32i -// CHECK: cir.store %11, %1 : !s32i, cir.ptr -// CHECK: %12 = cir.load %1 : cir.ptr , !s32i -// CHECK: cir.return %12 : !s32i +// CHECK: %2 = cir.load %0 : cir.ptr >, !cir.ptr loc(#loc25) +// CHECK: %3 = cir.get_member %2[1] {name = "d"} : !cir.ptr -> !cir.ptr loc(#loc18) +// CHECK: %4 = cir.cast(bitcast, %3 : !cir.ptr), !cir.ptr loc(#loc18) +// CHECK: %5 = cir.load %4 : cir.ptr , !u24i loc(#loc26) +// CHECK: %6 = cir.cast(integral, %5 : !u24i), !s24i loc(#loc26) +// CHECK: %7 = cir.const(#cir.int<5> : !s24i) : !s24i loc(#loc26) +// CHECK: %8 = cir.shift(left, %6 : !s24i, %7 : !s24i) -> !s24i loc(#loc26) +// CHECK: %9 = cir.const(#cir.int<22> : !s24i) : !s24i loc(#loc26) +// CHECK: %10 = cir.shift( right, %8 : !s24i, %9 : !s24i) -> !s24i loc(#loc26) +// CHECK: %11 = cir.cast(integral, %10 : !s24i), !s32i loc(#loc26) +// CHECK: cir.store %11, %1 : !s32i, cir.ptr loc(#loc38) +// CHECK: %12 = cir.load %1 : cir.ptr , !s32i loc(#loc38) From 38cd86e25c5b3222e8108fa7aa7980b035707c05 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 31 Aug 2023 13:57:12 +0300 Subject: [PATCH 10/15] wip --- clang/test/CIR/CodeGen/bitfields.c | 79 +++++++++++++++++++++++++++ clang/test/CIR/CodeGen/bitfields.cpp | 80 ++++++++++++++-------------- 2 files changed, 118 insertions(+), 41 deletions(-) create mode 100644 clang/test/CIR/CodeGen/bitfields.c diff --git a/clang/test/CIR/CodeGen/bitfields.c b/clang/test/CIR/CodeGen/bitfields.c new file mode 100644 index 000000000000..22ce5db948fc --- /dev/null +++ b/clang/test/CIR/CodeGen/bitfields.c @@ -0,0 +1,79 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir-enable -emit-cir %s -o %t.cir +// RUN: FileCheck --input-file=%t.cir %s + +struct __long { + struct __attribute__((__packed__)) { + unsigned __is_long_ : 1; + unsigned __cap_ : sizeof(unsigned) * 8 - 1; + }; + unsigned __size_; + unsigned *__data_; +}; + +void m() { + struct __long l; +} + +// CHECK: !ty_22anon22 = !cir.struct +// CHECK: !ty_22__long22 = !cir.struct}> + +typedef struct { + int a : 4; + int b : 27; + int c : 17; + int d : 2; + int e : 15; +} S; // 65 bits in total, i.e. 
mote than 64 + +// CHECK: cir.func {{.*@store_field}} +// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: [[TMP2:%.*]] = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.cast(integral, [[TMP1]] : !s32i), !u32i +// CHECK: [[TMP4:%.*]] = cir.load [[TMP2]] : cir.ptr , !u32i +// CHECK: [[TMP5:%.*]] = cir.const(#cir.int<15> : !u32i) : !u32i +// CHECK: [[TMP6:%.*]] = cir.binop(and, [[TMP3]], [[TMP5]]) : !u32i +// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<4294967280> : !u32i) : !u32i +// CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP4]], [[TMP7]]) : !u32i +// CHECK: [[TMP9:%.*]] = cir.binop(or, [[TMP8]], [[TMP6]]) : !u32i +// CHECK: cir.store [[TMP9]], [[TMP2]] : !u32i, cir.ptr +void store_field() { + S s; + s.a = 3; +} + +// CHECK: cir.func {{.*@store_neg_field}} +// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: [[TMP2:%.*]] = cir.unary(minus, [[TMP1]]) : !s32i, !s32i +// CHECK: [[TMP3:%.*]] = cir.get_member %0[1] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP5:%.*]] = cir.cast(integral, [[TMP2]] : !s32i), !u24i +// CHECK: [[TMP6:%.*]] = cir.load [[TMP4]] : cir.ptr , !u24i +// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<3> : !u24i) : !u24i +// CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP5]], [[TMP7]]) : !u24i +// CHECK: [[TMP9:%.*]] = cir.const(#cir.int<17> : !u24i) : !u24i +// CHECK: [[TMP10:%.*]] = cir.shift(left, [[TMP8]] : !u24i, [[TMP9]] : !u24i) -> !u24i +// CHECK: [[TMP11:%.*]] = cir.const(#cir.int<16383999> : !u24i) : !u24i +// CHECK: [[TMP12:%.*]] = cir.binop(and, [[TMP6]], [[TMP11]]) : !u24i +// CHECK: [[TMP13:%.*]] = cir.binop(or, [[TMP12]], [[TMP10]]) : !u24i +// CHECK: cir.store [[TMP13]], [[TMP4]] : !u24i, cir.ptr +void store_neg_field() { + S s; + s.d = -1; +} + +// CHECK: cir.func {{.*@load_field}} +// CHECK: [[TMP2]] = cir.load %0 : cir.ptr >, !cir.ptr +// CHECK: [[TMP3]] = cir.get_member [[TMP2]][1] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP4]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP5]] = cir.load [[TMP4]] : cir.ptr , !u24i +// CHECK: [[TMP6]] = cir.cast(integral, [[TMP5]] : !u24i), !s24i +// CHECK: [[TMP7]] = cir.const(#cir.int<5> : !s24i) : !s24i +// CHECK: [[TMP8]] = cir.shift(left, [[TMP6]] : !s24i, [[TMP7]] : !s24i) -> !s24i +// CHECK: [[TMP9]] = cir.const(#cir.int<22> : !s24i) : !s24i +// CHECK: [[TMP10]] = cir.shift( right, [[TMP8]] : !s24i, [[TMP9]] : !s24i) -> !s24i +// CHECK: [[TMP11]] = cir.cast(integral, [[TMP10]] : !s24i), !s32i +// CHECK: cir.store [[TMP11]], [[TMP1]] : !s32i, cir.ptr +// CHECK: [[TMP12]] = cir.load [[TMP1]] : cir.ptr , !s32i +int load_field(S* s) { + return s->d; +} diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp index a4aacd854927..a91770a6d2ca 100644 --- a/clang/test/CIR/CodeGen/bitfields.cpp +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -25,57 +25,55 @@ struct S { int e : 15; }; // 65 bits in total, i.e. 
mote than 64 +// CHECK: cir.func @_Z11store_field +// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<3> : !s32i) : !s32i +// CHECK: [[TMP2:%.*]] = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.cast(integral, [[TMP1]] : !s32i), !u32i +// CHECK: [[TMP4:%.*]] = cir.load [[TMP2]] : cir.ptr , !u32i +// CHECK: [[TMP5:%.*]] = cir.const(#cir.int<15> : !u32i) : !u32i +// CHECK: [[TMP6:%.*]] = cir.binop(and, [[TMP3]], [[TMP5]]) : !u32i +// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<4294967280> : !u32i) : !u32i +// CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP4]], [[TMP7]]) : !u32i +// CHECK: [[TMP9:%.*]] = cir.binop(or, [[TMP8]], [[TMP6]]) : !u32i +// CHECK: cir.store [[TMP9]], [[TMP2]] : !u32i, cir.ptr void store_field() { S s; s.a = 3; } -// CHECK: cir.func @_Z11store_field -// CHECK: %1 = cir.const(#cir.int<3> : !s32i) : !s32i loc(#loc9) -// CHECK: %2 = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr loc(#loc10) -// CHECK: %3 = cir.cast(integral, %1 : !s32i), !u32i loc(#loc9) -// CHECK: %4 = cir.load %2 : cir.ptr , !u32i loc(#loc10) -// CHECK: %5 = cir.const(#cir.int<15> : !u32i) : !u32i loc(#loc9) -// CHECK: %6 = cir.binop(and, %3, %5) : !u32i loc(#loc9) -// CHECK: %7 = cir.const(#cir.int<4294967280> : !u32i) : !u32i loc(#loc10) -// CHECK: %8 = cir.binop(and, %4, %7) : !u32i loc(#loc10) -// CHECK: %9 = cir.binop(or, %8, %6) : !u32i loc(#loc10) -// CHECK: cir.store %9, %2 : !u32i, cir.ptr loc(#loc32) - +// CHECK: cir.func @_Z15store_neg_field +// CHECK: [[TMP1:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i +// CHECK: [[TMP2:%.*]] = cir.unary(minus, [[TMP1]]) : !s32i, !s32i +// CHECK: [[TMP3:%.*]] = cir.get_member %0[1] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP5:%.*]] = cir.cast(integral, [[TMP2]] : !s32i), !u24i +// CHECK: [[TMP6:%.*]] = cir.load [[TMP4]] : cir.ptr , !u24i +// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<3> : !u24i) : !u24i +// CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP5]], [[TMP7]]) : !u24i +// CHECK: [[TMP9:%.*]] = cir.const(#cir.int<17> : !u24i) : !u24i +// CHECK: [[TMP10:%.*]] = cir.shift(left, [[TMP8]] : !u24i, [[TMP9]] : !u24i) -> !u24i +// CHECK: [[TMP11:%.*]] = cir.const(#cir.int<16383999> : !u24i) : !u24i +// CHECK: [[TMP12:%.*]] = cir.binop(and, [[TMP6]], [[TMP11]]) : !u24i +// CHECK: [[TMP13:%.*]] = cir.binop(or, [[TMP12]], [[TMP10]]) : !u24i +// CHECK: cir.store [[TMP13]], [[TMP4]] : !u24i, cir.ptr void store_neg_field() { S s; s.d = -1; } -// CHECK: cir.func @_Z15store_neg_field -// CHECK: %1 = cir.const(#cir.int<1> : !s32i) : !s32i loc(#loc16) -// CHECK: %2 = cir.unary(minus, %1) : !s32i, !s32i loc(#loc17) -// CHECK: %3 = cir.get_member %0[1] {name = "d"} : !cir.ptr -> !cir.ptr loc(#loc18) -// CHECK: %4 = cir.cast(bitcast, %3 : !cir.ptr), !cir.ptr loc(#loc18) -// CHECK: %5 = cir.cast(integral, %2 : !s32i), !u24i loc(#loc17) -// CHECK: %6 = cir.load %4 : cir.ptr , !u24i loc(#loc18) -// CHECK: %7 = cir.const(#cir.int<3> : !u24i) : !u24i loc(#loc17) -// CHECK: %8 = cir.binop(and, %5, %7) : !u24i loc(#loc17) -// CHECK: %9 = cir.const(#cir.int<17> : !u24i) : !u24i loc(#loc17) -// CHECK: %10 = cir.shift(left, %8 : !u24i, %9 : !u24i) -> !u24i loc(#loc17) -// CHECK: %11 = cir.const(#cir.int<16383999> : !u24i) : !u24i loc(#loc18) -// CHECK: %12 = cir.binop(and, %6, %11) : !u24i loc(#loc18) -// CHECK: %13 = cir.binop(or, %12, %10) : !u24i loc(#loc18) -// CHECK: cir.store %13, %4 : !u24i, cir.ptr loc(#loc35) +// CHECK: cir.func @_Z10load_field +// CHECK: [[TMP2]] = cir.load %0 : 
cir.ptr >, !cir.ptr +// CHECK: [[TMP3]] = cir.get_member [[TMP2]][1] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP4]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP5]] = cir.load [[TMP4]] : cir.ptr , !u24i +// CHECK: [[TMP6]] = cir.cast(integral, [[TMP5]] : !u24i), !s24i +// CHECK: [[TMP7]] = cir.const(#cir.int<5> : !s24i) : !s24i +// CHECK: [[TMP8]] = cir.shift(left, [[TMP6]] : !s24i, [[TMP7]] : !s24i) -> !s24i +// CHECK: [[TMP9]] = cir.const(#cir.int<22> : !s24i) : !s24i +// CHECK: [[TMP10]] = cir.shift( right, [[TMP8]] : !s24i, [[TMP9]] : !s24i) -> !s24i +// CHECK: [[TMP11]] = cir.cast(integral, [[TMP10]] : !s24i), !s32i +// CHECK: cir.store [[TMP11]], [[TMP1]] : !s32i, cir.ptr +// CHECK: [[TMP12]] = cir.load [[TMP1]] : cir.ptr , !s32i int load_field(S& s) { return s.d; } - -// CHECK: cir.func @_Z10load_fieldR1S -// CHECK: %2 = cir.load %0 : cir.ptr >, !cir.ptr loc(#loc25) -// CHECK: %3 = cir.get_member %2[1] {name = "d"} : !cir.ptr -> !cir.ptr loc(#loc18) -// CHECK: %4 = cir.cast(bitcast, %3 : !cir.ptr), !cir.ptr loc(#loc18) -// CHECK: %5 = cir.load %4 : cir.ptr , !u24i loc(#loc26) -// CHECK: %6 = cir.cast(integral, %5 : !u24i), !s24i loc(#loc26) -// CHECK: %7 = cir.const(#cir.int<5> : !s24i) : !s24i loc(#loc26) -// CHECK: %8 = cir.shift(left, %6 : !s24i, %7 : !s24i) -> !s24i loc(#loc26) -// CHECK: %9 = cir.const(#cir.int<22> : !s24i) : !s24i loc(#loc26) -// CHECK: %10 = cir.shift( right, %8 : !s24i, %9 : !s24i) -> !s24i loc(#loc26) -// CHECK: %11 = cir.cast(integral, %10 : !s24i), !s32i loc(#loc26) -// CHECK: cir.store %11, %1 : !s32i, cir.ptr loc(#loc38) -// CHECK: %12 = cir.load %1 : cir.ptr , !s32i loc(#loc38) From f090fb5b094d3e62413f1c1b4fa7aa75e22781ba Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 31 Aug 2023 15:39:51 +0300 Subject: [PATCH 11/15] refactored tests --- clang/test/CIR/CodeGen/bitfields.c | 29 +++++++++++++++------------- clang/test/CIR/CodeGen/bitfields.cpp | 29 +++++++++++++++------------- 2 files changed, 32 insertions(+), 26 deletions(-) diff --git a/clang/test/CIR/CodeGen/bitfields.c b/clang/test/CIR/CodeGen/bitfields.c index 22ce5db948fc..09885e3652f7 100644 --- a/clang/test/CIR/CodeGen/bitfields.c +++ b/clang/test/CIR/CodeGen/bitfields.c @@ -26,8 +26,9 @@ typedef struct { } S; // 65 bits in total, i.e. 
mote than 64 // CHECK: cir.func {{.*@store_field}} +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr , // CHECK: [[TMP1:%.*]] = cir.const(#cir.int<3> : !s32i) : !s32i -// CHECK: [[TMP2:%.*]] = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr +// CHECK: [[TMP2:%.*]] = cir.cast(bitcast, [[TMP0]] : !cir.ptr), !cir.ptr // CHECK: [[TMP3:%.*]] = cir.cast(integral, [[TMP1]] : !s32i), !u32i // CHECK: [[TMP4:%.*]] = cir.load [[TMP2]] : cir.ptr , !u32i // CHECK: [[TMP5:%.*]] = cir.const(#cir.int<15> : !u32i) : !u32i @@ -42,9 +43,10 @@ void store_field() { } // CHECK: cir.func {{.*@store_neg_field}} +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr , // CHECK: [[TMP1:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: [[TMP2:%.*]] = cir.unary(minus, [[TMP1]]) : !s32i, !s32i -// CHECK: [[TMP3:%.*]] = cir.get_member %0[1] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP0]][1] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK: [[TMP4:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr // CHECK: [[TMP5:%.*]] = cir.cast(integral, [[TMP2]] : !s32i), !u24i // CHECK: [[TMP6:%.*]] = cir.load [[TMP4]] : cir.ptr , !u24i @@ -62,18 +64,19 @@ void store_neg_field() { } // CHECK: cir.func {{.*@load_field}} -// CHECK: [[TMP2]] = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: [[TMP3]] = cir.get_member [[TMP2]][1] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK: [[TMP4]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr -// CHECK: [[TMP5]] = cir.load [[TMP4]] : cir.ptr , !u24i -// CHECK: [[TMP6]] = cir.cast(integral, [[TMP5]] : !u24i), !s24i -// CHECK: [[TMP7]] = cir.const(#cir.int<5> : !s24i) : !s24i -// CHECK: [[TMP8]] = cir.shift(left, [[TMP6]] : !s24i, [[TMP7]] : !s24i) -> !s24i -// CHECK: [[TMP9]] = cir.const(#cir.int<22> : !s24i) : !s24i -// CHECK: [[TMP10]] = cir.shift( right, [[TMP8]] : !s24i, [[TMP9]] : !s24i) -> !s24i -// CHECK: [[TMP11]] = cir.cast(integral, [[TMP10]] : !s24i), !s32i +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr > +// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP2]][1] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP5:%.*]] = cir.load [[TMP4]] : cir.ptr , !u24i +// CHECK: [[TMP6:%.*]] = cir.cast(integral, [[TMP5]] : !u24i), !s24i +// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<5> : !s24i) : !s24i +// CHECK: [[TMP8:%.*]] = cir.shift(left, [[TMP6]] : !s24i, [[TMP7]] : !s24i) -> !s24i +// CHECK: [[TMP9:%.*]] = cir.const(#cir.int<22> : !s24i) : !s24i +// CHECK: [[TMP10:%.*]] = cir.shift( right, [[TMP8]] : !s24i, [[TMP9]] : !s24i) -> !s24i +// CHECK: [[TMP11:%.*]] = cir.cast(integral, [[TMP10]] : !s24i), !s32i // CHECK: cir.store [[TMP11]], [[TMP1]] : !s32i, cir.ptr -// CHECK: [[TMP12]] = cir.load [[TMP1]] : cir.ptr , !s32i +// CHECK: [[TMP12:%.*]] = cir.load [[TMP1]] : cir.ptr , !s32i int load_field(S* s) { return s->d; } diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp index a91770a6d2ca..a45ceaa2fafa 100644 --- a/clang/test/CIR/CodeGen/bitfields.cpp +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -26,8 +26,9 @@ struct S { }; // 65 bits in total, i.e. 
mote than 64 // CHECK: cir.func @_Z11store_field +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr // CHECK: [[TMP1:%.*]] = cir.const(#cir.int<3> : !s32i) : !s32i -// CHECK: [[TMP2:%.*]] = cir.cast(bitcast, %0 : !cir.ptr), !cir.ptr +// CHECK: [[TMP2:%.*]] = cir.cast(bitcast, [[TMP0]] : !cir.ptr), !cir.ptr // CHECK: [[TMP3:%.*]] = cir.cast(integral, [[TMP1]] : !s32i), !u32i // CHECK: [[TMP4:%.*]] = cir.load [[TMP2]] : cir.ptr , !u32i // CHECK: [[TMP5:%.*]] = cir.const(#cir.int<15> : !u32i) : !u32i @@ -42,9 +43,10 @@ void store_field() { } // CHECK: cir.func @_Z15store_neg_field +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr // CHECK: [[TMP1:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: [[TMP2:%.*]] = cir.unary(minus, [[TMP1]]) : !s32i, !s32i -// CHECK: [[TMP3:%.*]] = cir.get_member %0[1] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP0]][1] {name = "d"} : !cir.ptr -> !cir.ptr // CHECK: [[TMP4:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr // CHECK: [[TMP5:%.*]] = cir.cast(integral, [[TMP2]] : !s32i), !u24i // CHECK: [[TMP6:%.*]] = cir.load [[TMP4]] : cir.ptr , !u24i @@ -62,18 +64,19 @@ void store_neg_field() { } // CHECK: cir.func @_Z10load_field -// CHECK: [[TMP2]] = cir.load %0 : cir.ptr >, !cir.ptr -// CHECK: [[TMP3]] = cir.get_member [[TMP2]][1] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK: [[TMP4]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr -// CHECK: [[TMP5]] = cir.load [[TMP4]] : cir.ptr , !u24i -// CHECK: [[TMP6]] = cir.cast(integral, [[TMP5]] : !u24i), !s24i -// CHECK: [[TMP7]] = cir.const(#cir.int<5> : !s24i) : !s24i -// CHECK: [[TMP8]] = cir.shift(left, [[TMP6]] : !s24i, [[TMP7]] : !s24i) -> !s24i -// CHECK: [[TMP9]] = cir.const(#cir.int<22> : !s24i) : !s24i -// CHECK: [[TMP10]] = cir.shift( right, [[TMP8]] : !s24i, [[TMP9]] : !s24i) -> !s24i -// CHECK: [[TMP11]] = cir.cast(integral, [[TMP10]] : !s24i), !s32i +// CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr > +// CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr +// CHECK: [[TMP3:%.*]] = cir.get_member [[TMP2]][1] {name = "d"} : !cir.ptr -> !cir.ptr +// CHECK: [[TMP4:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP5:%.*]] = cir.load [[TMP4]] : cir.ptr , !u24i +// CHECK: [[TMP6:%.*]] = cir.cast(integral, [[TMP5]] : !u24i), !s24i +// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<5> : !s24i) : !s24i +// CHECK: [[TMP8:%.*]] = cir.shift(left, [[TMP6]] : !s24i, [[TMP7]] : !s24i) -> !s24i +// CHECK: [[TMP9:%.*]] = cir.const(#cir.int<22> : !s24i) : !s24i +// CHECK: [[TMP10:%.*]] = cir.shift( right, [[TMP8]] : !s24i, [[TMP9]] : !s24i) -> !s24i +// CHECK: [[TMP11:%.*]] = cir.cast(integral, [[TMP10]] : !s24i), !s32i // CHECK: cir.store [[TMP11]], [[TMP1]] : !s32i, cir.ptr -// CHECK: [[TMP12]] = cir.load [[TMP1]] : cir.ptr , !s32i +// CHECK: [[TMP12:%.*]] = cir.load [[TMP1]] : cir.ptr , !s32i int load_field(S& s) { return s.d; } From f8a49f1abec84a8b687c2b39ce3ad8d10b6a736e Mon Sep 17 00:00:00 2001 From: gitoleg Date: Thu, 31 Aug 2023 16:55:26 +0300 Subject: [PATCH 12/15] wip --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 10 ++++++++-- clang/test/CIR/CodeGen/bitfields.c | 2 +- clang/test/CIR/CodeGen/bitfields.cpp | 2 +- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 8e4d0204e198..4e4668a8134f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -257,7 +257,8 @@ LValue 
CIRGenFunction::buildLValueForBitField(LValue base, QualType fieldType = field->getType().withCVRQualifiers(base.getVRQualifiers()); - // TODO: Support TBAA for bit fields. + + assert(!UnimplementedFeature::tbaa() && "NYI TBAA for bit fields"); LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource()); return LValue::MakeBitfield(Addr, info, fieldType, FieldBaseInfo); } @@ -686,7 +687,12 @@ void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, SrcVal = builder.createOr(Val, SrcVal); } else { - assert(0 && "not implemented"); + // According to the AACPS: + // When a volatile bit-field is written, and its container does not overlap + // with any non-bit-field member, its container must be read exactly once + // and written exactly once using the access width appropriate to the type + // of the container. The two accesses are not atomic. + llvm_unreachable("volatile bit-field is not implemented for the AACPS"); } // Write the new value back out. diff --git a/clang/test/CIR/CodeGen/bitfields.c b/clang/test/CIR/CodeGen/bitfields.c index 09885e3652f7..1f2af08898bd 100644 --- a/clang/test/CIR/CodeGen/bitfields.c +++ b/clang/test/CIR/CodeGen/bitfields.c @@ -23,7 +23,7 @@ typedef struct { int c : 17; int d : 2; int e : 15; -} S; // 65 bits in total, i.e. mote than 64 +} S; // 65 bits in total, i.e. more than 64 // CHECK: cir.func {{.*@store_field}} // CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr , diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp index a45ceaa2fafa..b837dc63ddbf 100644 --- a/clang/test/CIR/CodeGen/bitfields.cpp +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -23,7 +23,7 @@ struct S { int c : 17; int d : 2; int e : 15; -}; // 65 bits in total, i.e. mote than 64 +}; // 65 bits in total, i.e. more than 64 // CHECK: cir.func @_Z11store_field // CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr From 59642890cb2547d9cdcaae5fdb83b5cf09c96cc2 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Mon, 4 Sep 2023 14:14:00 +0300 Subject: [PATCH 13/15] fixed tests, use powerOfTwo storage --- clang/lib/CIR/CodeGen/CIRGenBuilder.h | 4 -- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 4 +- .../CIR/CodeGen/CIRRecordLayoutBuilder.cpp | 2 +- clang/test/CIR/CodeGen/bitfields.c | 38 +++++++++--------- clang/test/CIR/CodeGen/bitfields.cpp | 40 +++++++++---------- 5 files changed, 42 insertions(+), 46 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index f265e0541196..8111bcf6ad17 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -303,10 +303,6 @@ class CIRGenBuilderTy : public mlir::OpBuilder { mlir::cir::IntType getUInt32Ty() { return typeCache.UInt32Ty; } mlir::cir::IntType getUInt64Ty() { return typeCache.UInt64Ty; } - mlir::cir::IntType getCustomIntTy(unsigned size, bool isSigned) { - return mlir::cir::IntType::get(getContext(), size, isSigned); - } - bool isInt8Ty(mlir::Type i) { return i == typeCache.UInt8Ty || i == typeCache.SInt8Ty; } diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 4e4668a8134f..1f16d51adef3 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -249,7 +249,7 @@ LValue CIRGenFunction::buildLValueForBitField(LValue base, const unsigned SS = useVolatile ? info.VolatileStorageSize : info.StorageSize; // Get the access type. 
- mlir::Type FieldIntTy = builder.getCustomIntTy(SS, false); + mlir::Type FieldIntTy = builder.getUIntNTy(SS); auto loc = getLoc(field->getLocation()); if (Addr.getElementType() != FieldIntTy) @@ -602,7 +602,7 @@ RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV, if (Info.IsSigned) { assert(static_cast(Offset + Info.Size) <= StorageSize); - mlir::Type typ = builder.getCustomIntTy(ValWidth, true); + mlir::Type typ = builder.getSIntNTy(ValWidth); Val = builder.createIntCast(Val, typ); unsigned HighBits = StorageSize - Offset - Info.Size; diff --git a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp index 8613722542e6..f0753945f28f 100644 --- a/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp +++ b/clang/lib/CIR/CodeGen/CIRRecordLayoutBuilder.cpp @@ -136,7 +136,7 @@ struct CIRRecordLowering final { /// Wraps mlir::cir::IntType with some implicit arguments. mlir::Type getUIntNType(uint64_t NumBits) { - unsigned AlignedBits = llvm::alignTo(NumBits, astContext.getCharWidth()); + unsigned AlignedBits = llvm::PowerOf2Ceil(NumBits); return mlir::cir::IntType::get(&cirGenTypes.getMLIRContext(), AlignedBits, /*isSigned=*/false); } diff --git a/clang/test/CIR/CodeGen/bitfields.c b/clang/test/CIR/CodeGen/bitfields.c index 1f2af08898bd..b4c26f8ae05d 100644 --- a/clang/test/CIR/CodeGen/bitfields.c +++ b/clang/test/CIR/CodeGen/bitfields.c @@ -47,17 +47,17 @@ void store_field() { // CHECK: [[TMP1:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: [[TMP2:%.*]] = cir.unary(minus, [[TMP1]]) : !s32i, !s32i // CHECK: [[TMP3:%.*]] = cir.get_member [[TMP0]][1] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK: [[TMP4:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr -// CHECK: [[TMP5:%.*]] = cir.cast(integral, [[TMP2]] : !s32i), !u24i -// CHECK: [[TMP6:%.*]] = cir.load [[TMP4]] : cir.ptr , !u24i -// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<3> : !u24i) : !u24i -// CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP5]], [[TMP7]]) : !u24i -// CHECK: [[TMP9:%.*]] = cir.const(#cir.int<17> : !u24i) : !u24i -// CHECK: [[TMP10:%.*]] = cir.shift(left, [[TMP8]] : !u24i, [[TMP9]] : !u24i) -> !u24i -// CHECK: [[TMP11:%.*]] = cir.const(#cir.int<16383999> : !u24i) : !u24i -// CHECK: [[TMP12:%.*]] = cir.binop(and, [[TMP6]], [[TMP11]]) : !u24i -// CHECK: [[TMP13:%.*]] = cir.binop(or, [[TMP12]], [[TMP10]]) : !u24i -// CHECK: cir.store [[TMP13]], [[TMP4]] : !u24i, cir.ptr +// CHECK: [[TMP4:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP5:%.*]] = cir.cast(integral, [[TMP2]] : !s32i), !u32i +// CHECK: [[TMP6:%.*]] = cir.load [[TMP4]] : cir.ptr , !u32i +// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<3> : !u32i) : !u32i +// CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP5]], [[TMP7]]) : !u32i +// CHECK: [[TMP9:%.*]] = cir.const(#cir.int<17> : !u32i) : !u32i +// CHECK: [[TMP10:%.*]] = cir.shift(left, [[TMP8]] : !u32i, [[TMP9]] : !u32i) -> !u32i +// CHECK: [[TMP11:%.*]] = cir.const(#cir.int<4294574079> : !u32i) : !u32i +// CHECK: [[TMP12:%.*]] = cir.binop(and, [[TMP6]], [[TMP11]]) : !u32i +// CHECK: [[TMP13:%.*]] = cir.binop(or, [[TMP12]], [[TMP10]]) : !u32i +// CHECK: cir.store [[TMP13]], [[TMP4]] : !u32i, cir.ptr void store_neg_field() { S s; s.d = -1; @@ -67,14 +67,14 @@ void store_neg_field() { // CHECK: [[TMP0:%.*]] = cir.alloca !cir.ptr, cir.ptr > // CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr // CHECK: [[TMP3:%.*]] = cir.get_member [[TMP2]][1] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK: [[TMP4:%.*]] = cir.cast(bitcast, [[TMP3]] : 
!cir.ptr), !cir.ptr -// CHECK: [[TMP5:%.*]] = cir.load [[TMP4]] : cir.ptr , !u24i -// CHECK: [[TMP6:%.*]] = cir.cast(integral, [[TMP5]] : !u24i), !s24i -// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<5> : !s24i) : !s24i -// CHECK: [[TMP8:%.*]] = cir.shift(left, [[TMP6]] : !s24i, [[TMP7]] : !s24i) -> !s24i -// CHECK: [[TMP9:%.*]] = cir.const(#cir.int<22> : !s24i) : !s24i -// CHECK: [[TMP10:%.*]] = cir.shift( right, [[TMP8]] : !s24i, [[TMP9]] : !s24i) -> !s24i -// CHECK: [[TMP11:%.*]] = cir.cast(integral, [[TMP10]] : !s24i), !s32i +// CHECK: [[TMP4:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP5:%.*]] = cir.load [[TMP4]] : cir.ptr , !u32i +// CHECK: [[TMP6:%.*]] = cir.cast(integral, [[TMP5]] : !u32i), !s32i +// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<13> : !s32i) : !s32i +// CHECK: [[TMP8:%.*]] = cir.shift(left, [[TMP6]] : !s32i, [[TMP7]] : !s32i) -> !s32i +// CHECK: [[TMP9:%.*]] = cir.const(#cir.int<30> : !s32i) : !s32i +// CHECK: [[TMP10:%.*]] = cir.shift( right, [[TMP8]] : !s32i, [[TMP9]] : !s32i) -> !s32i +// CHECK: [[TMP11:%.*]] = cir.cast(integral, [[TMP10]] : !s32i), !s32i // CHECK: cir.store [[TMP11]], [[TMP1]] : !s32i, cir.ptr // CHECK: [[TMP12:%.*]] = cir.load [[TMP1]] : cir.ptr , !s32i int load_field(S* s) { diff --git a/clang/test/CIR/CodeGen/bitfields.cpp b/clang/test/CIR/CodeGen/bitfields.cpp index b837dc63ddbf..bcaa3bc3008a 100644 --- a/clang/test/CIR/CodeGen/bitfields.cpp +++ b/clang/test/CIR/CodeGen/bitfields.cpp @@ -43,21 +43,21 @@ void store_field() { } // CHECK: cir.func @_Z15store_neg_field -// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr +// CHECK: [[TMP0:%.*]] = cir.alloca !ty_22S22, cir.ptr // CHECK: [[TMP1:%.*]] = cir.const(#cir.int<1> : !s32i) : !s32i // CHECK: [[TMP2:%.*]] = cir.unary(minus, [[TMP1]]) : !s32i, !s32i // CHECK: [[TMP3:%.*]] = cir.get_member [[TMP0]][1] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK: [[TMP4:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr -// CHECK: [[TMP5:%.*]] = cir.cast(integral, [[TMP2]] : !s32i), !u24i -// CHECK: [[TMP6:%.*]] = cir.load [[TMP4]] : cir.ptr , !u24i -// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<3> : !u24i) : !u24i -// CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP5]], [[TMP7]]) : !u24i -// CHECK: [[TMP9:%.*]] = cir.const(#cir.int<17> : !u24i) : !u24i -// CHECK: [[TMP10:%.*]] = cir.shift(left, [[TMP8]] : !u24i, [[TMP9]] : !u24i) -> !u24i -// CHECK: [[TMP11:%.*]] = cir.const(#cir.int<16383999> : !u24i) : !u24i -// CHECK: [[TMP12:%.*]] = cir.binop(and, [[TMP6]], [[TMP11]]) : !u24i -// CHECK: [[TMP13:%.*]] = cir.binop(or, [[TMP12]], [[TMP10]]) : !u24i -// CHECK: cir.store [[TMP13]], [[TMP4]] : !u24i, cir.ptr +// CHECK: [[TMP4:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP5:%.*]] = cir.cast(integral, [[TMP2]] : !s32i), !u32i +// CHECK: [[TMP6:%.*]] = cir.load [[TMP4]] : cir.ptr , !u32i +// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<3> : !u32i) : !u32i +// CHECK: [[TMP8:%.*]] = cir.binop(and, [[TMP5]], [[TMP7]]) : !u32i +// CHECK: [[TMP9:%.*]] = cir.const(#cir.int<17> : !u32i) : !u32i +// CHECK: [[TMP10:%.*]] = cir.shift(left, [[TMP8]] : !u32i, [[TMP9]] : !u32i) -> !u32i +// CHECK: [[TMP11:%.*]] = cir.const(#cir.int<4294574079> : !u32i) : !u32i +// CHECK: [[TMP12:%.*]] = cir.binop(and, [[TMP6]], [[TMP11]]) : !u32i +// CHECK: [[TMP13:%.*]] = cir.binop(or, [[TMP12]], [[TMP10]]) : !u32i +// CHECK: cir.store [[TMP13]], [[TMP4]] : !u32i, cir.ptr void store_neg_field() { S s; s.d = -1; @@ -67,14 +67,14 @@ void store_neg_field() { // CHECK: [[TMP0:%.*]] = cir.alloca 
!cir.ptr, cir.ptr > // CHECK: [[TMP2:%.*]] = cir.load [[TMP0]] : cir.ptr >, !cir.ptr // CHECK: [[TMP3:%.*]] = cir.get_member [[TMP2]][1] {name = "d"} : !cir.ptr -> !cir.ptr -// CHECK: [[TMP4:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr -// CHECK: [[TMP5:%.*]] = cir.load [[TMP4]] : cir.ptr , !u24i -// CHECK: [[TMP6:%.*]] = cir.cast(integral, [[TMP5]] : !u24i), !s24i -// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<5> : !s24i) : !s24i -// CHECK: [[TMP8:%.*]] = cir.shift(left, [[TMP6]] : !s24i, [[TMP7]] : !s24i) -> !s24i -// CHECK: [[TMP9:%.*]] = cir.const(#cir.int<22> : !s24i) : !s24i -// CHECK: [[TMP10:%.*]] = cir.shift( right, [[TMP8]] : !s24i, [[TMP9]] : !s24i) -> !s24i -// CHECK: [[TMP11:%.*]] = cir.cast(integral, [[TMP10]] : !s24i), !s32i +// CHECK: [[TMP4:%.*]] = cir.cast(bitcast, [[TMP3]] : !cir.ptr), !cir.ptr +// CHECK: [[TMP5:%.*]] = cir.load [[TMP4]] : cir.ptr , !u32i +// CHECK: [[TMP6:%.*]] = cir.cast(integral, [[TMP5]] : !u32i), !s32i +// CHECK: [[TMP7:%.*]] = cir.const(#cir.int<13> : !s32i) : !s32i +// CHECK: [[TMP8:%.*]] = cir.shift(left, [[TMP6]] : !s32i, [[TMP7]] : !s32i) -> !s32i +// CHECK: [[TMP9:%.*]] = cir.const(#cir.int<30> : !s32i) : !s32i +// CHECK: [[TMP10:%.*]] = cir.shift( right, [[TMP8]] : !s32i, [[TMP9]] : !s32i) -> !s32i +// CHECK: [[TMP11:%.*]] = cir.cast(integral, [[TMP10]] : !s32i), !s32i // CHECK: cir.store [[TMP11]], [[TMP1]] : !s32i, cir.ptr // CHECK: [[TMP12:%.*]] = cir.load [[TMP1]] : cir.ptr , !s32i int load_field(S& s) { From 2c521453d4f99e52d314ddeba12ae4cb027d5865 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Wed, 6 Sep 2023 11:10:25 +0300 Subject: [PATCH 14/15] adds a new Unimplemented feature: scalarRangeCheck --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 17 ++++++++--------- .../CIR/CodeGen/UnimplementedFeatureGuarding.h | 1 + 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index cb77d2c3f37e..84de2ed1fa0d 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -631,8 +631,8 @@ RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV, Val = builder.createAnd(Val, llvm::APInt::getLowBitsSet(ValWidth, Info.Size)); } - Val = builder.createIntCast(Val, ResLTy); - // EmitScalarRangeCheck(Val, LV.getType(), Loc); //FIXME: TODO + Val = builder.createIntCast(Val, ResLTy); + assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI"); return RValue::get(Val); } @@ -2410,9 +2410,9 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue, mlir::Value CIRGenFunction::buildFromMemory(mlir::Value Value, QualType Ty) { // Bool has a different representation in memory than in registers. 
- // if (hasBooleanRepresentation(Ty)) { - // llvm_unreachable("NYI"); - // } + if (!Ty->isBooleanType() && hasBooleanRepresentation(Ty)) { + llvm_unreachable("NYI"); + } return Value; } @@ -2447,10 +2447,9 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile, if (isNontemporal) { llvm_unreachable("NYI"); } - - // TODO: TBAA - - // TODO: buildScalarRangeCheck + + assert(!UnimplementedFeature::tbaa() && "NYI"); + assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI"); return buildFromMemory(Load, Ty); } diff --git a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h index d39bb3c1b48d..5a857a2db39f 100644 --- a/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h +++ b/clang/lib/CIR/CodeGen/UnimplementedFeatureGuarding.h @@ -138,6 +138,7 @@ struct UnimplementedFeature { static bool exceptions() { return false; } static bool metaDataNode() { return false; } static bool isSEHTryScope() { return false; } + static bool emitScalarRangeCheck() { return false; } }; } // namespace cir From 1ac05445c3d69767c5fe1469f43e6361a340c084 Mon Sep 17 00:00:00 2001 From: gitoleg Date: Sat, 9 Sep 2023 15:21:45 +0300 Subject: [PATCH 15/15] minor stylish changes --- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 32 ++++++++++------------ clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 2 +- clang/lib/CIR/CodeGen/CIRGenFunction.h | 2 +- 3 files changed, 17 insertions(+), 19 deletions(-) diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 30b4b6ac5c44..b99f46d6ece9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -657,7 +657,7 @@ void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) { } void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, - mlir::Value *Result) { + mlir::Value &Result) { const CIRGenBitFieldInfo &Info = Dst.getBitFieldInfo(); mlir::Type ResLTy = getTypes().convertTypeForMem(Dst.getType()); Address Ptr = Dst.getBitFieldAddress(); @@ -713,24 +713,22 @@ void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, buildStoreOfScalar(SrcVal, Ptr, Dst.isVolatileQualified(), Dst.getType(), Dst.getBaseInfo(), false, false); - // Return the new value of the bit-field, if requested. - if (Result) { - mlir::Value ResultVal = MaskedVal; - ResultVal = builder.createIntCast(ResultVal, ResLTy); + // Return the new value of the bit-field. + mlir::Value ResultVal = MaskedVal; + ResultVal = builder.createIntCast(ResultVal, ResLTy); - // Sign extend the value if needed. - if (Info.IsSigned) { - assert(Info.Size <= StorageSize); - unsigned HighBits = StorageSize - Info.Size; + // Sign extend the value if needed. 
+ if (Info.IsSigned) { + assert(Info.Size <= StorageSize); + unsigned HighBits = StorageSize - Info.Size; - if (HighBits) { - ResultVal = builder.createShiftLeft(ResultVal, HighBits); - ResultVal = builder.createShiftRight(ResultVal, HighBits); - } + if (HighBits) { + ResultVal = builder.createShiftLeft(ResultVal, HighBits); + ResultVal = builder.createShiftRight(ResultVal, HighBits); } - - *Result = buildFromMemory(ResultVal, Dst.getType()); } + + Result = buildFromMemory(ResultVal, Dst.getType()); } static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E, @@ -957,8 +955,8 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) { SourceLocRAIIObject Loc{*this, getLoc(E->getSourceRange())}; if (LV.isBitField()) { - mlir::Value *val{0}; - buildStoreThroughBitfieldLValue(RV, LV, val); + mlir::Value result; + buildStoreThroughBitfieldLValue(RV, LV, result); } else { buildStoreThroughLValue(RV, LV); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 4bd5905a795d..b6b21cc0bb25 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -1861,7 +1861,7 @@ mlir::Value ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { // 'An assignment expression has the value of the left operand after the // assignment...'. if (LHS.isBitField()) { - CGF.buildStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS); + CGF.buildStoreThroughBitfieldLValue(RValue::get(RHS), LHS, RHS); } else { CGF.buildNullabilityCheck(LHS, RHS, E->getExprLoc()); CIRGenFunction::SourceLocRAIIObject loc{CGF, diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index f8cd3e95c6e2..ea5bf59d92c8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -1245,7 +1245,7 @@ class CIRGenFunction : public CIRGenTypeCache { void buildStoreThroughLValue(RValue Src, LValue Dst); void buildStoreThroughBitfieldLValue(RValue Src, LValue Dst, - mlir::Value *Result); + mlir::Value &Result); mlir::cir::BrOp buildBranchThroughCleanup(mlir::Location Loc, JumpDest Dest);
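For anyone following the CHECK lines in the tests above, the emitted CIR reduces a bit-field access to plain shift/mask arithmetic on the unsigned storage unit. The standalone C sketch below is illustrative only and is not part of any patch in this series: it mirrors the constants the tests expect for the `d` field once storage units are rounded up to a power of two (Offset = 17, Size = 2, StorageSize = 32), assuming a two's-complement target where right-shifting a signed value is an arithmetic shift. Unlike the emitted CIR, the cast to a signed type happens after the left shift here, purely to keep the C illustration free of signed-overflow undefined behaviour.

#include <stdint.h>
#include <stdio.h>

/* Write 'src' into a bit-field of 'size' bits at 'offset' inside 'storage':
   mask the source to the field width, shift it into place, clear the old
   field bits, then merge. Matches the and/shift/and/or sequence in
   store_neg_field (masks 3 and 4294574079 for the d field). */
static uint32_t store_bitfield(uint32_t storage, int32_t src,
                               unsigned offset, unsigned size) {
  uint32_t mask = (size < 32) ? ((1u << size) - 1u) : ~0u;
  uint32_t val = ((uint32_t)src & mask) << offset;
  return (storage & ~(mask << offset)) | val;
}

/* Read a signed bit-field back: shift the field to the top of the word,
   then arithmetic-shift it back down so the sign bit is replicated.
   Matches the shift-left-by-13 / shift-right-by-30 pair in load_field. */
static int32_t load_signed_bitfield(uint32_t storage, unsigned offset,
                                    unsigned size, unsigned storage_size) {
  unsigned hi = storage_size - offset - size;   /* bits above the field */
  int32_t val = (int32_t)(storage << hi);       /* field now at the top */
  return val >> (offset + hi);                  /* sign-extending shift */
}

int main(void) {
  uint32_t storage = 0;
  storage = store_bitfield(storage, -1, 17, 2);             /* s.d = -1 */
  printf("%d\n", load_signed_bitfield(storage, 17, 2, 32)); /* prints -1 */
  return 0;
}

Storing -1 and loading it back returns -1, which is exactly what the shift pair in load_field achieves: the left shift discards the bits above the field and the arithmetic right shift both realigns the value and sign-extends it, so no separate mask is needed for signed fields.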