 #include "clang/AST/GlobalDecl.h"
 #include "clang/Basic/Builtins.h"
 #include "clang/CIR/Dialect/IR/CIRDialect.h"
+#include "clang/CIR/Dialect/IR/CIROpsEnums.h"
 #include "clang/CIR/Dialect/IR/CIRTypes.h"
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/ErrorHandling.h"
@@ -128,6 +129,7 @@ static Address buildPointerWithAlignment(const Expr *E,
         if (PtrTy->getPointeeType()->isVoidType())
           break;
         assert(!UnimplementedFeature::tbaa());
+
         LValueBaseInfo InnerBaseInfo;
         Address Addr = CGF.buildPointerWithAlignment(
             CE->getSubExpr(), &InnerBaseInfo, IsKnownNonNull);
@@ -211,13 +213,78 @@ static Address buildPointerWithAlignment(const Expr *E,
   return Address(CGF.buildScalarExpr(E), Align);
 }
 
+/// Helper method to check if the underlying ABI is AAPCS
+static bool isAAPCS(const TargetInfo &TargetInfo) {
+  return TargetInfo.getABI().starts_with("aapcs");
+}
+
+Address CIRGenFunction::getAddrOfField(LValue base, const FieldDecl *field,
+                                       unsigned index) {
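+  // Field index 0 shares the record's own address, so only later fields need
+  // a member-access operation built via createGetMember.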
+  if (index == 0)
+    return base.getAddress();
+
+  auto loc = getLoc(field->getLocation());
+  auto fieldType = convertType(field->getType());
+  auto fieldPtr =
+      mlir::cir::PointerType::get(getBuilder().getContext(), fieldType);
+  auto sea = getBuilder().createGetMember(
+      loc, fieldPtr, base.getPointer(), field->getName(), index);
+
+  return Address(sea, CharUnits::One());
+}
+
+static bool useVolatileForBitField(const CIRGenModule &cgm, LValue base,
+                                   const CIRGenBitFieldInfo &info,
+                                   const FieldDecl *field) {
+  return isAAPCS(cgm.getTarget()) && cgm.getCodeGenOpts().AAPCSBitfieldWidth &&
+         info.VolatileStorageSize != 0 &&
+         field->getType()
+             .withCVRQualifiers(base.getVRQualifiers())
+             .isVolatileQualified();
+}
+
+LValue CIRGenFunction::buildLValueForBitField(LValue base,
+                                              const FieldDecl *field) {
+
+  LValueBaseInfo BaseInfo = base.getBaseInfo();
+  const RecordDecl *rec = field->getParent();
+  auto &layout = CGM.getTypes().getCIRGenRecordLayout(field->getParent());
+  auto &info = layout.getBitFieldInfo(field);
+  auto useVolatile = useVolatileForBitField(CGM, base, info, field);
+  unsigned Idx = layout.getCIRFieldNo(field);
+
+  if (useVolatile ||
+      (IsInPreservedAIRegion ||
+       (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>()))) {
+    llvm_unreachable("NYI");
+  }
+
+  Address Addr = getAddrOfField(base, field, Idx);
+
+  const unsigned SS = useVolatile ? info.VolatileStorageSize : info.StorageSize;
+
+  // Get the access type.
+  mlir::Type FieldIntTy = builder.getUIntNTy(SS);
+
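+  // Reinterpret the field's storage as an integer wide enough for the whole
+  // storage unit, so the load/store helpers can shift and mask within it.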
+  auto loc = getLoc(field->getLocation());
+  if (Addr.getElementType() != FieldIntTy)
+    Addr = builder.createElementBitCast(loc, Addr, FieldIntTy);
+
+  QualType fieldType =
+      field->getType().withCVRQualifiers(base.getVRQualifiers());
+
+  assert(!UnimplementedFeature::tbaa() && "NYI TBAA for bit fields");
+  LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
+  return LValue::MakeBitfield(Addr, info, fieldType, FieldBaseInfo);
+}
+
 LValue CIRGenFunction::buildLValueForField(LValue base,
                                            const FieldDecl *field) {
+
   LValueBaseInfo BaseInfo = base.getBaseInfo();
 
-  if (field->isBitField()) {
-    llvm_unreachable("NYI");
-  }
+  if (field->isBitField())
+    return buildLValueForBitField(base, field);
 
   // Fields of may-alias structures are may-alias themselves.
   // FIXME: this should get propagated down through anonymous structs and unions.
@@ -520,12 +587,55 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value value, LValue lvalue,
 /// method emits the address of the lvalue, then loads the result as an rvalue,
 /// returning the rvalue.
 RValue CIRGenFunction::buildLoadOfLValue(LValue LV, SourceLocation Loc) {
-  assert(LV.isSimple() && "not implemented");
   assert(!LV.getType()->isFunctionType());
   assert(!(LV.getType()->isConstantMatrixType()) && "not implemented");
 
-  // Everything needs a load.
-  return RValue::get(buildLoadOfScalar(LV, Loc));
+  if (LV.isBitField())
+    return buildLoadOfBitfieldLValue(LV, Loc);
+
+  if (LV.isSimple())
+    return RValue::get(buildLoadOfScalar(LV, Loc));
+  llvm_unreachable("NYI");
+}
+
+RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV,
+                                                 SourceLocation Loc) {
+  const CIRGenBitFieldInfo &Info = LV.getBitFieldInfo();
+
+  // Get the output type.
+  mlir::Type ResLTy = convertType(LV.getType());
+  Address Ptr = LV.getBitFieldAddress();
+  mlir::Value Val = builder.createLoad(getLoc(Loc), Ptr);
+  auto ValWidth = Val.getType().cast<IntType>().getWidth();
+
+  bool UseVolatile = LV.isVolatileQualified() &&
+                     Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
+  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
+  const unsigned StorageSize =
+      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
+
+  if (Info.IsSigned) {
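+    // Signed bit-field: cast the loaded storage unit to a signed integer,
+    // shift the field's top bit up to the sign bit, then shift back down so
+    // the result comes out sign-extended.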
+    assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
+
+    mlir::Type typ = builder.getSIntNTy(ValWidth);
+    Val = builder.createIntCast(Val, typ);
+
+    unsigned HighBits = StorageSize - Offset - Info.Size;
+    if (HighBits)
+      Val = builder.createShiftLeft(Val, HighBits);
+    if (Offset + HighBits)
+      Val = builder.createShiftRight(Val, Offset + HighBits);
+  } else {
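+    // Unsigned bit-field: shift the field down to bit zero and mask off any
+    // higher bits that belong to neighboring fields.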
+    if (Offset)
+      Val = builder.createShiftRight(Val, Offset);
+
+    if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
+      Val = builder.createAnd(Val,
+                              llvm::APInt::getLowBitsSet(ValWidth, Info.Size));
+  }
+  Val = builder.createIntCast(Val, ResLTy);
+  assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI");
+  return RValue::get(Val);
 }
 
 void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) {
@@ -548,6 +658,81 @@ void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) {
   buildStoreOfScalar(Src.getScalarVal(), Dst);
 }
 
+void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst,
+                                                     mlir::Value &Result) {
+  const CIRGenBitFieldInfo &Info = Dst.getBitFieldInfo();
+  mlir::Type ResLTy = getTypes().convertTypeForMem(Dst.getType());
+  Address Ptr = Dst.getBitFieldAddress();
+
+  // Get the source value, truncated to the width of the bit-field.
+  mlir::Value SrcVal = Src.getScalarVal();
+
+  // Cast the source to the storage type and shift it into place.
+  SrcVal = builder.createIntCast(SrcVal, Ptr.getElementType());
+  auto SrcWidth = SrcVal.getType().cast<IntType>().getWidth();
+  mlir::Value MaskedVal = SrcVal;
+
+  const bool UseVolatile =
+      CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
+      Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
+  const unsigned StorageSize =
+      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
+  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
+  // See if there are other bits in the bit-field's storage we'll need to load
+  // and mask together with the source before storing.
+  if (StorageSize != Info.Size) {
+    assert(StorageSize > Info.Size && "Invalid bitfield size.");
+
+    mlir::Value Val = buildLoadOfScalar(Dst, Dst.getPointer().getLoc());
+
+    // Mask the source value as needed.
+    if (!hasBooleanRepresentation(Dst.getType()))
+      SrcVal = builder.createAnd(
+          SrcVal, llvm::APInt::getLowBitsSet(SrcWidth, Info.Size));
+
+    MaskedVal = SrcVal;
+    if (Offset)
+      SrcVal = builder.createShiftLeft(SrcVal, Offset);
+
+    // Mask out the original value.
+    Val = builder.createAnd(
+        Val, ~llvm::APInt::getBitsSet(SrcWidth, Offset, Offset + Info.Size));
+
+    // Or together the unchanged values and the source value.
+    SrcVal = builder.createOr(Val, SrcVal);
+
+  } else {
+    // According to the AAPCS:
+    // When a volatile bit-field is written, and its container does not overlap
+    // with any non-bit-field member, its container must be read exactly once
+    // and written exactly once using the access width appropriate to the type
+    // of the container. The two accesses are not atomic.
+    llvm_unreachable("volatile bit-field is not implemented for the AAPCS");
+  }
+
+  // Write the new value back out.
+  // TODO: constant matrix type, volatile, no init, non temporal, TBAA
+  buildStoreOfScalar(SrcVal, Ptr, Dst.isVolatileQualified(), Dst.getType(),
+                     Dst.getBaseInfo(), false, false);
+
+  // Return the new value of the bit-field.
+  mlir::Value ResultVal = MaskedVal;
+  ResultVal = builder.createIntCast(ResultVal, ResLTy);
+
+  // Sign extend the value if needed.
+  if (Info.IsSigned) {
+    assert(Info.Size <= StorageSize);
+    unsigned HighBits = StorageSize - Info.Size;
+
+    if (HighBits) {
+      ResultVal = builder.createShiftLeft(ResultVal, HighBits);
+      ResultVal = builder.createShiftRight(ResultVal, HighBits);
+    }
+  }
+
+  Result = buildFromMemory(ResultVal, Dst.getType());
+}
+
 static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E,
                                        const VarDecl *VD) {
   QualType T = E->getType();
@@ -771,7 +956,13 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) {
   LValue LV = buildLValue(E->getLHS());
 
   SourceLocRAIIObject Loc{*this, getLoc(E->getSourceRange())};
-  buildStoreThroughLValue(RV, LV);
+  if (LV.isBitField()) {
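+    // Assignments to bit-fields go through the dedicated store helper, which
+    // also yields the new value of the field in 'result'.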
+    mlir::Value result;
+    buildStoreThroughBitfieldLValue(RV, LV, result);
+  } else {
+    buildStoreThroughLValue(RV, LV);
+  }
+
   assert(!getContext().getLangOpts().OpenMP &&
          "last priv cond not implemented");
   return LV;
@@ -2207,6 +2398,13 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, QualType ty,
 
 mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue,
                                               SourceLocation Loc) {
+  return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
+                           lvalue.getType(), getLoc(Loc), lvalue.getBaseInfo(),
+                           lvalue.isNontemporal());
+}
+
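+// Overload for callers that already carry an mlir::Location (e.g. the
+// bit-field helpers above) instead of a clang SourceLocation.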
+mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue,
+                                              mlir::Location Loc) {
   return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                            lvalue.getType(), Loc, lvalue.getBaseInfo(),
                            lvalue.isNontemporal());
@@ -2224,6 +2422,14 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile,
                                               QualType Ty, SourceLocation Loc,
                                               LValueBaseInfo BaseInfo,
                                               bool isNontemporal) {
+  return buildLoadOfScalar(Addr, Volatile, Ty, getLoc(Loc), BaseInfo,
+                           isNontemporal);
+}
+
+mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile,
+                                              QualType Ty, mlir::Location Loc,
+                                              LValueBaseInfo BaseInfo,
+                                              bool isNontemporal) {
   if (!CGM.getCodeGenOpts().PreserveVec3Type) {
     if (Ty->isVectorType()) {
       llvm_unreachable("NYI");
@@ -2237,15 +2443,14 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile,
   }
 
   mlir::cir::LoadOp Load = builder.create<mlir::cir::LoadOp>(
-      getLoc(Loc), Addr.getElementType(), Addr.getPointer());
+      Loc, Addr.getElementType(), Addr.getPointer());
 
   if (isNontemporal) {
     llvm_unreachable("NYI");
   }
-
-  // TODO: TBAA
-
-  // TODO: buildScalarRangeCheck
+
+  assert(!UnimplementedFeature::tbaa() && "NYI");
+  assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI");
 
   return buildFromMemory(Load, Ty);
 }