 #include "clang/AST/GlobalDecl.h"
 #include "clang/Basic/Builtins.h"
 #include "clang/CIR/Dialect/IR/CIRDialect.h"
+#include "clang/CIR/Dialect/IR/CIROpsEnums.h"
 #include "clang/CIR/Dialect/IR/CIRTypes.h"
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/ErrorHandling.h"
@@ -128,6 +129,7 @@ static Address buildPointerWithAlignment(const Expr *E,
         if (PtrTy->getPointeeType()->isVoidType())
           break;
         assert(!UnimplementedFeature::tbaa());
+
         LValueBaseInfo InnerBaseInfo;
         Address Addr = CGF.buildPointerWithAlignment(
             CE->getSubExpr(), &InnerBaseInfo, IsKnownNonNull);
@@ -211,13 +213,79 @@ static Address buildPointerWithAlignment(const Expr *E,
   return Address(CGF.buildScalarExpr(E), Align);
 }
 
+/// Helper method to check if the underlying ABI is AAPCS
+static bool isAAPCS(const TargetInfo &TargetInfo) {
+  return TargetInfo.getABI().starts_with("aapcs");
+}
+
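+/// Return the address of the storage unit that backs the given bit-field.
+/// The storage is addressed with a get_member operation on the base record
+/// address; index 0 means the field lives at the start of the record, so the
+/// base address is reused directly.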
+Address CIRGenFunction::getAddrOfBitFieldStorage(LValue base,
+                                                 const FieldDecl *field,
+                                                 unsigned index,
+                                                 unsigned size) {
+  if (index == 0)
+    return base.getAddress();
+
+  auto loc = getLoc(field->getLocation());
+  auto fieldType = builder.getUIntNTy(size);
+
+  auto fieldPtr =
+      mlir::cir::PointerType::get(getBuilder().getContext(), fieldType);
+  auto sea = getBuilder().createGetMember(
+      loc, fieldPtr, base.getPointer(), field->getName(), index);
+
+  return Address(sea, CharUnits::One());
+}
+
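+/// Return true when, under the AAPCS, a volatile bit-field access must be
+/// performed on the container of the field's declared type (the Volatile*
+/// fields of CIRGenBitFieldInfo) rather than on its regular storage unit.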
+static bool useVolatileForBitField(const CIRGenModule &cgm, LValue base,
+                                   const CIRGenBitFieldInfo &info,
+                                   const FieldDecl *field) {
+  return isAAPCS(cgm.getTarget()) && cgm.getCodeGenOpts().AAPCSBitfieldWidth &&
+         info.VolatileStorageSize != 0 &&
+         field->getType()
+             .withCVRQualifiers(base.getVRQualifiers())
+             .isVolatileQualified();
+}
+
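+/// Build an LValue that designates a bit-field member such as `int b : 11;`:
+/// resolve the address of the field's storage unit, cast it to a uN integer
+/// of the storage width, and pair it with the CIRGenBitFieldInfo that records
+/// the field's offset and width inside that unit.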
+LValue CIRGenFunction::buildLValueForBitField(LValue base,
+                                              const FieldDecl *field) {
+
+  LValueBaseInfo BaseInfo = base.getBaseInfo();
+  const RecordDecl *rec = field->getParent();
+  auto &layout = CGM.getTypes().getCIRGenRecordLayout(field->getParent());
+  auto &info = layout.getBitFieldInfo(field);
+  auto useVolatile = useVolatileForBitField(CGM, base, info, field);
+  unsigned Idx = layout.getCIRFieldNo(field);
+
+  if (useVolatile ||
+      (IsInPreservedAIRegion ||
+       (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>()))) {
+    llvm_unreachable("NYI");
+  }
+
+  const unsigned SS = useVolatile ? info.VolatileStorageSize : info.StorageSize;
+  Address Addr = getAddrOfBitFieldStorage(base, field, Idx, SS);
+
+  // Get the access type.
+  mlir::Type FieldIntTy = builder.getUIntNTy(SS);
+
+  auto loc = getLoc(field->getLocation());
+  if (Addr.getElementType() != FieldIntTy)
+    Addr = builder.createElementBitCast(loc, Addr, FieldIntTy);
+
+  QualType fieldType =
+      field->getType().withCVRQualifiers(base.getVRQualifiers());
+
+  assert(!UnimplementedFeature::tbaa() && "NYI TBAA for bit fields");
+  LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
+  return LValue::MakeBitfield(Addr, info, fieldType, FieldBaseInfo);
+}
+
 LValue CIRGenFunction::buildLValueForField(LValue base,
                                            const FieldDecl *field) {
   LValueBaseInfo BaseInfo = base.getBaseInfo();
 
-  if (field->isBitField()) {
-    llvm_unreachable("NYI");
-  }
+  if (field->isBitField())
+    return buildLValueForBitField(base, field);
 
   // Fields of may-alias structures are may-alias themselves.
   // FIXME: this should get propagated down through anonymous structs and unions.
@@ -518,12 +586,55 @@ void CIRGenFunction::buildStoreOfScalar(mlir::Value value, LValue lvalue,
 /// method emits the address of the lvalue, then loads the result as an rvalue,
 /// returning the rvalue.
 RValue CIRGenFunction::buildLoadOfLValue(LValue LV, SourceLocation Loc) {
-  assert(LV.isSimple() && "not implemented");
   assert(!LV.getType()->isFunctionType());
   assert(!(LV.getType()->isConstantMatrixType()) && "not implemented");
 
-  // Everything needs a load.
-  return RValue::get(buildLoadOfScalar(LV, Loc));
+  if (LV.isBitField())
+    return buildLoadOfBitfieldLValue(LV, Loc);
+
+  if (LV.isSimple())
+    return RValue::get(buildLoadOfScalar(LV, Loc));
+  llvm_unreachable("NYI");
+}
+
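+/// Load the current value of a bit-field: load the whole storage unit, then
+/// shift and mask (or shift left/right to sign-extend, for signed fields) so
+/// that only the bits belonging to the field remain, and cast the result to
+/// the field's declared type.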
+RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV,
+                                                 SourceLocation Loc) {
+  const CIRGenBitFieldInfo &Info = LV.getBitFieldInfo();
+
+  // Get the output type.
+  mlir::Type ResLTy = convertType(LV.getType());
+  Address Ptr = LV.getBitFieldAddress();
+  mlir::Value Val = builder.createLoad(getLoc(Loc), Ptr);
+  auto ValWidth = Val.getType().cast<IntType>().getWidth();
+
+  bool UseVolatile = LV.isVolatileQualified() &&
+                     Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
+  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
+  const unsigned StorageSize =
+      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
+
+  if (Info.IsSigned) {
+    assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
+
+    mlir::Type typ = builder.getSIntNTy(ValWidth);
+    Val = builder.createIntCast(Val, typ);
+
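+    // Shift the field's bits to the top of the (now signed) word, then shift
+    // back down; the right shift drops the low bits and sign-extends in one
+    // step.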
+    unsigned HighBits = StorageSize - Offset - Info.Size;
+    if (HighBits)
+      Val = builder.createShiftLeft(Val, HighBits);
+    if (Offset + HighBits)
+      Val = builder.createShiftRight(Val, Offset + HighBits);
+  } else {
+    if (Offset)
+      Val = builder.createShiftRight(Val, Offset);
+
+    if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
+      Val = builder.createAnd(Val,
+                              llvm::APInt::getLowBitsSet(ValWidth, Info.Size));
+  }
+  Val = builder.createIntCast(Val, ResLTy);
+  assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI");
+  return RValue::get(Val);
 }
 
 void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) {
@@ -546,6 +657,83 @@ void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst) {
   buildStoreOfScalar(Src.getScalarVal(), Dst);
 }
 
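+/// Store Src into the bit-field designated by Dst. When the storage unit
+/// holds more bits than the field, the unit is loaded, the field's bits are
+/// masked in, and the unit is written back. Result receives the field's new
+/// value (truncated and, for signed fields, sign-extended) so it can be used
+/// as the value of the enclosing assignment expression.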
+void CIRGenFunction::buildStoreThroughBitfieldLValue(RValue Src, LValue Dst,
+                                                     mlir::Value &Result) {
+  const CIRGenBitFieldInfo &Info = Dst.getBitFieldInfo();
+  mlir::Type ResLTy = getTypes().convertTypeForMem(Dst.getType());
+  Address Ptr = Dst.getBitFieldAddress();
+
+  // Get the source value, truncated to the width of the bit-field.
+  mlir::Value SrcVal = Src.getScalarVal();
+
+  // Cast the source to the storage type and shift it into place.
+  SrcVal = builder.createIntCast(SrcVal, Ptr.getElementType());
+  auto SrcWidth = SrcVal.getType().cast<IntType>().getWidth();
+  mlir::Value MaskedVal = SrcVal;
+
+  const bool UseVolatile =
+      CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
+      Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
+  const unsigned StorageSize =
+      UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
+  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
+  // See if there are other bits in the bitfield's storage we'll need to load
+  // and mask together with source before storing.
+  if (StorageSize != Info.Size) {
+    assert(StorageSize > Info.Size && "Invalid bitfield size.");
+
+    mlir::Value Val = buildLoadOfScalar(Dst, Dst.getPointer().getLoc());
+
+    // Mask the source value as needed.
+    if (!hasBooleanRepresentation(Dst.getType()))
+      SrcVal = builder.createAnd(
+          SrcVal, llvm::APInt::getLowBitsSet(SrcWidth, Info.Size));
+
+    MaskedVal = SrcVal;
+    if (Offset)
+      SrcVal = builder.createShiftLeft(SrcVal, Offset);
+
+    // Mask out the original value.
+    Val = builder.createAnd(
+        Val, ~llvm::APInt::getBitsSet(SrcWidth, Offset, Offset + Info.Size));
+
+    // Or together the unchanged values and the source value.
+    SrcVal = builder.createOr(Val, SrcVal);
+
+  } else {
+    // According to the AAPCS:
+    // When a volatile bit-field is written, and its container does not overlap
+    // with any non-bit-field member, its container must be read exactly once
+    // and written exactly once using the access width appropriate to the type
+    // of the container. The two accesses are not atomic.
+    if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
+        CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
+      llvm_unreachable("volatile bit-field is not implemented for the AAPCS");
+  }
+
+  // Write the new value back out.
+  // TODO: constant matrix type, volatile, no init, non temporal, TBAA
+  buildStoreOfScalar(SrcVal, Ptr, Dst.isVolatileQualified(), Dst.getType(),
+                     Dst.getBaseInfo(), false, false);
+
+  // Return the new value of the bit-field.
+  mlir::Value ResultVal = MaskedVal;
+  ResultVal = builder.createIntCast(ResultVal, ResLTy);
+
+  // Sign extend the value if needed.
+  if (Info.IsSigned) {
+    assert(Info.Size <= StorageSize);
+    unsigned HighBits = StorageSize - Info.Size;
+
+    if (HighBits) {
+      ResultVal = builder.createShiftLeft(ResultVal, HighBits);
+      ResultVal = builder.createShiftRight(ResultVal, HighBits);
+    }
+  }
+
+  Result = buildFromMemory(ResultVal, Dst.getType());
+}
+
 static LValue buildGlobalVarDeclLValue(CIRGenFunction &CGF, const Expr *E,
                                        const VarDecl *VD) {
   QualType T = E->getType();
@@ -769,7 +957,13 @@ LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) {
     LValue LV = buildLValue(E->getLHS());
 
     SourceLocRAIIObject Loc{*this, getLoc(E->getSourceRange())};
-    buildStoreThroughLValue(RV, LV);
+    if (LV.isBitField()) {
+      mlir::Value result;
+      buildStoreThroughBitfieldLValue(RV, LV, result);
+    } else {
+      buildStoreThroughLValue(RV, LV);
+    }
+
     assert(!getContext().getLangOpts().OpenMP &&
            "last priv cond not implemented");
     return LV;
@@ -2205,6 +2399,13 @@ mlir::Value CIRGenFunction::buildAlloca(StringRef name, QualType ty,
 
 mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue,
                                               SourceLocation Loc) {
+  return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
+                           lvalue.getType(), getLoc(Loc), lvalue.getBaseInfo(),
+                           lvalue.isNontemporal());
+}
+
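+/// Overload taking an already-translated mlir::Location, for callers that
+/// only have a CIR location at hand (e.g. the bit-field store path above).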
+mlir::Value CIRGenFunction::buildLoadOfScalar(LValue lvalue,
+                                              mlir::Location Loc) {
   return buildLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                            lvalue.getType(), Loc, lvalue.getBaseInfo(),
                            lvalue.isNontemporal());
@@ -2222,6 +2423,14 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile,
                                               QualType Ty, SourceLocation Loc,
                                               LValueBaseInfo BaseInfo,
                                               bool isNontemporal) {
+  return buildLoadOfScalar(Addr, Volatile, Ty, getLoc(Loc), BaseInfo,
+                           isNontemporal);
+}
+
+mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile,
+                                              QualType Ty, mlir::Location Loc,
+                                              LValueBaseInfo BaseInfo,
+                                              bool isNontemporal) {
   // TODO(CIR): this has fallen out of sync with codegen
 
   // Atomic operations have to be done on integral types
@@ -2231,15 +2440,14 @@ mlir::Value CIRGenFunction::buildLoadOfScalar(Address Addr, bool Volatile,
   }
 
   mlir::cir::LoadOp Load = builder.create<mlir::cir::LoadOp>(
-      getLoc(Loc), Addr.getElementType(), Addr.getPointer());
+      Loc, Addr.getElementType(), Addr.getPointer());
 
   if (isNontemporal) {
     llvm_unreachable("NYI");
   }
-
-  // TODO: TBAA
-
-  // TODO: buildScalarRangeCheck
+
+  assert(!UnimplementedFeature::tbaa() && "NYI");
+  assert(!UnimplementedFeature::emitScalarRangeCheck() && "NYI");
 
   return buildFromMemory(Load, Ty);
 }