author     Roman Divacky <rdivacky@FreeBSD.org>   2009-10-14 18:03:49 +0000
committer  Roman Divacky <rdivacky@FreeBSD.org>   2009-10-14 18:03:49 +0000
commit     4c8b24812ddcd1dedaca343a6d4e76f91f398981 (patch)
tree       137ebebcae16fb0ce7ab4af456992bbd8d22fced /lib/CodeGen
parent     5362a71c02e7d448a8ce98cf00c47e353fba5d04 (diff)
Update clang to r84119. (tag: vendor/clang/clang-r84119)
Notes:
    svn path=/vendor/clang/dist/; revision=198092
    svn path=/vendor/clang/clang-84119/; revision=198093; tag=vendor/clang/clang-r84119
Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/ABIInfo.h                 |   22
-rw-r--r--  lib/CodeGen/CGBlocks.cpp              |  196
-rw-r--r--  lib/CodeGen/CGBlocks.h                |   40
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp             |  255
-rw-r--r--  lib/CodeGen/CGCXX.cpp                 | 1639
-rw-r--r--  lib/CodeGen/CGCXX.h                   |    2
-rw-r--r--  lib/CodeGen/CGCXXClass.cpp            |  176
-rw-r--r--  lib/CodeGen/CGCXXExpr.cpp             |  304
-rw-r--r--  lib/CodeGen/CGCXXTemp.cpp             |  102
-rw-r--r--  lib/CodeGen/CGCall.cpp                |  268
-rw-r--r--  lib/CodeGen/CGCall.h                  |   33
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp           |  686
-rw-r--r--  lib/CodeGen/CGDebugInfo.h             |   56
-rw-r--r--  lib/CodeGen/CGDecl.cpp                |  291
-rw-r--r--  lib/CodeGen/CGExpr.cpp                |  889
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp             |  251
-rw-r--r--  lib/CodeGen/CGExprComplex.cpp         |  177
-rw-r--r--  lib/CodeGen/CGExprConstant.cpp        |  868
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp          |  684
-rw-r--r--  lib/CodeGen/CGObjC.cpp                |  259
-rw-r--r--  lib/CodeGen/CGObjCGNU.cpp             |  999
-rw-r--r--  lib/CodeGen/CGObjCMac.cpp             | 2817
-rw-r--r--  lib/CodeGen/CGObjCRuntime.h           |   49
-rw-r--r--  lib/CodeGen/CGRecordLayoutBuilder.cpp |  386
-rw-r--r--  lib/CodeGen/CGRecordLayoutBuilder.h   |  134
-rw-r--r--  lib/CodeGen/CGRtti.cpp                |   47
-rw-r--r--  lib/CodeGen/CGStmt.cpp                |  330
-rw-r--r--  lib/CodeGen/CGValue.h                 |  124
-rw-r--r--  lib/CodeGen/CGVtable.cpp              |  557
-rw-r--r--  lib/CodeGen/CGVtable.h                |   61
-rw-r--r--  lib/CodeGen/CMakeLists.txt            |   11
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp       |  437
-rw-r--r--  lib/CodeGen/CodeGenFunction.h         |  291
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp         |  846
-rw-r--r--  lib/CodeGen/CodeGenModule.h           |  132
-rw-r--r--  lib/CodeGen/CodeGenTypes.cpp          |  376
-rw-r--r--  lib/CodeGen/CodeGenTypes.h            |  124
-rw-r--r--  lib/CodeGen/Makefile                  |    3
-rw-r--r--  lib/CodeGen/Mangle.cpp                | 1098
-rw-r--r--  lib/CodeGen/Mangle.h                  |   48
-rw-r--r--  lib/CodeGen/ModuleBuilder.cpp         |   16
-rw-r--r--  lib/CodeGen/README.txt                |   18
-rw-r--r--  lib/CodeGen/TargetABIInfo.cpp         |  647
43 files changed, 11437 insertions, 5312 deletions
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h
index 58e5a778cf33..1ab2f55295fa 100644
--- a/lib/CodeGen/ABIInfo.h
+++ b/lib/CodeGen/ABIInfo.h
@@ -17,6 +17,7 @@
namespace llvm {
class Type;
class Value;
+ class LLVMContext;
}
namespace clang {
@@ -71,11 +72,12 @@ namespace clang {
Kind TheKind;
const llvm::Type *TypeData;
unsigned UIntData;
+ bool BoolData;
ABIArgInfo(Kind K, const llvm::Type *TD=0,
- unsigned UI=0) : TheKind(K),
- TypeData(TD),
- UIntData(UI) {}
+ unsigned UI=0, bool B = false)
+ : TheKind(K), TypeData(TD), UIntData(UI), BoolData(B) {}
+
public:
ABIArgInfo() : TheKind(Direct), TypeData(0), UIntData(0) {}
@@ -91,8 +93,8 @@ namespace clang {
static ABIArgInfo getCoerce(const llvm::Type *T) {
return ABIArgInfo(Coerce, T);
}
- static ABIArgInfo getIndirect(unsigned Alignment) {
- return ABIArgInfo(Indirect, 0, Alignment);
+ static ABIArgInfo getIndirect(unsigned Alignment, bool ByVal = true) {
+ return ABIArgInfo(Indirect, 0, Alignment, ByVal);
}
static ABIArgInfo getExpand() {
return ABIArgInfo(Expand);
@@ -112,12 +114,17 @@ namespace clang {
return TypeData;
}
- // ByVal accessors
+ // Indirect accessors
unsigned getIndirectAlign() const {
assert(TheKind == Indirect && "Invalid kind!");
return UIntData;
}
+ bool getIndirectByVal() const {
+ assert(TheKind == Indirect && "Invalid kind!");
+ return BoolData;
+ }
+
void dump() const;
};
@@ -128,7 +135,8 @@ namespace clang {
virtual ~ABIInfo();
virtual void computeInfo(CodeGen::CGFunctionInfo &FI,
- ASTContext &Ctx) const = 0;
+ ASTContext &Ctx,
+ llvm::LLVMContext &VMContext) const = 0;
/// EmitVAArg - Emit the target dependent code to load a value of
/// \arg Ty from the va_list pointed to by \arg VAListAddr.
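The ABIInfo.h hunks above add a ByVal flag to indirect ABIArgInfo results and thread an llvm::LLVMContext through computeInfo(). As a minimal illustration only (not part of this commit; the function name and the 16-byte threshold are invented for the example), a target's ABIInfo implementation could use the new getIndirect overload like this:

#include "ABIInfo.h"   // the header diffed above

// Hypothetical helper: classify an aggregate argument by size. ByVal=false
// asks the caller to pass a plain pointer instead of relying on LLVM's
// 'byval' attribute; ByVal defaults to true, preserving the old behavior.
static clang::ABIArgInfo classifyAggregate(unsigned SizeInBytes,
                                           unsigned AlignInBytes) {
  if (SizeInBytes > 16)
    return clang::ABIArgInfo::getIndirect(AlignInBytes, /*ByVal=*/false);
  return clang::ABIArgInfo::getIndirect(AlignInBytes);
}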
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index d5f803ba98bf..736425e01276 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -11,12 +11,15 @@
//
//===----------------------------------------------------------------------===//
+#include "CGDebugInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/DeclObjC.h"
#include "llvm/Module.h"
#include "llvm/Target/TargetData.h"
#include <algorithm>
+#include <cstdio>
+
using namespace clang;
using namespace CodeGen;
@@ -48,24 +51,24 @@ BuildDescriptorBlockDecl(bool BlockHasCopyDispose, uint64_t Size,
Elts.push_back(BuildDestroyHelper(Ty, NoteForHelper));
}
- C = llvm::ConstantStruct::get(Elts);
+ C = llvm::ConstantStruct::get(VMContext, Elts, false);
- C = new llvm::GlobalVariable(C->getType(), true,
+ C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true,
llvm::GlobalValue::InternalLinkage,
- C, "__block_descriptor_tmp", &CGM.getModule());
+ C, "__block_descriptor_tmp");
return C;
}
llvm::Constant *BlockModule::getNSConcreteGlobalBlock() {
if (NSConcreteGlobalBlock == 0)
- NSConcreteGlobalBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
+ NSConcreteGlobalBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
"_NSConcreteGlobalBlock");
return NSConcreteGlobalBlock;
}
llvm::Constant *BlockModule::getNSConcreteStackBlock() {
if (NSConcreteStackBlock == 0)
- NSConcreteStackBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
+ NSConcreteStackBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
"_NSConcreteStackBlock");
return NSConcreteStackBlock;
}
@@ -125,7 +128,8 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
llvm::SmallVector<const Expr *, 8> subBlockDeclRefDecls;
bool subBlockHasCopyDispose = false;
llvm::Function *Fn
- = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, CurFuncDecl, LocalDeclMap,
+ = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, CurFuncDecl,
+ LocalDeclMap,
subBlockSize,
subBlockAlign,
subBlockDeclRefDecls,
@@ -161,13 +165,13 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
Elts[0] = CGM.getNSConcreteGlobalBlock();
Elts[1] = llvm::ConstantInt::get(IntTy, flags|BLOCK_IS_GLOBAL);
- C = llvm::ConstantStruct::get(Elts);
+ C = llvm::ConstantStruct::get(VMContext, Elts, false);
char Name[32];
sprintf(Name, "__block_holder_tmp_%d", CGM.getGlobalUniqueCount());
- C = new llvm::GlobalVariable(C->getType(), true,
+ C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true,
llvm::GlobalValue::InternalLinkage,
- C, Name, &CGM.getModule());
+ C, Name);
QualType BPT = BE->getType();
C = llvm::ConstantExpr::getBitCast(C, ConvertType(BPT));
return C;
@@ -183,13 +187,12 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
QualType Ty = E->getType();
if (BDRE && BDRE->isByRef()) {
- uint64_t Align = getContext().getDeclAlignInBytes(BDRE->getDecl());
- Types[i+5] = llvm::PointerType::get(BuildByRefType(Ty, Align), 0);
+ Types[i+5] = llvm::PointerType::get(BuildByRefType(BDRE->getDecl()), 0);
} else
Types[i+5] = ConvertType(Ty);
}
- llvm::StructType *Ty = llvm::StructType::get(Types, true);
+ llvm::StructType *Ty = llvm::StructType::get(VMContext, Types, true);
llvm::AllocaInst *A = CreateTempAlloca(Ty);
A->setAlignment(subBlockAlign);
@@ -217,20 +220,21 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
llvm::Value* Addr = Builder.CreateStructGEP(V, i+5, "tmp");
NoteForHelper[helpersize].index = i+5;
- NoteForHelper[helpersize].RequiresCopying = BlockRequiresCopying(VD->getType());
+ NoteForHelper[helpersize].RequiresCopying
+ = BlockRequiresCopying(VD->getType());
NoteForHelper[helpersize].flag
- = VD->getType()->isBlockPointerType() ? BLOCK_FIELD_IS_BLOCK : BLOCK_FIELD_IS_OBJECT;
+ = (VD->getType()->isBlockPointerType()
+ ? BLOCK_FIELD_IS_BLOCK
+ : BLOCK_FIELD_IS_OBJECT);
if (LocalDeclMap[VD]) {
if (BDRE->isByRef()) {
NoteForHelper[helpersize].flag = BLOCK_FIELD_IS_BYREF |
// FIXME: Someone double check this.
(VD->getType().isObjCGCWeak() ? BLOCK_FIELD_IS_WEAK : 0);
- const llvm::Type *Ty = Types[i+5];
llvm::Value *Loc = LocalDeclMap[VD];
Loc = Builder.CreateStructGEP(Loc, 1, "forwarding");
Loc = Builder.CreateLoad(Loc, false);
- Loc = Builder.CreateBitCast(Loc, Ty);
Builder.CreateStore(Loc, Addr);
++helpersize;
continue;
@@ -265,7 +269,7 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
llvm::Value *BlockLiteral = LoadBlockStruct();
Loc = Builder.CreateGEP(BlockLiteral,
- llvm::ConstantInt::get(llvm::Type::Int64Ty,
+ llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
offset),
"block.literal");
Ty = llvm::PointerType::get(Ty, 0);
@@ -310,7 +314,8 @@ const llvm::Type *BlockModule::getBlockDescriptorType() {
// unsigned long reserved;
// unsigned long block_size;
// };
- BlockDescriptorType = llvm::StructType::get(UnsignedLongTy,
+ BlockDescriptorType = llvm::StructType::get(UnsignedLongTy->getContext(),
+ UnsignedLongTy,
UnsignedLongTy,
NULL);
@@ -337,7 +342,8 @@ const llvm::Type *BlockModule::getGenericBlockLiteralType() {
// void (*__invoke)(void *);
// struct __block_descriptor *__descriptor;
// };
- GenericBlockLiteralType = llvm::StructType::get(PtrToInt8Ty,
+ GenericBlockLiteralType = llvm::StructType::get(IntTy->getContext(),
+ PtrToInt8Ty,
IntTy,
IntTy,
PtrToInt8Ty,
@@ -369,7 +375,8 @@ const llvm::Type *BlockModule::getGenericExtendedBlockLiteralType() {
// void *__copy_func_helper_decl;
// void *__destroy_func_decl;
// };
- GenericExtendedBlockLiteralType = llvm::StructType::get(PtrToInt8Ty,
+ GenericExtendedBlockLiteralType = llvm::StructType::get(IntTy->getContext(),
+ PtrToInt8Ty,
IntTy,
IntTy,
PtrToInt8Ty,
@@ -384,9 +391,13 @@ const llvm::Type *BlockModule::getGenericExtendedBlockLiteralType() {
return GenericExtendedBlockLiteralType;
}
+bool BlockFunction::BlockRequiresCopying(QualType Ty) {
+ return CGM.BlockRequiresCopying(Ty);
+}
+
RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E) {
const BlockPointerType *BPT =
- E->getCallee()->getType()->getAsBlockPointerType();
+ E->getCallee()->getType()->getAs<BlockPointerType>();
llvm::Value *Callee = EmitScalarExpr(E->getCallee());
@@ -403,7 +414,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E) {
BlockLiteral =
Builder.CreateBitCast(BlockLiteral,
- llvm::PointerType::getUnqual(llvm::Type::Int8Ty),
+ llvm::Type::getInt8PtrTy(VMContext),
"tmp");
// Add the block literal.
@@ -414,33 +425,33 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E) {
QualType FnType = BPT->getPointeeType();
// And the rest of the arguments.
- EmitCallArgs(Args, FnType->getAsFunctionProtoType(),
+ EmitCallArgs(Args, FnType->getAs<FunctionProtoType>(),
E->arg_begin(), E->arg_end());
// Load the function.
llvm::Value *Func = Builder.CreateLoad(FuncPtr, false, "tmp");
- QualType ResultType = FnType->getAsFunctionType()->getResultType();
+ QualType ResultType = FnType->getAs<FunctionType>()->getResultType();
- const CGFunctionInfo &FnInfo =
+ const CGFunctionInfo &FnInfo =
CGM.getTypes().getFunctionInfo(ResultType, Args);
-
+
// Cast the function pointer to the right type.
- const llvm::Type *BlockFTy =
+ const llvm::Type *BlockFTy =
CGM.getTypes().GetFunctionType(FnInfo, false);
-
+
const llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
Func = Builder.CreateBitCast(Func, BlockFTyPtr);
-
+
// And call the block.
return EmitCall(FnInfo, Func, Args);
}
llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
- uint64_t &offset = BlockDecls[E->getDecl()];
+ const ValueDecl *VD = E->getDecl();
+
+ uint64_t &offset = BlockDecls[VD];
- const llvm::Type *Ty;
- Ty = CGM.getTypes().ConvertType(E->getDecl()->getType());
// See if we have already allocated an offset for this variable.
if (offset == 0) {
@@ -453,25 +464,27 @@ llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
llvm::Value *BlockLiteral = LoadBlockStruct();
llvm::Value *V = Builder.CreateGEP(BlockLiteral,
- llvm::ConstantInt::get(llvm::Type::Int64Ty,
- offset),
+ llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
+ offset),
"block.literal");
if (E->isByRef()) {
- bool needsCopyDispose = BlockRequiresCopying(E->getType());
- uint64_t Align = getContext().getDeclAlignInBytes(E->getDecl());
const llvm::Type *PtrStructTy
- = llvm::PointerType::get(BuildByRefType(E->getType(), Align), 0);
+ = llvm::PointerType::get(BuildByRefType(VD), 0);
// The block literal will need a copy/destroy helper.
BlockHasCopyDispose = true;
- Ty = PtrStructTy;
+
+ const llvm::Type *Ty = PtrStructTy;
Ty = llvm::PointerType::get(Ty, 0);
V = Builder.CreateBitCast(V, Ty);
V = Builder.CreateLoad(V, false);
V = Builder.CreateStructGEP(V, 1, "forwarding");
V = Builder.CreateLoad(V, false);
V = Builder.CreateBitCast(V, PtrStructTy);
- V = Builder.CreateStructGEP(V, needsCopyDispose*2 + 4, "x");
+ V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD),
+ VD->getNameAsString());
} else {
+ const llvm::Type *Ty = CGM.getTypes().ConvertType(VD->getType());
+
Ty = llvm::PointerType::get(Ty, 0);
V = Builder.CreateBitCast(V, Ty);
}
@@ -507,16 +520,16 @@ BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
// block literal struct.
uint64_t BlockLiteralSize =
TheTargetData.getTypeStoreSizeInBits(getGenericBlockLiteralType()) / 8;
- DescriptorFields[1] = llvm::ConstantInt::get(UnsignedLongTy,BlockLiteralSize);
+ DescriptorFields[1] =
+ llvm::ConstantInt::get(UnsignedLongTy,BlockLiteralSize);
llvm::Constant *DescriptorStruct =
- llvm::ConstantStruct::get(&DescriptorFields[0], 2);
+ llvm::ConstantStruct::get(VMContext, &DescriptorFields[0], 2, false);
llvm::GlobalVariable *Descriptor =
- new llvm::GlobalVariable(DescriptorStruct->getType(), true,
+ new llvm::GlobalVariable(getModule(), DescriptorStruct->getType(), true,
llvm::GlobalVariable::InternalLinkage,
- DescriptorStruct, "__block_descriptor_global",
- &getModule());
+ DescriptorStruct, "__block_descriptor_global");
// Generate the constants for the block literal.
llvm::Constant *LiteralFields[5];
@@ -552,13 +565,12 @@ BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
LiteralFields[4] = Descriptor;
llvm::Constant *BlockLiteralStruct =
- llvm::ConstantStruct::get(&LiteralFields[0], 5);
+ llvm::ConstantStruct::get(VMContext, &LiteralFields[0], 5, false);
llvm::GlobalVariable *BlockLiteral =
- new llvm::GlobalVariable(BlockLiteralStruct->getType(), true,
+ new llvm::GlobalVariable(getModule(), BlockLiteralStruct->getType(), true,
llvm::GlobalVariable::InternalLinkage,
- BlockLiteralStruct, "__block_literal_global",
- &getModule());
+ BlockLiteralStruct, "__block_literal_global");
return BlockLiteral;
}
@@ -580,7 +592,7 @@ CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
// Check if we should generate debug info for this block.
if (CGM.getDebugInfo())
DebugInfo = CGM.getDebugInfo();
-
+
// Arrange for local static and local extern declarations to appear
// to be local to this function as well, as they are directly referenced
// in a block.
@@ -588,7 +600,7 @@ CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
i != ldm.end();
++i) {
const VarDecl *VD = dyn_cast<VarDecl>(i->first);
-
+
if (VD->getStorageClass() == VarDecl::Static || VD->hasExternalStorage())
LocalDeclMap[VD] = i->second;
}
@@ -606,12 +618,11 @@ CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
const FunctionType *BlockFunctionType = BExpr->getFunctionType();
QualType ResultType;
bool IsVariadic;
- if (const FunctionProtoType *FTy =
+ if (const FunctionProtoType *FTy =
dyn_cast<FunctionProtoType>(BlockFunctionType)) {
ResultType = FTy->getResultType();
IsVariadic = FTy->isVariadic();
- }
- else {
+ } else {
// K&R style block.
ResultType = BlockFunctionType->getResultType();
IsVariadic = false;
@@ -650,9 +661,44 @@ CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
StartFunction(BD, ResultType, Fn, Args,
BExpr->getBody()->getLocEnd());
+
CurFuncDecl = OuterFuncDecl;
CurCodeDecl = BD;
+
+ // Save a spot to insert the debug information for all the BlockDeclRefDecls.
+ llvm::BasicBlock *entry = Builder.GetInsertBlock();
+ llvm::BasicBlock::iterator entry_ptr = Builder.GetInsertPoint();
+ --entry_ptr;
+
EmitStmt(BExpr->getBody());
+
+ // Remember where we were...
+ llvm::BasicBlock *resume = Builder.GetInsertBlock();
+
+ // Go back to the entry.
+ ++entry_ptr;
+ Builder.SetInsertPoint(entry, entry_ptr);
+
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ // Emit debug information for all the BlockDeclRefDecls.
+ for (unsigned i=0; i < BlockDeclRefDecls.size(); ++i) {
+ const Expr *E = BlockDeclRefDecls[i];
+ const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
+ if (BDRE) {
+ const ValueDecl *D = BDRE->getDecl();
+ DI->setLocation(D->getLocation());
+ DI->EmitDeclareOfBlockDeclRefVariable(BDRE,
+ LocalDeclMap[getBlockStructDecl()],
+ Builder, this);
+ }
+ }
+ }
+ // And resume where we left off.
+ if (resume == 0)
+ Builder.ClearInsertionPoint();
+ else
+ Builder.SetInsertPoint(resume);
+
FinishFunction(cast<CompoundStmt>(BExpr->getBody())->getRBracLoc());
// The runtime needs a minimum alignment of a void *.
@@ -687,13 +733,12 @@ uint64_t BlockFunction::getBlockOffset(const BlockDeclRefExpr *BDRE) {
uint64_t Pad = BlockOffset - OldOffset;
if (Pad) {
- llvm::ArrayType::get(llvm::Type::Int8Ty, Pad);
+ llvm::ArrayType::get(llvm::Type::getInt8Ty(VMContext), Pad);
QualType PadTy = getContext().getConstantArrayType(getContext().CharTy,
llvm::APInt(32, Pad),
ArrayType::Normal, 0);
ValueDecl *PadDecl = VarDecl::Create(getContext(), 0, SourceLocation(),
- 0, QualType(PadTy), VarDecl::None,
- SourceLocation());
+ 0, QualType(PadTy), 0, VarDecl::None);
Expr *E;
E = new (getContext()) DeclRefExpr(PadDecl, PadDecl->getType(),
SourceLocation(), false, false);
@@ -720,7 +765,7 @@ GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
getContext().getPointerType(getContext().VoidTy));
Args.push_back(std::make_pair(Src, Src->getType()));
-
+
const CGFunctionInfo &FI =
CGM.getTypes().getFunctionInfo(R, Args);
@@ -740,7 +785,7 @@ GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
FunctionDecl *FD = FunctionDecl::Create(getContext(),
getContext().getTranslationUnitDecl(),
- SourceLocation(), II, R,
+ SourceLocation(), II, R, 0,
FunctionDecl::Static, false,
true);
CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
@@ -776,7 +821,8 @@ GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
llvm::Value *Dstv = Builder.CreateStructGEP(DstObj, index);
Dstv = Builder.CreateBitCast(Dstv, PtrToInt8Ty);
- llvm::Value *N = llvm::ConstantInt::get(llvm::Type::Int32Ty, flag);
+ llvm::Value *N = llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(T->getContext()), flag);
llvm::Value *F = getBlockObjectAssign();
Builder.CreateCall3(F, Dstv, Srcv, N);
}
@@ -801,7 +847,7 @@ GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
getContext().getPointerType(getContext().VoidTy));
Args.push_back(std::make_pair(Src, Src->getType()));
-
+
const CGFunctionInfo &FI =
CGM.getTypes().getFunctionInfo(R, Args);
@@ -821,7 +867,7 @@ GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
FunctionDecl *FD = FunctionDecl::Create(getContext(),
getContext().getTranslationUnitDecl(),
- SourceLocation(), II, R,
+ SourceLocation(), II, R, 0,
FunctionDecl::Static, false,
true);
CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
@@ -885,7 +931,7 @@ GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
getContext().getPointerType(getContext().VoidTy));
Args.push_back(std::make_pair(Src, Src->getType()));
-
+
const CGFunctionInfo &FI =
CGM.getTypes().getFunctionInfo(R, Args);
@@ -905,7 +951,7 @@ GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
FunctionDecl *FD = FunctionDecl::Create(getContext(),
getContext().getTranslationUnitDecl(),
- SourceLocation(), II, R,
+ SourceLocation(), II, R, 0,
FunctionDecl::Static, false,
true);
CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
@@ -924,10 +970,11 @@ GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
V = Builder.CreateStructGEP(V, 6, "x");
V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
llvm::Value *SrcObj = Builder.CreateLoad(V);
-
+
flag |= BLOCK_BYREF_CALLER;
- llvm::Value *N = llvm::ConstantInt::get(llvm::Type::Int32Ty, flag);
+ llvm::Value *N = llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(T->getContext()), flag);
llvm::Value *F = getBlockObjectAssign();
Builder.CreateCall3(F, DstObj, SrcObj, N);
@@ -948,7 +995,7 @@ BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
getContext().getPointerType(getContext().VoidTy));
Args.push_back(std::make_pair(Src, Src->getType()));
-
+
const CGFunctionInfo &FI =
CGM.getTypes().getFunctionInfo(R, Args);
@@ -968,7 +1015,7 @@ BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
FunctionDecl *FD = FunctionDecl::Create(getContext(),
getContext().getTranslationUnitDecl(),
- SourceLocation(), II, R,
+ SourceLocation(), II, R, 0,
FunctionDecl::Static, false,
true);
CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
@@ -1024,9 +1071,9 @@ llvm::Value *BlockFunction::getBlockObjectDispose() {
if (CGM.BlockObjectDispose == 0) {
const llvm::FunctionType *FTy;
std::vector<const llvm::Type*> ArgTys;
- const llvm::Type *ResultType = llvm::Type::VoidTy;
+ const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
ArgTys.push_back(PtrToInt8Ty);
- ArgTys.push_back(llvm::Type::Int32Ty);
+ ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
CGM.BlockObjectDispose
= CGM.CreateRuntimeFunction(FTy, "_Block_object_dispose");
@@ -1038,10 +1085,10 @@ llvm::Value *BlockFunction::getBlockObjectAssign() {
if (CGM.BlockObjectAssign == 0) {
const llvm::FunctionType *FTy;
std::vector<const llvm::Type*> ArgTys;
- const llvm::Type *ResultType = llvm::Type::VoidTy;
+ const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
ArgTys.push_back(PtrToInt8Ty);
ArgTys.push_back(PtrToInt8Ty);
- ArgTys.push_back(llvm::Type::Int32Ty);
+ ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
CGM.BlockObjectAssign
= CGM.CreateRuntimeFunction(FTy, "_Block_object_assign");
@@ -1053,7 +1100,7 @@ void BlockFunction::BuildBlockRelease(llvm::Value *V, int flag) {
llvm::Value *F = getBlockObjectDispose();
llvm::Value *N;
V = Builder.CreateBitCast(V, PtrToInt8Ty);
- N = llvm::ConstantInt::get(llvm::Type::Int32Ty, flag);
+ N = llvm::ConstantInt::get(llvm::Type::getInt32Ty(V->getContext()), flag);
Builder.CreateCall2(F, V, N);
}
@@ -1061,8 +1108,9 @@ ASTContext &BlockFunction::getContext() const { return CGM.getContext(); }
BlockFunction::BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf,
CGBuilderTy &B)
- : CGM(cgm), CGF(cgf), Builder(B) {
- PtrToInt8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()), Builder(B) {
+ PtrToInt8Ty = llvm::PointerType::getUnqual(
+ llvm::Type::getInt8Ty(VMContext));
BlockHasCopyDispose = false;
}
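For readers of the CGBlocks.cpp hunks above: the StructType::get calls in getBlockDescriptorType() and getGenericBlockLiteralType(), which now take an explicit LLVMContext, build LLVM types matching the C-level layouts already spelled out in the surrounding comments. Transcribed here for reference only (not new code in this commit):

struct __block_descriptor {
  unsigned long reserved;
  unsigned long block_size;
};

struct __block_literal_generic {
  void *__isa;                       // _NSConcreteGlobalBlock or _NSConcreteStackBlock
  int __flags;                       // e.g. BLOCK_IS_GLOBAL
  int __reserved;
  void (*__invoke)(void *);          // the generated block function
  struct __block_descriptor *__descriptor;
};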
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
index 5d46ac78f693..3a860c0d3c36 100644
--- a/lib/CodeGen/CGBlocks.h
+++ b/lib/CodeGen/CGBlocks.h
@@ -16,6 +16,7 @@
#include "CodeGenTypes.h"
#include "clang/AST/Type.h"
+#include "llvm/Module.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "clang/Basic/TargetInfo.h"
@@ -38,6 +39,7 @@ namespace llvm {
class TargetData;
class FunctionType;
class Value;
+ class LLVMContext;
}
namespace clang {
@@ -63,7 +65,8 @@ class BlockModule : public BlockBase {
const llvm::TargetData &TheTargetData;
CodeGenTypes &Types;
CodeGenModule &CGM;
-
+ llvm::LLVMContext &VMContext;
+
ASTContext &getContext() const { return Context; }
llvm::Module &getModule() const { return TheModule; }
CodeGenTypes &getTypes() { return Types; }
@@ -86,7 +89,7 @@ public:
/// NSConcreteStackBlock - Cached reference to the class poinnter for stack
/// blocks.
llvm::Constant *NSConcreteStackBlock;
-
+
const llvm::Type *BlockDescriptorType;
const llvm::Type *GenericBlockLiteralType;
const llvm::Type *GenericExtendedBlockLiteralType;
@@ -104,12 +107,22 @@ public:
BlockModule(ASTContext &C, llvm::Module &M, const llvm::TargetData &TD,
CodeGenTypes &T, CodeGenModule &CodeGen)
: Context(C), TheModule(M), TheTargetData(TD), Types(T),
- CGM(CodeGen),
+ CGM(CodeGen), VMContext(M.getContext()),
NSConcreteGlobalBlock(0), NSConcreteStackBlock(0), BlockDescriptorType(0),
GenericBlockLiteralType(0), GenericExtendedBlockLiteralType(0),
BlockObjectAssign(0), BlockObjectDispose(0) {
Block.GlobalUniqueCount = 0;
- PtrToInt8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ PtrToInt8Ty = llvm::Type::getInt8PtrTy(M.getContext());
+ }
+
+ bool BlockRequiresCopying(QualType Ty) {
+ if (Ty->isBlockPointerType())
+ return true;
+ if (getContext().isObjCNSObjectType(Ty))
+ return true;
+ if (Ty->isObjCObjectPointerType())
+ return true;
+ return false;
}
};
@@ -118,6 +131,9 @@ class BlockFunction : public BlockBase {
CodeGenFunction &CGF;
ASTContext &getContext() const;
+protected:
+ llvm::LLVMContext &VMContext;
+
public:
const llvm::Type *PtrToInt8Ty;
struct HelperInfo {
@@ -150,11 +166,11 @@ public:
/// ByCopyDeclRefs - Variables from parent scopes that have been imported
/// into this block.
llvm::SmallVector<const BlockDeclRefExpr *, 8> ByCopyDeclRefs;
-
- // ByRefDeclRefs - __block variables from parent scopes that have been
+
+ // ByRefDeclRefs - __block variables from parent scopes that have been
// imported into this block.
llvm::SmallVector<const BlockDeclRefExpr *, 8> ByRefDeclRefs;
-
+
BlockInfo(const llvm::Type *blt, const char *n)
: BlockLiteralTy(blt), Name(n) {
// Skip asm prefix, if any.
@@ -212,15 +228,7 @@ public:
llvm::Value *getBlockObjectDispose();
void BuildBlockRelease(llvm::Value *DeclPtr, int flag = BLOCK_FIELD_IS_BYREF);
- bool BlockRequiresCopying(QualType Ty) {
- if (Ty->isBlockPointerType())
- return true;
- if (getContext().isObjCNSObjectType(Ty))
- return true;
- if (getContext().isObjCObjectPointerType(Ty))
- return true;
- return false;
- }
+ bool BlockRequiresCopying(QualType Ty);
};
} // end namespace CodeGen
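The recurring mechanical change in CGBlocks.h and CGBlocks.cpp is that LLVM's primitive types are now obtained from an LLVMContext instead of the removed global singletons (llvm::Type::Int8Ty and friends). A standalone sketch of the pattern, mirroring how getBlockObjectDispose() builds the type of _Block_object_dispose in the hunks above (include paths as in the LLVM tree of this era):

#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include <vector>

// void _Block_object_dispose(void *object, int flags)
static const llvm::FunctionType *
getBlockDisposeFnType(llvm::LLVMContext &VMContext) {
  std::vector<const llvm::Type*> ArgTys;
  ArgTys.push_back(llvm::Type::getInt8PtrTy(VMContext));  // the object
  ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));    // BLOCK_FIELD_* flags
  return llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
                                 ArgTys, /*isVarArg=*/false);
}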
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index a919dfa2e32c..987cd24e2c8b 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -25,21 +25,21 @@ using namespace llvm;
/// Utility to insert an atomic instruction based on Instrinsic::ID
/// and the expression node.
-static RValue EmitBinaryAtomic(CodeGenFunction& CGF,
+static RValue EmitBinaryAtomic(CodeGenFunction& CGF,
Intrinsic::ID Id, const CallExpr *E) {
const llvm::Type *ResType[2];
ResType[0] = CGF.ConvertType(E->getType());
ResType[1] = CGF.ConvertType(E->getArg(0)->getType());
Value *AtomF = CGF.CGM.getIntrinsic(Id, ResType, 2);
- return RValue::get(CGF.Builder.CreateCall2(AtomF,
- CGF.EmitScalarExpr(E->getArg(0)),
+ return RValue::get(CGF.Builder.CreateCall2(AtomF,
+ CGF.EmitScalarExpr(E->getArg(0)),
CGF.EmitScalarExpr(E->getArg(1))));
}
/// Utility to insert an atomic instruction based Instrinsic::ID and
// the expression node, where the return value is the result of the
// operation.
-static RValue EmitBinaryAtomicPost(CodeGenFunction& CGF,
+static RValue EmitBinaryAtomicPost(CodeGenFunction& CGF,
Intrinsic::ID Id, const CallExpr *E,
Instruction::BinaryOps Op) {
const llvm::Type *ResType[2];
@@ -49,25 +49,26 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction& CGF,
Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
Value *Operand = CGF.EmitScalarExpr(E->getArg(1));
Value *Result = CGF.Builder.CreateCall2(AtomF, Ptr, Operand);
-
+
if (Id == Intrinsic::atomic_load_nand)
Result = CGF.Builder.CreateNot(Result);
-
-
+
+
return RValue::get(CGF.Builder.CreateBinOp(Op, Result, Operand));
}
-RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
+RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
unsigned BuiltinID, const CallExpr *E) {
// See if we can constant fold this builtin. If so, don't emit it at all.
Expr::EvalResult Result;
if (E->Evaluate(Result, CGM.getContext())) {
if (Result.Val.isInt())
- return RValue::get(llvm::ConstantInt::get(Result.Val.getInt()));
+ return RValue::get(llvm::ConstantInt::get(VMContext,
+ Result.Val.getInt()));
else if (Result.Val.isFloat())
- return RValue::get(llvm::ConstantFP::get(Result.Val.getFloat()));
+ return RValue::get(ConstantFP::get(VMContext, Result.Val.getFloat()));
}
-
+
switch (BuiltinID) {
default: break; // Handle intrinsics and libm functions below.
case Builtin::BI__builtin___CFStringMakeConstantString:
@@ -76,13 +77,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_va_start:
case Builtin::BI__builtin_va_end: {
Value *ArgValue = EmitVAListRef(E->getArg(0));
- const llvm::Type *DestType =
- llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
if (ArgValue->getType() != DestType)
- ArgValue = Builder.CreateBitCast(ArgValue, DestType,
- ArgValue->getNameStart());
+ ArgValue = Builder.CreateBitCast(ArgValue, DestType,
+ ArgValue->getName().data());
- Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
+ Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
Intrinsic::vaend : Intrinsic::vastart;
return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
}
@@ -90,35 +90,35 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *DstPtr = EmitVAListRef(E->getArg(0));
Value *SrcPtr = EmitVAListRef(E->getArg(1));
- const llvm::Type *Type =
- llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ const llvm::Type *Type = llvm::Type::getInt8PtrTy(VMContext);
DstPtr = Builder.CreateBitCast(DstPtr, Type);
SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
- return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
+ return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
DstPtr, SrcPtr));
}
case Builtin::BI__builtin_abs: {
- Value *ArgValue = EmitScalarExpr(E->getArg(0));
-
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
- Value *CmpResult =
- Builder.CreateICmpSGE(ArgValue, Constant::getNullValue(ArgValue->getType()),
+ Value *CmpResult =
+ Builder.CreateICmpSGE(ArgValue,
+ llvm::Constant::getNullValue(ArgValue->getType()),
"abscond");
- Value *Result =
+ Value *Result =
Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");
-
+
return RValue::get(Result);
}
case Builtin::BI__builtin_ctz:
case Builtin::BI__builtin_ctzl:
case Builtin::BI__builtin_ctzll: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
-
+
const llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);
- const llvm::Type *ResultType = ConvertType(E->getType());
+ const llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, "cast");
@@ -128,11 +128,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_clzl:
case Builtin::BI__builtin_clzll: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
-
+
const llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::ctlz, &ArgType, 1);
- const llvm::Type *ResultType = ConvertType(E->getType());
+ const llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, "cast");
@@ -143,13 +143,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_ffsll: {
// ffs(x) -> x ? cttz(x) + 1 : 0
Value *ArgValue = EmitScalarExpr(E->getArg(0));
-
+
const llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);
-
+
const llvm::Type *ResultType = ConvertType(E->getType());
- Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue, "tmp"),
- ConstantInt::get(ArgType, 1), "tmp");
+ Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue, "tmp"),
+ llvm::ConstantInt::get(ArgType, 1), "tmp");
Value *Zero = llvm::Constant::getNullValue(ArgType);
Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
@@ -162,13 +162,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_parityll: {
// parity(x) -> ctpop(x) & 1
Value *ArgValue = EmitScalarExpr(E->getArg(0));
-
+
const llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);
-
+
const llvm::Type *ResultType = ConvertType(E->getType());
Value *Tmp = Builder.CreateCall(F, ArgValue, "tmp");
- Value *Result = Builder.CreateAnd(Tmp, ConstantInt::get(ArgType, 1),
+ Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1),
"tmp");
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, "cast");
@@ -178,10 +178,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_popcountl:
case Builtin::BI__builtin_popcountll: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
-
+
const llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);
-
+
const llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
if (Result->getType() != ResultType)
@@ -197,7 +197,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
const llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::bswap, &ArgType, 1);
return RValue::get(Builder.CreateCall(F, ArgValue, "tmp"));
- }
+ }
case Builtin::BI__builtin_object_size: {
// FIXME: Implement. For now we just always fail and pretend we
// don't know the object size.
@@ -205,15 +205,16 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
const llvm::Type *ResType = ConvertType(E->getType());
// bool UseSubObject = TypeArg.getZExtValue() & 1;
bool UseMinimum = TypeArg.getZExtValue() & 2;
- return RValue::get(ConstantInt::get(ResType, UseMinimum ? 0 : -1LL));
+ return RValue::get(
+ llvm::ConstantInt::get(ResType, UseMinimum ? 0 : -1LL));
}
case Builtin::BI__builtin_prefetch: {
Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
// FIXME: Technically these constants should of type 'int', yes?
- RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
- ConstantInt::get(llvm::Type::Int32Ty, 0);
- Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
- ConstantInt::get(llvm::Type::Int32Ty, 3);
+ RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
+ Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 3);
Value *F = CGM.getIntrinsic(Intrinsic::prefetch, 0, 0);
return RValue::get(Builder.CreateCall3(F, Address, RW, Locality));
}
@@ -221,7 +222,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *F = CGM.getIntrinsic(Intrinsic::trap, 0, 0);
return RValue::get(Builder.CreateCall(F));
}
-
+ case Builtin::BI__builtin_unreachable: {
+ Value *V = Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
+ return RValue::get(V);
+ }
+
case Builtin::BI__builtin_powi:
case Builtin::BI__builtin_powif:
case Builtin::BI__builtin_powil: {
@@ -240,9 +246,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_isunordered: {
// Ordered comparisons: we know the arguments to these are matching scalar
// floating point values.
- Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *LHS = EmitScalarExpr(E->getArg(0));
Value *RHS = EmitScalarExpr(E->getArg(1));
-
+
switch (BuiltinID) {
default: assert(0 && "Unknown ordered comparison");
case Builtin::BI__builtin_isgreater:
@@ -260,7 +266,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_islessgreater:
LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
break;
- case Builtin::BI__builtin_isunordered:
+ case Builtin::BI__builtin_isunordered:
LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
break;
}
@@ -268,19 +274,24 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()),
"tmp"));
}
+ case Builtin::BI__builtin_isnan: {
+ Value *V = EmitScalarExpr(E->getArg(0));
+ V = Builder.CreateFCmpUNO(V, V, "cmp");
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()), "tmp"));
+ }
case Builtin::BIalloca:
case Builtin::BI__builtin_alloca: {
// FIXME: LLVM IR Should allow alloca with an i64 size!
Value *Size = EmitScalarExpr(E->getArg(0));
- Size = Builder.CreateIntCast(Size, llvm::Type::Int32Ty, false, "tmp");
- return RValue::get(Builder.CreateAlloca(llvm::Type::Int8Ty, Size, "tmp"));
+ Size = Builder.CreateIntCast(Size, llvm::Type::getInt32Ty(VMContext), false, "tmp");
+ return RValue::get(Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), Size, "tmp"));
}
case Builtin::BI__builtin_bzero: {
Value *Address = EmitScalarExpr(E->getArg(0));
Builder.CreateCall4(CGM.getMemSetFn(), Address,
- llvm::ConstantInt::get(llvm::Type::Int8Ty, 0),
+ llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0),
EmitScalarExpr(E->getArg(1)),
- llvm::ConstantInt::get(llvm::Type::Int32Ty, 1));
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1));
return RValue::get(Address);
}
case Builtin::BI__builtin_memcpy: {
@@ -288,7 +299,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Builder.CreateCall4(CGM.getMemCpyFn(), Address,
EmitScalarExpr(E->getArg(1)),
EmitScalarExpr(E->getArg(2)),
- llvm::ConstantInt::get(llvm::Type::Int32Ty, 1));
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1));
return RValue::get(Address);
}
case Builtin::BI__builtin_memmove: {
@@ -296,16 +307,16 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Builder.CreateCall4(CGM.getMemMoveFn(), Address,
EmitScalarExpr(E->getArg(1)),
EmitScalarExpr(E->getArg(2)),
- llvm::ConstantInt::get(llvm::Type::Int32Ty, 1));
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1));
return RValue::get(Address);
}
case Builtin::BI__builtin_memset: {
Value *Address = EmitScalarExpr(E->getArg(0));
Builder.CreateCall4(CGM.getMemSetFn(), Address,
Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
- llvm::Type::Int8Ty),
+ llvm::Type::getInt8Ty(VMContext)),
EmitScalarExpr(E->getArg(2)),
- llvm::ConstantInt::get(llvm::Type::Int32Ty, 1));
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1));
return RValue::get(Address);
}
case Builtin::BI__builtin_return_address: {
@@ -332,20 +343,18 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *FrameAddrF = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
Value *FrameAddr =
Builder.CreateCall(FrameAddrF,
- Constant::getNullValue(llvm::Type::Int32Ty));
+ Constant::getNullValue(llvm::Type::getInt32Ty(VMContext)));
Builder.CreateStore(FrameAddr, Buf);
// Call the setjmp intrinsic
Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp, 0, 0);
- const llvm::Type *DestType =
- llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
Buf = Builder.CreateBitCast(Buf, DestType);
return RValue::get(Builder.CreateCall(F, Buf));
}
case Builtin::BI__builtin_longjmp: {
Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp, 0, 0);
Value *Buf = EmitScalarExpr(E->getArg(0));
- const llvm::Type *DestType =
- llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
Buf = Builder.CreateBitCast(Buf, DestType);
return RValue::get(Builder.CreateCall(F, Buf));
}
@@ -401,7 +410,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_fetch_and_nand_8:
case Builtin::BI__sync_fetch_and_nand_16:
return EmitBinaryAtomic(*this, Intrinsic::atomic_load_nand, E);
-
+
// Clang extensions: not overloaded yet.
case Builtin::BI__sync_fetch_and_min:
return EmitBinaryAtomic(*this, Intrinsic::atomic_load_min, E);
@@ -417,7 +426,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_add_and_fetch_4:
case Builtin::BI__sync_add_and_fetch_8:
case Builtin::BI__sync_add_and_fetch_16:
- return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_add, E,
+ return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_add, E,
llvm::Instruction::Add);
case Builtin::BI__sync_sub_and_fetch_1:
case Builtin::BI__sync_sub_and_fetch_2:
@@ -454,7 +463,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_nand_and_fetch_16:
return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_nand, E,
llvm::Instruction::And);
-
+
case Builtin::BI__sync_val_compare_and_swap_1:
case Builtin::BI__sync_val_compare_and_swap_2:
case Builtin::BI__sync_val_compare_and_swap_4:
@@ -465,7 +474,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
ResType[0]= ConvertType(E->getType());
ResType[1] = ConvertType(E->getArg(0)->getType());
Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
- return RValue::get(Builder.CreateCall3(AtomF,
+ return RValue::get(Builder.CreateCall3(AtomF,
EmitScalarExpr(E->getArg(0)),
EmitScalarExpr(E->getArg(1)),
EmitScalarExpr(E->getArg(2))));
@@ -482,7 +491,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
ResType[1] = llvm::PointerType::getUnqual(ResType[0]);
Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
Value *OldVal = EmitScalarExpr(E->getArg(1));
- Value *PrevVal = Builder.CreateCall3(AtomF,
+ Value *PrevVal = Builder.CreateCall3(AtomF,
EmitScalarExpr(E->getArg(0)),
OldVal,
EmitScalarExpr(E->getArg(2)));
@@ -511,12 +520,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_synchronize: {
Value *C[5];
- C[0] = C[1] = C[2] = C[3] = llvm::ConstantInt::get(llvm::Type::Int1Ty, 1);
- C[4] = ConstantInt::get(llvm::Type::Int1Ty, 0);
+ C[0] = C[1] = C[2] = C[3] = llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 1);
+ C[4] = llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0);
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C, C + 5);
return RValue::get(0);
}
-
+
// Library functions with special handling.
case Builtin::BIsqrt:
case Builtin::BIsqrtf:
@@ -543,29 +552,31 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
}
}
-
+
// If this is an alias for a libm function (e.g. __builtin_sin) turn it into
// that function.
if (getContext().BuiltinInfo.isLibFunction(BuiltinID) ||
getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
- return EmitCall(CGM.getBuiltinLibFunction(BuiltinID),
+ return EmitCall(CGM.getBuiltinLibFunction(FD, BuiltinID),
E->getCallee()->getType(), E->arg_begin(),
E->arg_end());
-
+
// See if we have a target specific intrinsic.
const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
- Intrinsic::ID IntrinsicID =
- Intrinsic::getIntrinsicForGCCBuiltin(Target.getTargetPrefix(), Name);
-
+ Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
+ if (const char *Prefix =
+ llvm::Triple::getArchTypePrefix(Target.getTriple().getArch()))
+ IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);
+
if (IntrinsicID != Intrinsic::not_intrinsic) {
SmallVector<Value*, 16> Args;
-
+
Function *F = CGM.getIntrinsic(IntrinsicID);
const llvm::FunctionType *FTy = F->getFunctionType();
-
+
for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
Value *ArgValue = EmitScalarExpr(E->getArg(i));
-
+
// If the intrinsic arg type is different from the builtin arg type
// we need to do a bit cast.
const llvm::Type *PTy = FTy->getParamType(i);
@@ -574,50 +585,54 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
"Must be able to losslessly bit cast to param");
ArgValue = Builder.CreateBitCast(ArgValue, PTy);
}
-
+
Args.push_back(ArgValue);
}
-
+
Value *V = Builder.CreateCall(F, Args.data(), Args.data() + Args.size());
QualType BuiltinRetType = E->getType();
-
- const llvm::Type *RetTy = llvm::Type::VoidTy;
+
+ const llvm::Type *RetTy = llvm::Type::getVoidTy(VMContext);
if (!BuiltinRetType->isVoidType()) RetTy = ConvertType(BuiltinRetType);
-
+
if (RetTy != V->getType()) {
assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
"Must be able to losslessly bit cast result type");
V = Builder.CreateBitCast(V, RetTy);
}
-
+
return RValue::get(V);
}
-
+
// See if we have a target specific builtin that needs to be lowered.
if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
return RValue::get(V);
-
+
ErrorUnsupported(E, "builtin function");
-
+
// Unknown builtin, for now just dump it out and return undef.
if (hasAggregateLLVMType(E->getType()))
return RValue::getAggregate(CreateTempAlloca(ConvertType(E->getType())));
- return RValue::get(UndefValue::get(ConvertType(E->getType())));
-}
+ return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
+}
Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
- const char *TargetPrefix = Target.getTargetPrefix();
- if (strcmp(TargetPrefix, "x86") == 0)
+ switch (Target.getTriple().getArch()) {
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
return EmitX86BuiltinExpr(BuiltinID, E);
- else if (strcmp(TargetPrefix, "ppc") == 0)
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
return EmitPPCBuiltinExpr(BuiltinID, E);
- return 0;
+ default:
+ return 0;
+ }
}
-Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
+Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
-
+
llvm::SmallVector<Value*, 4> Ops;
for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
@@ -625,23 +640,23 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
switch (BuiltinID) {
default: return 0;
- case X86::BI__builtin_ia32_pslldi128:
+ case X86::BI__builtin_ia32_pslldi128:
case X86::BI__builtin_ia32_psllqi128:
- case X86::BI__builtin_ia32_psllwi128:
+ case X86::BI__builtin_ia32_psllwi128:
case X86::BI__builtin_ia32_psradi128:
case X86::BI__builtin_ia32_psrawi128:
case X86::BI__builtin_ia32_psrldi128:
case X86::BI__builtin_ia32_psrlqi128:
case X86::BI__builtin_ia32_psrlwi128: {
- Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::Int64Ty, "zext");
- const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::Int64Ty, 2);
- llvm::Value *Zero = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0);
+ Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::getInt64Ty(VMContext), "zext");
+ const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::getInt64Ty(VMContext), 2);
+ llvm::Value *Zero = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
Ops[1] = Builder.CreateInsertElement(llvm::UndefValue::get(Ty),
Ops[1], Zero, "insert");
Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "bitcast");
const char *name = 0;
Intrinsic::ID ID = Intrinsic::not_intrinsic;
-
+
switch (BuiltinID) {
default: assert(0 && "Unsupported shift intrinsic!");
case X86::BI__builtin_ia32_pslldi128:
@@ -678,22 +693,22 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
}
- case X86::BI__builtin_ia32_pslldi:
+ case X86::BI__builtin_ia32_pslldi:
case X86::BI__builtin_ia32_psllqi:
- case X86::BI__builtin_ia32_psllwi:
+ case X86::BI__builtin_ia32_psllwi:
case X86::BI__builtin_ia32_psradi:
case X86::BI__builtin_ia32_psrawi:
case X86::BI__builtin_ia32_psrldi:
case X86::BI__builtin_ia32_psrlqi:
case X86::BI__builtin_ia32_psrlwi: {
- Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::Int64Ty, "zext");
- const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::Int64Ty, 1);
+ Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::getInt64Ty(VMContext), "zext");
+ const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::getInt64Ty(VMContext), 1);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast");
const char *name = 0;
Intrinsic::ID ID = Intrinsic::not_intrinsic;
-
+
switch (BuiltinID) {
default: assert(0 && "Unsupported shift intrinsic!");
case X86::BI__builtin_ia32_pslldi:
@@ -730,7 +745,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
}
case X86::BI__builtin_ia32_cmpps: {
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ps);
@@ -741,17 +756,17 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpss");
}
case X86::BI__builtin_ia32_ldmxcsr: {
- llvm::Type *PtrTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
- Value *One = llvm::ConstantInt::get(llvm::Type::Int32Ty, 1);
- Value *Tmp = Builder.CreateAlloca(llvm::Type::Int32Ty, One, "tmp");
+ const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ Value *One = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1);
+ Value *Tmp = Builder.CreateAlloca(llvm::Type::getInt32Ty(VMContext), One, "tmp");
Builder.CreateStore(Ops[0], Tmp);
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
Builder.CreateBitCast(Tmp, PtrTy));
}
case X86::BI__builtin_ia32_stmxcsr: {
- llvm::Type *PtrTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
- Value *One = llvm::ConstantInt::get(llvm::Type::Int32Ty, 1);
- Value *Tmp = Builder.CreateAlloca(llvm::Type::Int32Ty, One, "tmp");
+ const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+ Value *One = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1);
+ Value *Tmp = Builder.CreateAlloca(llvm::Type::getInt32Ty(VMContext), One, "tmp");
One = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
Builder.CreateBitCast(Tmp, PtrTy));
return Builder.CreateLoad(Tmp, "stmxcsr");
@@ -766,16 +781,16 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
case X86::BI__builtin_ia32_storehps:
case X86::BI__builtin_ia32_storelps: {
- const llvm::Type *EltTy = llvm::Type::Int64Ty;
+ const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
llvm::Type *PtrTy = llvm::PointerType::getUnqual(EltTy);
llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);
-
+
// cast val v2i64
Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");
-
+
// extract (0, 1)
unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
- llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, Index);
+ llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Index);
Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");
// cast pointer to i64 & store
@@ -785,9 +800,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
}
-Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
+Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
switch (BuiltinID) {
default: return 0;
}
-}
+}
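The final CGBuiltin.cpp hunks replace the removed TargetInfo::getTargetPrefix() with a lookup based on the target triple. Pulled out of the diff into a standalone form for readability only (header paths as in LLVM at r84119):

#include "llvm/ADT/Triple.h"
#include "llvm/Intrinsics.h"

// Map a GCC-style builtin name to a target intrinsic, if any, using the
// architecture prefix derived from the triple ("x86", "ppc", ...).
static llvm::Intrinsic::ID lookupGCCBuiltin(const llvm::Triple &T,
                                            const char *Name) {
  llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
  if (const char *Prefix = llvm::Triple::getArchTypePrefix(T.getArch()))
    ID = llvm::Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);
  return ID;
}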
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index 5f3acea767d5..3960cf51868f 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -11,22 +11,123 @@
//
//===----------------------------------------------------------------------===//
-// We might split this into multiple files if it gets too unwieldy
+// We might split this into multiple files if it gets too unwieldy
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "Mangle.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecordLayout.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/StmtCXX.h"
#include "llvm/ADT/StringExtras.h"
using namespace clang;
using namespace CodeGen;
-void
-CodeGenFunction::GenerateStaticCXXBlockVarDeclInit(const VarDecl &D,
- llvm::GlobalVariable *GV) {
+void
+CodeGenFunction::EmitCXXGlobalDtorRegistration(const CXXDestructorDecl *Dtor,
+ llvm::Constant *DeclPtr) {
+ const llvm::Type *Int8PtrTy =
+ llvm::Type::getInt8Ty(VMContext)->getPointerTo();
+
+ std::vector<const llvm::Type *> Params;
+ Params.push_back(Int8PtrTy);
+
+ // Get the destructor function type
+ const llvm::Type *DtorFnTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, false);
+ DtorFnTy = llvm::PointerType::getUnqual(DtorFnTy);
+
+ Params.clear();
+ Params.push_back(DtorFnTy);
+ Params.push_back(Int8PtrTy);
+ Params.push_back(Int8PtrTy);
+
+ // Get the __cxa_atexit function type
+ // extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
+ const llvm::FunctionType *AtExitFnTy =
+ llvm::FunctionType::get(ConvertType(getContext().IntTy), Params, false);
+
+ llvm::Constant *AtExitFn = CGM.CreateRuntimeFunction(AtExitFnTy,
+ "__cxa_atexit");
+
+ llvm::Constant *Handle = CGM.CreateRuntimeVariable(Int8PtrTy,
+ "__dso_handle");
+
+ llvm::Constant *DtorFn = CGM.GetAddrOfCXXDestructor(Dtor, Dtor_Complete);
+
+ llvm::Value *Args[3] = { llvm::ConstantExpr::getBitCast(DtorFn, DtorFnTy),
+ llvm::ConstantExpr::getBitCast(DeclPtr, Int8PtrTy),
+ llvm::ConstantExpr::getBitCast(Handle, Int8PtrTy) };
+ Builder.CreateCall(AtExitFn, &Args[0], llvm::array_endof(Args));
+}
+
+void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
+ llvm::Constant *DeclPtr) {
+ assert(D.hasGlobalStorage() &&
+ "VarDecl must have global storage!");
+
+ const Expr *Init = D.getInit();
+ QualType T = D.getType();
+
+ if (T->isReferenceType()) {
+ ErrorUnsupported(Init, "global variable that binds to a reference");
+ } else if (!hasAggregateLLVMType(T)) {
+ llvm::Value *V = EmitScalarExpr(Init);
+ EmitStoreOfScalar(V, DeclPtr, T.isVolatileQualified(), T);
+ } else if (T->isAnyComplexType()) {
+ EmitComplexExprIntoAddr(Init, DeclPtr, T.isVolatileQualified());
+ } else {
+ EmitAggExpr(Init, DeclPtr, T.isVolatileQualified());
+
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (!RD->hasTrivialDestructor())
+ EmitCXXGlobalDtorRegistration(RD->getDestructor(getContext()), DeclPtr);
+ }
+ }
+}
+
+void
+CodeGenModule::EmitCXXGlobalInitFunc() {
+ if (CXXGlobalInits.empty())
+ return;
+
+ const llvm::FunctionType *FTy = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ false);
+
+ // Create our global initialization function.
+ // FIXME: Should this be tweakable by targets?
+ llvm::Function *Fn =
+ llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
+ "__cxx_global_initialization", &TheModule);
+
+ CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn,
+ &CXXGlobalInits[0],
+ CXXGlobalInits.size());
+ AddGlobalCtor(Fn);
+}
+
+void CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
+ const VarDecl **Decls,
+ unsigned NumDecls) {
+ StartFunction(GlobalDecl(), getContext().VoidTy, Fn, FunctionArgList(),
+ SourceLocation());
+
+ for (unsigned i = 0; i != NumDecls; ++i) {
+ const VarDecl *D = Decls[i];
+
+ llvm::Constant *DeclPtr = CGM.GetAddrOfGlobalVar(D);
+ EmitCXXGlobalVarDeclInit(*D, DeclPtr);
+ }
+ FinishFunction();
+}
+
+void
+CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
+ llvm::GlobalVariable *GV) {
// FIXME: This should use __cxa_guard_{acquire,release}?
assert(!getContext().getLangOptions().ThreadsafeStatics &&
@@ -34,46 +135,37 @@ CodeGenFunction::GenerateStaticCXXBlockVarDeclInit(const VarDecl &D,
llvm::SmallString<256> GuardVName;
llvm::raw_svector_ostream GuardVOut(GuardVName);
- mangleGuardVariable(&D, getContext(), GuardVOut);
-
+ mangleGuardVariable(CGM.getMangleContext(), &D, GuardVOut);
+
// Create the guard variable.
- llvm::GlobalValue *GuardV =
- new llvm::GlobalVariable(llvm::Type::Int64Ty, false,
+ llvm::GlobalValue *GuardV =
+ new llvm::GlobalVariable(CGM.getModule(), llvm::Type::getInt64Ty(VMContext), false,
GV->getLinkage(),
- llvm::Constant::getNullValue(llvm::Type::Int64Ty),
- GuardVName.c_str(),
- &CGM.getModule());
-
+ llvm::Constant::getNullValue(llvm::Type::getInt64Ty(VMContext)),
+ GuardVName.str());
+
// Load the first byte of the guard variable.
- const llvm::Type *PtrTy = llvm::PointerType::get(llvm::Type::Int8Ty, 0);
- llvm::Value *V = Builder.CreateLoad(Builder.CreateBitCast(GuardV, PtrTy),
+ const llvm::Type *PtrTy = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext), 0);
+ llvm::Value *V = Builder.CreateLoad(Builder.CreateBitCast(GuardV, PtrTy),
"tmp");
-
+
// Compare it against 0.
- llvm::Value *nullValue = llvm::Constant::getNullValue(llvm::Type::Int8Ty);
+ llvm::Value *nullValue = llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext));
llvm::Value *ICmp = Builder.CreateICmpEQ(V, nullValue , "tobool");
-
+
llvm::BasicBlock *InitBlock = createBasicBlock("init");
llvm::BasicBlock *EndBlock = createBasicBlock("init.end");
// If the guard variable is 0, jump to the initializer code.
Builder.CreateCondBr(ICmp, InitBlock, EndBlock);
-
+
EmitBlock(InitBlock);
- const Expr *Init = D.getInit();
- if (!hasAggregateLLVMType(Init->getType())) {
- llvm::Value *V = EmitScalarExpr(Init);
- Builder.CreateStore(V, GV, D.getType().isVolatileQualified());
- } else if (Init->getType()->isAnyComplexType()) {
- EmitComplexExprIntoAddr(Init, GV, D.getType().isVolatileQualified());
- } else {
- EmitAggExpr(Init, GV, D.getType().isVolatileQualified());
- }
-
- Builder.CreateStore(llvm::ConstantInt::get(llvm::Type::Int8Ty, 1),
+ EmitCXXGlobalVarDeclInit(D, GV);
+
+ Builder.CreateStore(llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 1),
Builder.CreateBitCast(GuardV, PtrTy));
-
+
EmitBlock(EndBlock);
}
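
This is the non-thread-safe lowering of a function-local static (hence the assert on ThreadsafeStatics and the FIXME about __cxa_guard_acquire/release). For a function such as the sketch below, the emitted code loads the first byte of a 64-bit guard variable, runs the initializer only if that byte is zero, and then stores one into it (Logger is illustrative):

struct Logger { Logger(); };

Logger &instance() {
  static Logger L;   // guarded dynamic initialization, lowered as described above
  return L;
}
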
@@ -82,336 +174,1399 @@ RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
llvm::Value *This,
CallExpr::const_arg_iterator ArgBeg,
CallExpr::const_arg_iterator ArgEnd) {
- assert(MD->isInstance() &&
+ assert(MD->isInstance() &&
"Trying to emit a member call expr on a static method!");
- const FunctionProtoType *FPT = MD->getType()->getAsFunctionProtoType();
-
+ // A call to a trivial destructor requires no code generation.
+ if (const CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(MD))
+ if (Destructor->isTrivial())
+ return RValue::get(0);
+
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+
CallArgList Args;
-
+
// Push the this ptr.
Args.push_back(std::make_pair(RValue::get(This),
MD->getThisType(getContext())));
-
+
// And the rest of the call args
EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
-
- QualType ResultType = MD->getType()->getAsFunctionType()->getResultType();
+
+ QualType ResultType = MD->getType()->getAs<FunctionType>()->getResultType();
return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args),
Callee, Args, MD);
}
+/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on the
+/// given expr can be devirtualized.
+static bool canDevirtualizeMemberFunctionCalls(const Expr *Base) {
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+      // This is a variable of record type; its static type is also its
+      // dynamic type, so the call can be devirtualized.
+ return VD->getType()->isRecordType();
+ }
+
+ return false;
+ }
+
+ // We can always devirtualize calls on temporary object expressions.
+ if (isa<CXXTemporaryObjectExpr>(Base))
+ return true;
+
+ // And calls on bound temporaries.
+ if (isa<CXXBindTemporaryExpr>(Base))
+ return true;
+
+ // Check if this is a call expr that returns a record type.
+ if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
+ return CE->getCallReturnType()->isRecordType();
+
+ // We can't devirtualize the call.
+ return false;
+}
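
A few call sites showing what the predicate accepts and rejects (illustrative types):

struct Base    { virtual int f(); };
struct Derived : Base { int f(); };

int test(Base *p) {
  Derived d;
  int a = d.f();          // DeclRefExpr of record type: devirtualized
  int b = Derived().f();  // temporary object expression: devirtualized
  return a + b + p->f();  // base has pointer type: stays a virtual call
}
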
+
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE) {
+ if (isa<BinaryOperator>(CE->getCallee()))
+ return EmitCXXMemberPointerCallExpr(CE);
+
const MemberExpr *ME = cast<MemberExpr>(CE->getCallee());
const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
- const FunctionProtoType *FPT = MD->getType()->getAsFunctionProtoType();
- const llvm::Type *Ty =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
- FPT->isVariadic());
- llvm::Constant *Callee = CGM.GetAddrOfFunction(GlobalDecl(MD), Ty);
+ if (MD->isStatic()) {
+ // The method is static, emit it as we would a regular call.
+ llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
+ return EmitCall(Callee, getContext().getPointerType(MD->getType()),
+ CE->arg_begin(), CE->arg_end(), 0);
+
+ }
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+
+ const llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
+ FPT->isVariadic());
llvm::Value *This;
-
+
if (ME->isArrow())
This = EmitScalarExpr(ME->getBase());
else {
LValue BaseLV = EmitLValue(ME->getBase());
This = BaseLV.getAddress();
}
-
- return EmitCXXMemberCall(MD, Callee, This,
+
+ // C++ [class.virtual]p12:
+ // Explicit qualification with the scope operator (5.1) suppresses the
+ // virtual call mechanism.
+ //
+ // We also don't emit a virtual call if the base expression has a record type
+ // because then we know what the type is.
+ llvm::Value *Callee;
+ if (MD->isVirtual() && !ME->hasQualifier() &&
+ !canDevirtualizeMemberFunctionCalls(ME->getBase()))
+ Callee = BuildVirtualCall(MD, This, Ty);
+ else if (const CXXDestructorDecl *Destructor
+ = dyn_cast<CXXDestructorDecl>(MD))
+ Callee = CGM.GetAddrOfFunction(GlobalDecl(Destructor, Dtor_Complete), Ty);
+ else
+ Callee = CGM.GetAddrOfFunction(MD, Ty);
+
+ return EmitCXXMemberCall(MD, Callee, This,
CE->arg_begin(), CE->arg_end());
}
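
The [class.virtual]p12 case handled above, in source form (illustrative):

struct Base    { virtual void f(); };
struct Derived : Base { void f(); };

void call(Derived *p) {
  p->f();        // virtual dispatch: BuildVirtualCall through the vtable
  p->Base::f();  // qualified call: direct call to Base::f, no virtual dispatch
}
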
-RValue
-CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
- const CXXMethodDecl *MD) {
- assert(MD->isInstance() &&
- "Trying to emit a member call expr on a static method!");
+RValue
+CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E) {
+ const BinaryOperator *BO = cast<BinaryOperator>(E->getCallee());
+ const Expr *BaseExpr = BO->getLHS();
+ const Expr *MemFnExpr = BO->getRHS();
+ const MemberPointerType *MPT =
+ MemFnExpr->getType()->getAs<MemberPointerType>();
+ const FunctionProtoType *FPT =
+ MPT->getPointeeType()->getAs<FunctionProtoType>();
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(cast<RecordType>(MPT->getClass())->getDecl());
+
+ const llvm::FunctionType *FTy =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(RD, FPT),
+ FPT->isVariadic());
+
+ const llvm::Type *Int8PtrTy =
+ llvm::Type::getInt8Ty(VMContext)->getPointerTo();
+
+ // Get the member function pointer.
+ llvm::Value *MemFnPtr =
+ CreateTempAlloca(ConvertType(MemFnExpr->getType()), "mem.fn");
+ EmitAggExpr(MemFnExpr, MemFnPtr, /*VolatileDest=*/false);
+
+ // Emit the 'this' pointer.
+ llvm::Value *This;
- const FunctionProtoType *FPT = MD->getType()->getAsFunctionProtoType();
- const llvm::Type *Ty =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
- FPT->isVariadic());
- llvm::Constant *Callee = CGM.GetAddrOfFunction(GlobalDecl(MD), Ty);
+ if (BO->getOpcode() == BinaryOperator::PtrMemI)
+ This = EmitScalarExpr(BaseExpr);
+ else
+ This = EmitLValue(BaseExpr).getAddress();
- llvm::Value *This = EmitLValue(E->getArg(0)).getAddress();
+ // Adjust it.
+ llvm::Value *Adj = Builder.CreateStructGEP(MemFnPtr, 1);
+ Adj = Builder.CreateLoad(Adj, "mem.fn.adj");
+
+ llvm::Value *Ptr = Builder.CreateBitCast(This, Int8PtrTy, "ptr");
+ Ptr = Builder.CreateGEP(Ptr, Adj, "adj");
+
+ This = Builder.CreateBitCast(Ptr, This->getType(), "this");
+
+ llvm::Value *FnPtr = Builder.CreateStructGEP(MemFnPtr, 0, "mem.fn.ptr");
+ const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType());
+
+ llvm::Value *FnAsInt = Builder.CreateLoad(FnPtr, "fn");
+
+ // If the LSB in the function pointer is 1, the function pointer points to
+ // a virtual function.
+ llvm::Value *IsVirtual
+ = Builder.CreateAnd(FnAsInt, llvm::ConstantInt::get(PtrDiffTy, 1),
+ "and");
+
+ IsVirtual = Builder.CreateTrunc(IsVirtual,
+ llvm::Type::getInt1Ty(VMContext));
+
+ llvm::BasicBlock *FnVirtual = createBasicBlock("fn.virtual");
+ llvm::BasicBlock *FnNonVirtual = createBasicBlock("fn.nonvirtual");
+ llvm::BasicBlock *FnEnd = createBasicBlock("fn.end");
+
+ Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
+ EmitBlock(FnVirtual);
+
+ const llvm::Type *VTableTy =
+ FTy->getPointerTo()->getPointerTo()->getPointerTo();
+
+ llvm::Value *VTable = Builder.CreateBitCast(This, VTableTy);
+ VTable = Builder.CreateLoad(VTable);
+
+ VTable = Builder.CreateGEP(VTable, FnAsInt, "fn");
+
+ // Since the function pointer is 1 plus the virtual table offset, we
+ // subtract 1 by using a GEP.
+ VTable = Builder.CreateConstGEP1_64(VTable, (uint64_t)-1);
+
+ llvm::Value *VirtualFn = Builder.CreateLoad(VTable, "virtualfn");
+
+ EmitBranch(FnEnd);
+ EmitBlock(FnNonVirtual);
+
+ // If the function is not virtual, just load the pointer.
+ llvm::Value *NonVirtualFn = Builder.CreateLoad(FnPtr, "fn");
+ NonVirtualFn = Builder.CreateIntToPtr(NonVirtualFn, FTy->getPointerTo());
+
+ EmitBlock(FnEnd);
+
+ llvm::PHINode *Callee = Builder.CreatePHI(FTy->getPointerTo());
+ Callee->reserveOperandSpace(2);
+ Callee->addIncoming(VirtualFn, FnVirtual);
+ Callee->addIncoming(NonVirtualFn, FnNonVirtual);
+
+ CallArgList Args;
+
+ QualType ThisType =
+ getContext().getPointerType(getContext().getTagDeclType(RD));
+
+ // Push the this ptr.
+ Args.push_back(std::make_pair(RValue::get(This), ThisType));
+
+ // And the rest of the call args
+ EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
+ QualType ResultType = BO->getType()->getAs<FunctionType>()->getResultType();
+ return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args),
+ Callee, Args, 0);
+}
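
The construct being lowered here is an indirect call through a pointer to member function. In the Itanium-ABI encoding this code follows, the member pointer is a {ptr, adj} pair, and for virtual functions ptr holds 1 plus the vtable offset, which is what the low-bit test above keys on. A caller would look like this (illustrative):

struct S {
  int f(int x);           // non-virtual: ptr is the function address
  virtual int g(int x);   // virtual: ptr is 1 + vtable offset
};

int call(S &s, int (S::*pmf)(int), int x) {
  return (s.*pmf)(x);     // lowered by EmitCXXMemberPointerCallExpr
}
// call(s, &S::f, 1) takes the fn.nonvirtual branch,
// call(s, &S::g, 1) takes the fn.virtual branch.
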
+
+RValue
+CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
+ const CXXMethodDecl *MD) {
+ assert(MD->isInstance() &&
+ "Trying to emit a member call expr on a static method!");
+
+ if (MD->isCopyAssignment()) {
+ const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
+ if (ClassDecl->hasTrivialCopyAssignment()) {
+ assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
+ "EmitCXXOperatorMemberCallExpr - user declared copy assignment");
+ llvm::Value *This = EmitLValue(E->getArg(0)).getAddress();
+ llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
+ QualType Ty = E->getType();
+ EmitAggregateCopy(This, Src, Ty);
+ return RValue::get(This);
+ }
+ }
+
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ const llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
+ FPT->isVariadic());
+ llvm::Constant *Callee = CGM.GetAddrOfFunction(MD, Ty);
+
+ llvm::Value *This = EmitLValue(E->getArg(0)).getAddress();
+
return EmitCXXMemberCall(MD, Callee, This,
E->arg_begin() + 1, E->arg_end());
}
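
If I read the fast path above correctly, a plain assignment between objects of a class whose copy assignment is trivial never emits a call at all; it becomes an aggregate copy. For example (Point is illustrative):

struct Point { int x, y; };           // implicit, trivial copy assignment

void set(Point &dst, const Point &src) {
  dst = src;                          // EmitAggregateCopy; no operator= call emitted
}
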
llvm::Value *CodeGenFunction::LoadCXXThis() {
- assert(isa<CXXMethodDecl>(CurFuncDecl) &&
+ assert(isa<CXXMethodDecl>(CurFuncDecl) &&
"Must be in a C++ member function decl to load 'this'");
assert(cast<CXXMethodDecl>(CurFuncDecl)->isInstance() &&
"Must be in a C++ member function decl to load 'this'");
-
+
// FIXME: What if we're inside a block?
// ans: See how CodeGenFunction::LoadObjCSelf() uses
// CodeGenFunction::BlockForwardSelf() for how to do this.
return Builder.CreateLoad(LocalDeclMap[CXXThisDecl], "this");
}
+/// EmitCXXAggrConstructorCall - This routine essentially creates a (nested)
+/// for-loop to call the default constructor on individual members of the
+/// array.
+/// 'D' is the default constructor for elements of the array, 'ArrayTy' is the
+/// array type and 'ArrayPtr' points to the beginning of the array.
+/// It is assumed that all relevant checks have been made by the caller.
+void
+CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
+ const ConstantArrayType *ArrayTy,
+ llvm::Value *ArrayPtr) {
+ const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ llvm::Value * NumElements =
+ llvm::ConstantInt::get(SizeTy,
+ getContext().getConstantArrayElementCount(ArrayTy));
+
+ EmitCXXAggrConstructorCall(D, NumElements, ArrayPtr);
+}
+
+void
+CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
+ llvm::Value *NumElements,
+ llvm::Value *ArrayPtr) {
+ const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+
+ // Create a temporary for the loop index and initialize it with 0.
+ llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
+ llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
+ Builder.CreateStore(Zero, IndexPtr, false);
+
+ // Start the loop with a block that tests the condition.
+ llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
+ llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
+
+ EmitBlock(CondBlock);
+
+ llvm::BasicBlock *ForBody = createBasicBlock("for.body");
+
+  // Generate: if (loop-index < number-of-elements) fall to the loop body,
+ // otherwise, go to the block after the for-loop.
+ llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
+ llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
+ // If the condition is true, execute the body.
+ Builder.CreateCondBr(IsLess, ForBody, AfterFor);
+
+ EmitBlock(ForBody);
+
+ llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
+ // Inside the loop body, emit the constructor call on the array element.
+ Counter = Builder.CreateLoad(IndexPtr);
+ llvm::Value *Address = Builder.CreateInBoundsGEP(ArrayPtr, Counter,
+ "arrayidx");
+ EmitCXXConstructorCall(D, Ctor_Complete, Address, 0, 0);
+
+ EmitBlock(ContinueBlock);
+
+ // Emit the increment of the loop counter.
+ llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
+ Counter = Builder.CreateLoad(IndexPtr);
+ NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
+ Builder.CreateStore(NextVal, IndexPtr, false);
+
+ // Finally, branch back up to the condition for the next iteration.
+ EmitBranch(CondBlock);
+
+ // Emit the fall-through block.
+ EmitBlock(AfterFor, true);
+}
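
The emitted IR has the same shape as this hand-written loop (a sketch; T and construct_n are illustrative):

#include <new>
#include <cstddef>

template <typename T>
void construct_n(T *array, std::size_t n) {
  // loop.index counts up from 0; each iteration default-constructs one element
  for (std::size_t i = 0; i != n; ++i)
    new (&array[i]) T();
}
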
+
+/// EmitCXXAggrDestructorCall - calls the default destructor on array
+/// elements in reverse order of construction.
+void
+CodeGenFunction::EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
+ const ArrayType *Array,
+ llvm::Value *This) {
+ const ConstantArrayType *CA = dyn_cast<ConstantArrayType>(Array);
+  assert(CA && "Do we support VLA for destruction?");
+ llvm::Value *One = llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
+ 1);
+ uint64_t ElementCount = getContext().getConstantArrayElementCount(CA);
+ // Create a temporary for the loop index and initialize it with count of
+ // array elements.
+ llvm::Value *IndexPtr = CreateTempAlloca(llvm::Type::getInt64Ty(VMContext),
+ "loop.index");
+ // Index = ElementCount;
+ llvm::Value* UpperCount =
+ llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), ElementCount);
+ Builder.CreateStore(UpperCount, IndexPtr, false);
+
+ // Start the loop with a block that tests the condition.
+ llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
+ llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
+
+ EmitBlock(CondBlock);
+
+ llvm::BasicBlock *ForBody = createBasicBlock("for.body");
+
+  // Generate: if (loop-index != 0) fall to the loop body,
+ // otherwise, go to the block after the for-loop.
+ llvm::Value* zeroConstant =
+ llvm::Constant::getNullValue(llvm::Type::getInt64Ty(VMContext));
+ llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
+ llvm::Value *IsNE = Builder.CreateICmpNE(Counter, zeroConstant,
+ "isne");
+ // If the condition is true, execute the body.
+ Builder.CreateCondBr(IsNE, ForBody, AfterFor);
+
+ EmitBlock(ForBody);
+
+ llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
+  // Inside the loop body, emit the destructor call on the array element.
+ Counter = Builder.CreateLoad(IndexPtr);
+ Counter = Builder.CreateSub(Counter, One);
+ llvm::Value *Address = Builder.CreateInBoundsGEP(This, Counter, "arrayidx");
+ EmitCXXDestructorCall(D, Dtor_Complete, Address);
+
+ EmitBlock(ContinueBlock);
+
+ // Emit the decrement of the loop counter.
+ Counter = Builder.CreateLoad(IndexPtr);
+ Counter = Builder.CreateSub(Counter, One, "dec");
+ Builder.CreateStore(Counter, IndexPtr, false);
+
+ // Finally, branch back up to the condition for the next iteration.
+ EmitBranch(CondBlock);
+
+ // Emit the fall-through block.
+ EmitBlock(AfterFor, true);
+}
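
And the matching destruction loop, counting down so elements are destroyed in reverse order of construction (a sketch; names are illustrative):

#include <cstddef>

template <typename T>
void destroy_n(T *array, std::size_t n) {
  // loop.index starts at n; each iteration destroys element i - 1
  for (std::size_t i = n; i != 0; --i)
    array[i - 1].~T();
}
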
+
void
-CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
- CXXCtorType Type,
+CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
+ CXXCtorType Type,
llvm::Value *This,
CallExpr::const_arg_iterator ArgBeg,
CallExpr::const_arg_iterator ArgEnd) {
+ if (D->isCopyConstructor(getContext())) {
+ const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(D->getDeclContext());
+ if (ClassDecl->hasTrivialCopyConstructor()) {
+ assert(!ClassDecl->hasUserDeclaredCopyConstructor() &&
+ "EmitCXXConstructorCall - user declared copy constructor");
+ const Expr *E = (*ArgBeg);
+ QualType Ty = E->getType();
+ llvm::Value *Src = EmitLValue(E).getAddress();
+ EmitAggregateCopy(This, Src, Ty);
+ return;
+ }
+ }
+
llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type);
EmitCXXMemberCall(D, Callee, This, ArgBeg, ArgEnd);
}
-void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *D,
+void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *D,
CXXDtorType Type,
llvm::Value *This) {
llvm::Value *Callee = CGM.GetAddrOfCXXDestructor(D, Type);
-
+
EmitCXXMemberCall(D, Callee, This, 0, 0);
}
-void
-CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
+void
+CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
const CXXConstructExpr *E) {
assert(Dest && "Must have a destination!");
-
- const CXXRecordDecl *RD =
- cast<CXXRecordDecl>(E->getType()->getAsRecordType()->getDecl());
+
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
if (RD->hasTrivialConstructor())
return;
-
- // Call the constructor.
- EmitCXXConstructorCall(E->getConstructor(), Ctor_Complete, Dest,
- E->arg_begin(), E->arg_end());
-}
-
-llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
- if (E->isArray()) {
- ErrorUnsupported(E, "new[] expression");
- return llvm::UndefValue::get(ConvertType(E->getType()));
- }
-
- QualType AllocType = E->getAllocatedType();
- FunctionDecl *NewFD = E->getOperatorNew();
- const FunctionProtoType *NewFTy = NewFD->getType()->getAsFunctionProtoType();
-
- CallArgList NewArgs;
-
- // The allocation size is the first argument.
- QualType SizeTy = getContext().getSizeType();
- llvm::Value *AllocSize =
- llvm::ConstantInt::get(ConvertType(SizeTy),
- getContext().getTypeSize(AllocType) / 8);
-
- NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy));
-
- // Emit the rest of the arguments.
- // FIXME: Ideally, this should just use EmitCallArgs.
- CXXNewExpr::const_arg_iterator NewArg = E->placement_arg_begin();
-
- // First, use the types from the function type.
- // We start at 1 here because the first argument (the allocation size)
- // has already been emitted.
- for (unsigned i = 1, e = NewFTy->getNumArgs(); i != e; ++i, ++NewArg) {
- QualType ArgType = NewFTy->getArgType(i);
-
- assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
- getTypePtr() ==
- getContext().getCanonicalType(NewArg->getType()).getTypePtr() &&
- "type mismatch in call argument!");
-
- NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
- ArgType));
-
- }
-
- // Either we've emitted all the call args, or we have a call to a
- // variadic function.
- assert((NewArg == E->placement_arg_end() || NewFTy->isVariadic()) &&
- "Extra arguments in non-variadic function!");
-
- // If we still have any arguments, emit them using the type of the argument.
- for (CXXNewExpr::const_arg_iterator NewArgEnd = E->placement_arg_end();
- NewArg != NewArgEnd; ++NewArg) {
- QualType ArgType = NewArg->getType();
- NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
- ArgType));
- }
-
- // Emit the call to new.
- RValue RV =
- EmitCall(CGM.getTypes().getFunctionInfo(NewFTy->getResultType(), NewArgs),
- CGM.GetAddrOfFunction(GlobalDecl(NewFD)),
- NewArgs, NewFD);
-
- // If an allocation function is declared with an empty exception specification
- // it returns null to indicate failure to allocate storage. [expr.new]p13.
- // (We don't need to check for null when there's no new initializer and
- // we're allocating a POD type).
- bool NullCheckResult = NewFTy->hasEmptyExceptionSpec() &&
- !(AllocType->isPODType() && !E->hasInitializer());
-
- llvm::BasicBlock *NewNull = 0;
- llvm::BasicBlock *NewNotNull = 0;
- llvm::BasicBlock *NewEnd = 0;
-
- llvm::Value *NewPtr = RV.getScalarVal();
-
- if (NullCheckResult) {
- NewNull = createBasicBlock("new.null");
- NewNotNull = createBasicBlock("new.notnull");
- NewEnd = createBasicBlock("new.end");
-
- llvm::Value *IsNull =
- Builder.CreateICmpEQ(NewPtr,
- llvm::Constant::getNullValue(NewPtr->getType()),
- "isnull");
-
- Builder.CreateCondBr(IsNull, NewNull, NewNotNull);
- EmitBlock(NewNotNull);
- }
-
- NewPtr = Builder.CreateBitCast(NewPtr, ConvertType(E->getType()));
-
- if (AllocType->isPODType()) {
- if (E->getNumConstructorArgs() > 0) {
- assert(E->getNumConstructorArgs() == 1 &&
- "Can only have one argument to initializer of POD type.");
-
- const Expr *Init = E->getConstructorArg(0);
-
- if (!hasAggregateLLVMType(AllocType))
- Builder.CreateStore(EmitScalarExpr(Init), NewPtr);
- else if (AllocType->isAnyComplexType())
- EmitComplexExprIntoAddr(Init, NewPtr, AllocType.isVolatileQualified());
- else
- EmitAggExpr(Init, NewPtr, AllocType.isVolatileQualified());
- }
- } else {
- // Call the constructor.
- CXXConstructorDecl *Ctor = E->getConstructor();
-
- EmitCXXConstructorCall(Ctor, Ctor_Complete, NewPtr,
- E->constructor_arg_begin(),
- E->constructor_arg_end());
- }
-
- if (NullCheckResult) {
- Builder.CreateBr(NewEnd);
- EmitBlock(NewNull);
- Builder.CreateBr(NewEnd);
- EmitBlock(NewEnd);
-
- llvm::PHINode *PHI = Builder.CreatePHI(NewPtr->getType());
- PHI->reserveOperandSpace(2);
- PHI->addIncoming(NewPtr, NewNotNull);
- PHI->addIncoming(llvm::Constant::getNullValue(NewPtr->getType()), NewNull);
-
- NewPtr = PHI;
- }
-
- return NewPtr;
-}
-static bool canGenerateCXXstructor(const CXXRecordDecl *RD,
- ASTContext &Context) {
- // The class has base classes - we don't support that right now.
- if (RD->getNumBases() > 0)
- return false;
-
- for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
- I != E; ++I) {
- // We don't support ctors for fields that aren't POD.
- if (!I->getType()->isPODType())
- return false;
+  // Codegen optimization: elide the copy constructor and emit its first
+  // argument directly into the destination instead.
+ if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
+ CXXConstructExpr::const_arg_iterator i = E->arg_begin();
+ EmitAggExpr((*i), Dest, false);
+ return;
}
-
- return true;
+ // Call the constructor.
+ EmitCXXConstructorCall(E->getConstructor(), Ctor_Complete, Dest,
+ E->arg_begin(), E->arg_end());
}
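
The elision branch fires for an elidable construct-expression when ElideConstructors is enabled (the default); the source argument is then emitted straight into Dest instead of going through the copy constructor. For example (Big is illustrative):

struct Big { Big(); Big(const Big &); };

Big make() {
  return Big();   // elidable copy: the inner Big() is emitted directly into the return slot
}
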
void CodeGenModule::EmitCXXConstructors(const CXXConstructorDecl *D) {
- if (!canGenerateCXXstructor(D->getParent(), getContext())) {
- ErrorUnsupported(D, "C++ constructor", true);
- return;
- }
-
EmitGlobal(GlobalDecl(D, Ctor_Complete));
EmitGlobal(GlobalDecl(D, Ctor_Base));
}
-void CodeGenModule::EmitCXXConstructor(const CXXConstructorDecl *D,
+void CodeGenModule::EmitCXXConstructor(const CXXConstructorDecl *D,
CXXCtorType Type) {
-
+
llvm::Function *Fn = GetAddrOfCXXConstructor(D, Type);
-
- CodeGenFunction(*this).GenerateCode(D, Fn);
-
+
+ CodeGenFunction(*this).GenerateCode(GlobalDecl(D, Type), Fn);
+
SetFunctionDefinitionAttributes(D, Fn);
SetLLVMFunctionAttributesForDefinition(D, Fn);
}
llvm::Function *
-CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
+CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
CXXCtorType Type) {
const llvm::FunctionType *FTy =
getTypes().GetFunctionType(getTypes().getFunctionInfo(D), false);
-
+
const char *Name = getMangledCXXCtorName(D, Type);
return cast<llvm::Function>(
GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(D, Type)));
}
-const char *CodeGenModule::getMangledCXXCtorName(const CXXConstructorDecl *D,
+const char *CodeGenModule::getMangledCXXCtorName(const CXXConstructorDecl *D,
CXXCtorType Type) {
llvm::SmallString<256> Name;
llvm::raw_svector_ostream Out(Name);
- mangleCXXCtor(D, Type, Context, Out);
-
+ mangleCXXCtor(getMangleContext(), D, Type, Out);
+
Name += '\0';
return UniqueMangledName(Name.begin(), Name.end());
}
void CodeGenModule::EmitCXXDestructors(const CXXDestructorDecl *D) {
- if (!canGenerateCXXstructor(D->getParent(), getContext())) {
- ErrorUnsupported(D, "C++ destructor", true);
- return;
- }
-
EmitCXXDestructor(D, Dtor_Complete);
EmitCXXDestructor(D, Dtor_Base);
}
-void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *D,
+void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *D,
CXXDtorType Type) {
llvm::Function *Fn = GetAddrOfCXXDestructor(D, Type);
-
- CodeGenFunction(*this).GenerateCode(D, Fn);
-
+
+ CodeGenFunction(*this).GenerateCode(GlobalDecl(D, Type), Fn);
+
SetFunctionDefinitionAttributes(D, Fn);
SetLLVMFunctionAttributesForDefinition(D, Fn);
}
llvm::Function *
-CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
+CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
CXXDtorType Type) {
const llvm::FunctionType *FTy =
getTypes().GetFunctionType(getTypes().getFunctionInfo(D), false);
-
+
const char *Name = getMangledCXXDtorName(D, Type);
return cast<llvm::Function>(
GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(D, Type)));
}
-const char *CodeGenModule::getMangledCXXDtorName(const CXXDestructorDecl *D,
+const char *CodeGenModule::getMangledCXXDtorName(const CXXDestructorDecl *D,
CXXDtorType Type) {
llvm::SmallString<256> Name;
llvm::raw_svector_ostream Out(Name);
- mangleCXXDtor(D, Type, Context, Out);
-
+ mangleCXXDtor(getMangleContext(), D, Type, Out);
+
Name += '\0';
return UniqueMangledName(Name.begin(), Name.end());
}
+
+llvm::Constant *CodeGenFunction::GenerateThunk(llvm::Function *Fn,
+ const CXXMethodDecl *MD,
+ bool Extern, int64_t nv,
+ int64_t v) {
+ QualType R = MD->getType()->getAs<FunctionType>()->getResultType();
+
+ FunctionArgList Args;
+ ImplicitParamDecl *ThisDecl =
+ ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
+ MD->getThisType(getContext()));
+ Args.push_back(std::make_pair(ThisDecl, ThisDecl->getType()));
+ for (FunctionDecl::param_const_iterator i = MD->param_begin(),
+ e = MD->param_end();
+ i != e; ++i) {
+ ParmVarDecl *D = *i;
+ Args.push_back(std::make_pair(D, D->getType()));
+ }
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__thunk_named_foo_");
+ FunctionDecl *FD = FunctionDecl::Create(getContext(),
+ getContext().getTranslationUnitDecl(),
+ SourceLocation(), II, R, 0,
+ Extern
+ ? FunctionDecl::Extern
+ : FunctionDecl::Static,
+ false, true);
+ StartFunction(FD, R, Fn, Args, SourceLocation());
+ // FIXME: generate body
+ FinishFunction();
+ return Fn;
+}
+
+llvm::Constant *CodeGenFunction::GenerateCovariantThunk(llvm::Function *Fn,
+ const CXXMethodDecl *MD,
+ bool Extern,
+ int64_t nv_t,
+ int64_t v_t,
+ int64_t nv_r,
+ int64_t v_r) {
+ QualType R = MD->getType()->getAs<FunctionType>()->getResultType();
+
+ FunctionArgList Args;
+ ImplicitParamDecl *ThisDecl =
+ ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
+ MD->getThisType(getContext()));
+ Args.push_back(std::make_pair(ThisDecl, ThisDecl->getType()));
+ for (FunctionDecl::param_const_iterator i = MD->param_begin(),
+ e = MD->param_end();
+ i != e; ++i) {
+ ParmVarDecl *D = *i;
+ Args.push_back(std::make_pair(D, D->getType()));
+ }
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__thunk_named_foo_");
+ FunctionDecl *FD = FunctionDecl::Create(getContext(),
+ getContext().getTranslationUnitDecl(),
+ SourceLocation(), II, R, 0,
+ Extern
+ ? FunctionDecl::Extern
+ : FunctionDecl::Static,
+ false, true);
+ StartFunction(FD, R, Fn, Args, SourceLocation());
+ // FIXME: generate body
+ FinishFunction();
+ return Fn;
+}
+
+llvm::Constant *CodeGenModule::BuildThunk(const CXXMethodDecl *MD, bool Extern,
+ int64_t nv, int64_t v) {
+ llvm::SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ mangleThunk(getMangleContext(), MD, nv, v, Out);
+ llvm::GlobalVariable::LinkageTypes linktype;
+ linktype = llvm::GlobalValue::WeakAnyLinkage;
+ if (!Extern)
+ linktype = llvm::GlobalValue::InternalLinkage;
+ llvm::Type *Ptr8Ty=llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext),0);
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ const llvm::FunctionType *FTy =
+ getTypes().GetFunctionType(getTypes().getFunctionInfo(MD),
+ FPT->isVariadic());
+
+ llvm::Function *Fn = llvm::Function::Create(FTy, linktype, Out.str(),
+ &getModule());
+ CodeGenFunction(*this).GenerateThunk(Fn, MD, Extern, nv, v);
+ // Fn = Builder.CreateBitCast(Fn, Ptr8Ty);
+ llvm::Constant *m = llvm::ConstantExpr::getBitCast(Fn, Ptr8Ty);
+ return m;
+}
+
+llvm::Constant *CodeGenModule::BuildCovariantThunk(const CXXMethodDecl *MD,
+ bool Extern, int64_t nv_t,
+ int64_t v_t, int64_t nv_r,
+ int64_t v_r) {
+ llvm::SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ mangleCovariantThunk(getMangleContext(), MD, nv_t, v_t, nv_r, v_r, Out);
+ llvm::GlobalVariable::LinkageTypes linktype;
+ linktype = llvm::GlobalValue::WeakAnyLinkage;
+ if (!Extern)
+ linktype = llvm::GlobalValue::InternalLinkage;
+ llvm::Type *Ptr8Ty=llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext),0);
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ const llvm::FunctionType *FTy =
+ getTypes().GetFunctionType(getTypes().getFunctionInfo(MD),
+ FPT->isVariadic());
+
+ llvm::Function *Fn = llvm::Function::Create(FTy, linktype, Out.str(),
+ &getModule());
+ CodeGenFunction(*this).GenerateCovariantThunk(Fn, MD, Extern, nv_t, v_t, nv_r,
+ v_r);
+ // Fn = Builder.CreateBitCast(Fn, Ptr8Ty);
+ llvm::Constant *m = llvm::ConstantExpr::getBitCast(Fn, Ptr8Ty);
+ return m;
+}
+
+llvm::Value *
+CodeGenFunction::GetVirtualCXXBaseClassOffset(llvm::Value *This,
+ const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *BaseClassDecl) {
+ const llvm::Type *Int8PtrTy =
+ llvm::Type::getInt8Ty(VMContext)->getPointerTo();
+
+ llvm::Value *VTablePtr = Builder.CreateBitCast(This,
+ Int8PtrTy->getPointerTo());
+ VTablePtr = Builder.CreateLoad(VTablePtr, "vtable");
+
+ int64_t VBaseOffsetIndex =
+ CGM.getVtableInfo().getVirtualBaseOffsetIndex(ClassDecl, BaseClassDecl);
+
+ llvm::Value *VBaseOffsetPtr =
+ Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetIndex, "vbase.offset.ptr");
+ const llvm::Type *PtrDiffTy =
+ ConvertType(getContext().getPointerDiffType());
+
+ VBaseOffsetPtr = Builder.CreateBitCast(VBaseOffsetPtr,
+ PtrDiffTy->getPointerTo());
+
+ llvm::Value *VBaseOffset = Builder.CreateLoad(VBaseOffsetPtr, "vbase.offset");
+
+ return VBaseOffset;
+}
+
+llvm::Value *
+CodeGenFunction::BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *&This,
+ const llvm::Type *Ty) {
+ int64_t Index = CGM.getVtableInfo().getMethodVtableIndex(MD);
+
+ Ty = llvm::PointerType::get(Ty, 0);
+ Ty = llvm::PointerType::get(Ty, 0);
+ Ty = llvm::PointerType::get(Ty, 0);
+ llvm::Value *vtbl = Builder.CreateBitCast(This, Ty);
+ vtbl = Builder.CreateLoad(vtbl);
+ llvm::Value *vfn = Builder.CreateConstInBoundsGEP1_64(vtbl,
+ Index, "vfn");
+ vfn = Builder.CreateLoad(vfn);
+ return vfn;
+}
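
BuildVirtualCall produces the classic three-step dispatch: load the vptr, index the table, load the slot. In C-like terms it is roughly the following (a sketch only; the real slot numbering comes from CGVtable's getMethodVtableIndex):

typedef int (*Slot)(void *self, int arg);          // stand-in for the real function type

int dispatch(void *obj, long index, int arg) {
  Slot *vtable = *reinterpret_cast<Slot **>(obj);  // load the vptr stored at offset 0
  Slot fn = vtable[index];                         // index into the virtual table
  return fn(obj, arg);                             // indirect call, passing 'this'
}
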
+
+/// EmitClassAggrMemberwiseCopy - This routine generates code to copy a class
+/// array of objects from SrcValue to DestValue. Copying can be either a bitwise
+/// copy or a copy constructor call.
+// FIXME. Consolidate this with EmitCXXAggrConstructorCall.
+void CodeGenFunction::EmitClassAggrMemberwiseCopy(llvm::Value *Dest,
+ llvm::Value *Src,
+ const ArrayType *Array,
+ const CXXRecordDecl *BaseClassDecl,
+ QualType Ty) {
+ const ConstantArrayType *CA = dyn_cast<ConstantArrayType>(Array);
+ assert(CA && "VLA cannot be copied over");
+ bool BitwiseCopy = BaseClassDecl->hasTrivialCopyConstructor();
+
+ // Create a temporary for the loop index and initialize it with 0.
+ llvm::Value *IndexPtr = CreateTempAlloca(llvm::Type::getInt64Ty(VMContext),
+ "loop.index");
+ llvm::Value* zeroConstant =
+ llvm::Constant::getNullValue(llvm::Type::getInt64Ty(VMContext));
+ Builder.CreateStore(zeroConstant, IndexPtr, false);
+ // Start the loop with a block that tests the condition.
+ llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
+ llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
+
+ EmitBlock(CondBlock);
+
+ llvm::BasicBlock *ForBody = createBasicBlock("for.body");
+  // Generate: if (loop-index < number-of-elements) fall to the loop body,
+ // otherwise, go to the block after the for-loop.
+ uint64_t NumElements = getContext().getConstantArrayElementCount(CA);
+ llvm::Value * NumElementsPtr =
+ llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), NumElements);
+ llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
+ llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElementsPtr,
+ "isless");
+ // If the condition is true, execute the body.
+ Builder.CreateCondBr(IsLess, ForBody, AfterFor);
+
+ EmitBlock(ForBody);
+ llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
+  // Inside the loop body, copy the array element, either bitwise or via the
+  // copy constructor.
+ Counter = Builder.CreateLoad(IndexPtr);
+ Src = Builder.CreateInBoundsGEP(Src, Counter, "srcaddress");
+ Dest = Builder.CreateInBoundsGEP(Dest, Counter, "destaddress");
+ if (BitwiseCopy)
+ EmitAggregateCopy(Dest, Src, Ty);
+ else if (CXXConstructorDecl *BaseCopyCtor =
+ BaseClassDecl->getCopyConstructor(getContext(), 0)) {
+ llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(BaseCopyCtor,
+ Ctor_Complete);
+ CallArgList CallArgs;
+ // Push the this (Dest) ptr.
+ CallArgs.push_back(std::make_pair(RValue::get(Dest),
+ BaseCopyCtor->getThisType(getContext())));
+
+ // Push the Src ptr.
+ CallArgs.push_back(std::make_pair(RValue::get(Src),
+ BaseCopyCtor->getParamDecl(0)->getType()));
+ QualType ResultType =
+ BaseCopyCtor->getType()->getAs<FunctionType>()->getResultType();
+ EmitCall(CGM.getTypes().getFunctionInfo(ResultType, CallArgs),
+ Callee, CallArgs, BaseCopyCtor);
+ }
+ EmitBlock(ContinueBlock);
+
+ // Emit the increment of the loop counter.
+ llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
+ Counter = Builder.CreateLoad(IndexPtr);
+ NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
+ Builder.CreateStore(NextVal, IndexPtr, false);
+
+ // Finally, branch back up to the condition for the next iteration.
+ EmitBranch(CondBlock);
+
+ // Emit the fall-through block.
+ EmitBlock(AfterFor, true);
+}
+
+/// EmitClassAggrCopyAssignment - This routine generates code to assign a class
+/// array of objects from SrcValue to DestValue. Assignment can be either a
+/// bitwise assignment or a copy assignment operator call.
+/// FIXME. This can be consolidated with EmitClassAggrMemberwiseCopy
+void CodeGenFunction::EmitClassAggrCopyAssignment(llvm::Value *Dest,
+ llvm::Value *Src,
+ const ArrayType *Array,
+ const CXXRecordDecl *BaseClassDecl,
+ QualType Ty) {
+ const ConstantArrayType *CA = dyn_cast<ConstantArrayType>(Array);
+  assert(CA && "VLA cannot be assigned");
+ bool BitwiseAssign = BaseClassDecl->hasTrivialCopyAssignment();
+
+ // Create a temporary for the loop index and initialize it with 0.
+ llvm::Value *IndexPtr = CreateTempAlloca(llvm::Type::getInt64Ty(VMContext),
+ "loop.index");
+ llvm::Value* zeroConstant =
+ llvm::Constant::getNullValue(llvm::Type::getInt64Ty(VMContext));
+ Builder.CreateStore(zeroConstant, IndexPtr, false);
+ // Start the loop with a block that tests the condition.
+ llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
+ llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
+
+ EmitBlock(CondBlock);
+
+ llvm::BasicBlock *ForBody = createBasicBlock("for.body");
+  // Generate: if (loop-index < number-of-elements) fall to the loop body,
+ // otherwise, go to the block after the for-loop.
+ uint64_t NumElements = getContext().getConstantArrayElementCount(CA);
+ llvm::Value * NumElementsPtr =
+ llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), NumElements);
+ llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
+ llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElementsPtr,
+ "isless");
+ // If the condition is true, execute the body.
+ Builder.CreateCondBr(IsLess, ForBody, AfterFor);
+
+ EmitBlock(ForBody);
+ llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
+  // Inside the loop body, assign the array element, either bitwise or via the
+  // copy assignment operator.
+ Counter = Builder.CreateLoad(IndexPtr);
+ Src = Builder.CreateInBoundsGEP(Src, Counter, "srcaddress");
+ Dest = Builder.CreateInBoundsGEP(Dest, Counter, "destaddress");
+ const CXXMethodDecl *MD = 0;
+ if (BitwiseAssign)
+ EmitAggregateCopy(Dest, Src, Ty);
+ else {
+ bool hasCopyAssign = BaseClassDecl->hasConstCopyAssignment(getContext(),
+ MD);
+ assert(hasCopyAssign && "EmitClassAggrCopyAssignment - No user assign");
+ (void)hasCopyAssign;
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ const llvm::Type *LTy =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
+ FPT->isVariadic());
+ llvm::Constant *Callee = CGM.GetAddrOfFunction(MD, LTy);
+
+ CallArgList CallArgs;
+ // Push the this (Dest) ptr.
+ CallArgs.push_back(std::make_pair(RValue::get(Dest),
+ MD->getThisType(getContext())));
+
+ // Push the Src ptr.
+ CallArgs.push_back(std::make_pair(RValue::get(Src),
+ MD->getParamDecl(0)->getType()));
+ QualType ResultType = MD->getType()->getAs<FunctionType>()->getResultType();
+ EmitCall(CGM.getTypes().getFunctionInfo(ResultType, CallArgs),
+ Callee, CallArgs, MD);
+ }
+ EmitBlock(ContinueBlock);
+
+ // Emit the increment of the loop counter.
+ llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
+ Counter = Builder.CreateLoad(IndexPtr);
+ NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
+ Builder.CreateStore(NextVal, IndexPtr, false);
+
+ // Finally, branch back up to the condition for the next iteration.
+ EmitBranch(CondBlock);
+
+ // Emit the fall-through block.
+ EmitBlock(AfterFor, true);
+}
+
+/// EmitClassMemberwiseCopy - This routine generates code to copy a class
+/// object from SrcValue to DestValue. Copying can be either a bitwise copy
+/// or a copy constructor call.
+void CodeGenFunction::EmitClassMemberwiseCopy(
+ llvm::Value *Dest, llvm::Value *Src,
+ const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *BaseClassDecl, QualType Ty) {
+ if (ClassDecl) {
+ Dest = GetAddressCXXOfBaseClass(Dest, ClassDecl, BaseClassDecl,
+ /*NullCheckValue=*/false);
+ Src = GetAddressCXXOfBaseClass(Src, ClassDecl, BaseClassDecl,
+ /*NullCheckValue=*/false);
+ }
+ if (BaseClassDecl->hasTrivialCopyConstructor()) {
+ EmitAggregateCopy(Dest, Src, Ty);
+ return;
+ }
+
+ if (CXXConstructorDecl *BaseCopyCtor =
+ BaseClassDecl->getCopyConstructor(getContext(), 0)) {
+ llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(BaseCopyCtor,
+ Ctor_Complete);
+ CallArgList CallArgs;
+ // Push the this (Dest) ptr.
+ CallArgs.push_back(std::make_pair(RValue::get(Dest),
+ BaseCopyCtor->getThisType(getContext())));
+
+ // Push the Src ptr.
+ CallArgs.push_back(std::make_pair(RValue::get(Src),
+ BaseCopyCtor->getParamDecl(0)->getType()));
+ QualType ResultType =
+ BaseCopyCtor->getType()->getAs<FunctionType>()->getResultType();
+ EmitCall(CGM.getTypes().getFunctionInfo(ResultType, CallArgs),
+ Callee, CallArgs, BaseCopyCtor);
+ }
+}
+
+/// EmitClassCopyAssignment - This routine generates code to copy assign a class
+/// object from SrcValue to DestValue. Assignment can be either a bitwise
+/// assignment or an assignment operator call.
+// FIXME. Consolidate this with EmitClassMemberwiseCopy as they share a lot.
+void CodeGenFunction::EmitClassCopyAssignment(
+ llvm::Value *Dest, llvm::Value *Src,
+ const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *BaseClassDecl,
+ QualType Ty) {
+ if (ClassDecl) {
+ Dest = GetAddressCXXOfBaseClass(Dest, ClassDecl, BaseClassDecl,
+ /*NullCheckValue=*/false);
+ Src = GetAddressCXXOfBaseClass(Src, ClassDecl, BaseClassDecl,
+ /*NullCheckValue=*/false);
+ }
+ if (BaseClassDecl->hasTrivialCopyAssignment()) {
+ EmitAggregateCopy(Dest, Src, Ty);
+ return;
+ }
+
+ const CXXMethodDecl *MD = 0;
+ bool ConstCopyAssignOp = BaseClassDecl->hasConstCopyAssignment(getContext(),
+ MD);
+ assert(ConstCopyAssignOp && "EmitClassCopyAssignment - missing copy assign");
+ (void)ConstCopyAssignOp;
+
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ const llvm::Type *LTy =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
+ FPT->isVariadic());
+ llvm::Constant *Callee = CGM.GetAddrOfFunction(MD, LTy);
+
+ CallArgList CallArgs;
+ // Push the this (Dest) ptr.
+ CallArgs.push_back(std::make_pair(RValue::get(Dest),
+ MD->getThisType(getContext())));
+
+ // Push the Src ptr.
+ CallArgs.push_back(std::make_pair(RValue::get(Src),
+ MD->getParamDecl(0)->getType()));
+ QualType ResultType =
+ MD->getType()->getAs<FunctionType>()->getResultType();
+ EmitCall(CGM.getTypes().getFunctionInfo(ResultType, CallArgs),
+ Callee, CallArgs, MD);
+}
+
+/// SynthesizeDefaultConstructor - synthesize a default constructor
+void
+CodeGenFunction::SynthesizeDefaultConstructor(const CXXConstructorDecl *Ctor,
+ CXXCtorType Type,
+ llvm::Function *Fn,
+ const FunctionArgList &Args) {
+ StartFunction(GlobalDecl(Ctor, Type), Ctor->getResultType(), Fn, Args,
+ SourceLocation());
+ EmitCtorPrologue(Ctor, Type);
+ FinishFunction();
+}
+
+/// SynthesizeCXXCopyConstructor - This routine implicitly defines the body of
+/// a copy constructor, in accordance with section 12.8 (p7 and p8) of C++03.
+/// The implicitly-defined copy constructor for class X performs a memberwise
+/// copy of its subobjects. The order of copying is the same as the order
+/// of initialization of bases and members in a user-defined constructor
+/// Each subobject is copied in the manner appropriate to its type:
+/// if the subobject is of class type, the copy constructor for the class is
+/// used;
+/// if the subobject is an array, each element is copied, in the manner
+/// appropriate to the element type;
+/// if the subobject is of scalar type, the built-in assignment operator is
+/// used.
+/// Virtual base class subobjects shall be copied only once by the
+/// implicitly-defined copy constructor.
+
+void
+CodeGenFunction::SynthesizeCXXCopyConstructor(const CXXConstructorDecl *Ctor,
+ CXXCtorType Type,
+ llvm::Function *Fn,
+ const FunctionArgList &Args) {
+ const CXXRecordDecl *ClassDecl = Ctor->getParent();
+ assert(!ClassDecl->hasUserDeclaredCopyConstructor() &&
+ "SynthesizeCXXCopyConstructor - copy constructor has definition already");
+ StartFunction(GlobalDecl(Ctor, Type), Ctor->getResultType(), Fn, Args,
+ SourceLocation());
+
+ FunctionArgList::const_iterator i = Args.begin();
+ const VarDecl *ThisArg = i->first;
+ llvm::Value *ThisObj = GetAddrOfLocalVar(ThisArg);
+ llvm::Value *LoadOfThis = Builder.CreateLoad(ThisObj, "this");
+ const VarDecl *SrcArg = (i+1)->first;
+ llvm::Value *SrcObj = GetAddrOfLocalVar(SrcArg);
+ llvm::Value *LoadOfSrc = Builder.CreateLoad(SrcObj);
+
+ for (CXXRecordDecl::base_class_const_iterator Base = ClassDecl->bases_begin();
+ Base != ClassDecl->bases_end(); ++Base) {
+    // FIXME. copy construction of virtual base NYI
+ if (Base->isVirtual())
+ continue;
+
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+ EmitClassMemberwiseCopy(LoadOfThis, LoadOfSrc, ClassDecl, BaseClassDecl,
+ Base->getType());
+ }
+
+ for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
+ FieldEnd = ClassDecl->field_end();
+ Field != FieldEnd; ++Field) {
+ QualType FieldType = getContext().getCanonicalType((*Field)->getType());
+ const ConstantArrayType *Array =
+ getContext().getAsConstantArrayType(FieldType);
+ if (Array)
+ FieldType = getContext().getBaseElementType(FieldType);
+
+ if (const RecordType *FieldClassType = FieldType->getAs<RecordType>()) {
+ CXXRecordDecl *FieldClassDecl
+ = cast<CXXRecordDecl>(FieldClassType->getDecl());
+ LValue LHS = EmitLValueForField(LoadOfThis, *Field, false, 0);
+ LValue RHS = EmitLValueForField(LoadOfSrc, *Field, false, 0);
+ if (Array) {
+ const llvm::Type *BasePtr = ConvertType(FieldType);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *DestBaseAddrPtr =
+ Builder.CreateBitCast(LHS.getAddress(), BasePtr);
+ llvm::Value *SrcBaseAddrPtr =
+ Builder.CreateBitCast(RHS.getAddress(), BasePtr);
+ EmitClassAggrMemberwiseCopy(DestBaseAddrPtr, SrcBaseAddrPtr, Array,
+ FieldClassDecl, FieldType);
+ }
+ else
+ EmitClassMemberwiseCopy(LHS.getAddress(), RHS.getAddress(),
+ 0 /*ClassDecl*/, FieldClassDecl, FieldType);
+ continue;
+ }
+ // Do a built-in assignment of scalar data members.
+ LValue LHS = EmitLValueForField(LoadOfThis, *Field, false, 0);
+ LValue RHS = EmitLValueForField(LoadOfSrc, *Field, false, 0);
+ RValue RVRHS = EmitLoadOfLValue(RHS, FieldType);
+ EmitStoreThroughLValue(RVRHS, LHS, FieldType);
+ }
+ FinishFunction();
+}
+
+/// SynthesizeCXXCopyAssignment - Implicitly define copy assignment operator.
+/// Before the implicitly-declared copy assignment operator for a class is
+/// implicitly defined, all implicitly-declared copy assignment operators for
+/// its direct base classes and its nonstatic data members shall have been
+/// implicitly defined. [12.8-p12]
+/// The implicitly-defined copy assignment operator for class X performs
+/// memberwise assignment of its subobjects. The direct base classes of X are
+/// assigned first, in the order of their declaration in
+/// the base-specifier-list, and then the immediate nonstatic data members of X
+/// are assigned, in the order in which they were declared in the class
+/// definition. Each subobject is assigned in the manner appropriate to its type:
+/// if the subobject is of class type, the copy assignment operator for the
+/// class is used (as if by explicit qualification; that is, ignoring any
+/// possible virtual overriding functions in more derived classes);
+///
+/// if the subobject is an array, each element is assigned, in the manner
+/// appropriate to the element type;
+///
+/// if the subobject is of scalar type, the built-in assignment operator is
+/// used.
+void CodeGenFunction::SynthesizeCXXCopyAssignment(const CXXMethodDecl *CD,
+ llvm::Function *Fn,
+ const FunctionArgList &Args) {
+
+ const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(CD->getDeclContext());
+ assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
+ "SynthesizeCXXCopyAssignment - copy assignment has user declaration");
+ StartFunction(CD, CD->getResultType(), Fn, Args, SourceLocation());
+
+ FunctionArgList::const_iterator i = Args.begin();
+ const VarDecl *ThisArg = i->first;
+ llvm::Value *ThisObj = GetAddrOfLocalVar(ThisArg);
+ llvm::Value *LoadOfThis = Builder.CreateLoad(ThisObj, "this");
+ const VarDecl *SrcArg = (i+1)->first;
+ llvm::Value *SrcObj = GetAddrOfLocalVar(SrcArg);
+ llvm::Value *LoadOfSrc = Builder.CreateLoad(SrcObj);
+
+ for (CXXRecordDecl::base_class_const_iterator Base = ClassDecl->bases_begin();
+ Base != ClassDecl->bases_end(); ++Base) {
+ // FIXME. copy assignment of virtual base NYI
+ if (Base->isVirtual())
+ continue;
+
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+ EmitClassCopyAssignment(LoadOfThis, LoadOfSrc, ClassDecl, BaseClassDecl,
+ Base->getType());
+ }
+
+ for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
+ FieldEnd = ClassDecl->field_end();
+ Field != FieldEnd; ++Field) {
+ QualType FieldType = getContext().getCanonicalType((*Field)->getType());
+ const ConstantArrayType *Array =
+ getContext().getAsConstantArrayType(FieldType);
+ if (Array)
+ FieldType = getContext().getBaseElementType(FieldType);
+
+ if (const RecordType *FieldClassType = FieldType->getAs<RecordType>()) {
+ CXXRecordDecl *FieldClassDecl
+ = cast<CXXRecordDecl>(FieldClassType->getDecl());
+ LValue LHS = EmitLValueForField(LoadOfThis, *Field, false, 0);
+ LValue RHS = EmitLValueForField(LoadOfSrc, *Field, false, 0);
+ if (Array) {
+ const llvm::Type *BasePtr = ConvertType(FieldType);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *DestBaseAddrPtr =
+ Builder.CreateBitCast(LHS.getAddress(), BasePtr);
+ llvm::Value *SrcBaseAddrPtr =
+ Builder.CreateBitCast(RHS.getAddress(), BasePtr);
+ EmitClassAggrCopyAssignment(DestBaseAddrPtr, SrcBaseAddrPtr, Array,
+ FieldClassDecl, FieldType);
+ }
+ else
+ EmitClassCopyAssignment(LHS.getAddress(), RHS.getAddress(),
+ 0 /*ClassDecl*/, FieldClassDecl, FieldType);
+ continue;
+ }
+ // Do a built-in assignment of scalar data members.
+ LValue LHS = EmitLValueForField(LoadOfThis, *Field, false, 0);
+ LValue RHS = EmitLValueForField(LoadOfSrc, *Field, false, 0);
+ RValue RVRHS = EmitLoadOfLValue(RHS, FieldType);
+ EmitStoreThroughLValue(RVRHS, LHS, FieldType);
+ }
+
+ // return *this;
+ Builder.CreateStore(LoadOfThis, ReturnValue);
+
+ FinishFunction();
+}
+
+/// EmitCtorPrologue - This routine generates the code needed to initialize the
+/// base classes and non-static data members of this constructor's class.
+/// FIXME: This needs to take a CXXCtorType.
+void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
+ CXXCtorType CtorType) {
+ const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(CD->getDeclContext());
+ // FIXME: Add vbase initialization
+ llvm::Value *LoadOfThis = 0;
+
+ for (CXXConstructorDecl::init_const_iterator B = CD->init_begin(),
+ E = CD->init_end();
+ B != E; ++B) {
+ CXXBaseOrMemberInitializer *Member = (*B);
+ if (Member->isBaseInitializer()) {
+ LoadOfThis = LoadCXXThis();
+ Type *BaseType = Member->getBaseClass();
+ CXXRecordDecl *BaseClassDecl =
+ cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());
+ llvm::Value *V = GetAddressCXXOfBaseClass(LoadOfThis, ClassDecl,
+ BaseClassDecl,
+ /*NullCheckValue=*/false);
+ EmitCXXConstructorCall(Member->getConstructor(),
+ CtorType, V,
+ Member->const_arg_begin(),
+ Member->const_arg_end());
+ } else {
+      // Non-static data member initializers.
+ FieldDecl *Field = Member->getMember();
+ QualType FieldType = getContext().getCanonicalType((Field)->getType());
+ const ConstantArrayType *Array =
+ getContext().getAsConstantArrayType(FieldType);
+ if (Array)
+ FieldType = getContext().getBaseElementType(FieldType);
+
+ LoadOfThis = LoadCXXThis();
+ LValue LHS;
+ if (FieldType->isReferenceType()) {
+ // FIXME: This is really ugly; should be refactored somehow
+ unsigned idx = CGM.getTypes().getLLVMFieldNo(Field);
+ llvm::Value *V = Builder.CreateStructGEP(LoadOfThis, idx, "tmp");
+ assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");
+ LHS = LValue::MakeAddr(V, MakeQualifiers(FieldType));
+ } else {
+ LHS = EmitLValueForField(LoadOfThis, Field, false, 0);
+ }
+ if (FieldType->getAs<RecordType>()) {
+ if (!Field->isAnonymousStructOrUnion()) {
+ assert(Member->getConstructor() &&
+ "EmitCtorPrologue - no constructor to initialize member");
+ if (Array) {
+ const llvm::Type *BasePtr = ConvertType(FieldType);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *BaseAddrPtr =
+ Builder.CreateBitCast(LHS.getAddress(), BasePtr);
+ EmitCXXAggrConstructorCall(Member->getConstructor(),
+ Array, BaseAddrPtr);
+ }
+ else
+ EmitCXXConstructorCall(Member->getConstructor(),
+ Ctor_Complete, LHS.getAddress(),
+ Member->const_arg_begin(),
+ Member->const_arg_end());
+ continue;
+ }
+ else {
+ // Initializing an anonymous union data member.
+ FieldDecl *anonMember = Member->getAnonUnionMember();
+ LHS = EmitLValueForField(LHS.getAddress(), anonMember,
+ /*IsUnion=*/true, 0);
+ FieldType = anonMember->getType();
+ }
+ }
+
+ assert(Member->getNumArgs() == 1 && "Initializer count must be 1 only");
+ Expr *RhsExpr = *Member->arg_begin();
+ RValue RHS;
+ if (FieldType->isReferenceType())
+ RHS = EmitReferenceBindingToExpr(RhsExpr, FieldType,
+ /*IsInitializer=*/true);
+ else
+ RHS = RValue::get(EmitScalarExpr(RhsExpr, true));
+ EmitStoreThroughLValue(RHS, LHS, FieldType);
+ }
+ }
+
+ if (!CD->getNumBaseOrMemberInitializers() && !CD->isTrivial()) {
+ // Nontrivial default constructor with no initializer list. It may still
+    // have base classes and/or contain non-static data members which require
+ // construction.
+ for (CXXRecordDecl::base_class_const_iterator Base =
+ ClassDecl->bases_begin();
+ Base != ClassDecl->bases_end(); ++Base) {
+      // FIXME. construction of virtual base NYI
+ if (Base->isVirtual())
+ continue;
+
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+ if (BaseClassDecl->hasTrivialConstructor())
+ continue;
+ if (CXXConstructorDecl *BaseCX =
+ BaseClassDecl->getDefaultConstructor(getContext())) {
+ LoadOfThis = LoadCXXThis();
+ llvm::Value *V = GetAddressCXXOfBaseClass(LoadOfThis, ClassDecl,
+ BaseClassDecl,
+ /*NullCheckValue=*/false);
+ EmitCXXConstructorCall(BaseCX, Ctor_Complete, V, 0, 0);
+ }
+ }
+
+ for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
+ FieldEnd = ClassDecl->field_end();
+ Field != FieldEnd; ++Field) {
+ QualType FieldType = getContext().getCanonicalType((*Field)->getType());
+ const ConstantArrayType *Array =
+ getContext().getAsConstantArrayType(FieldType);
+ if (Array)
+ FieldType = getContext().getBaseElementType(FieldType);
+ if (!FieldType->getAs<RecordType>() || Field->isAnonymousStructOrUnion())
+ continue;
+ const RecordType *ClassRec = FieldType->getAs<RecordType>();
+ CXXRecordDecl *MemberClassDecl =
+ dyn_cast<CXXRecordDecl>(ClassRec->getDecl());
+ if (!MemberClassDecl || MemberClassDecl->hasTrivialConstructor())
+ continue;
+ if (CXXConstructorDecl *MamberCX =
+ MemberClassDecl->getDefaultConstructor(getContext())) {
+ LoadOfThis = LoadCXXThis();
+ LValue LHS = EmitLValueForField(LoadOfThis, *Field, false, 0);
+ if (Array) {
+ const llvm::Type *BasePtr = ConvertType(FieldType);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *BaseAddrPtr =
+ Builder.CreateBitCast(LHS.getAddress(), BasePtr);
+ EmitCXXAggrConstructorCall(MamberCX, Array, BaseAddrPtr);
+ }
+ else
+ EmitCXXConstructorCall(MamberCX, Ctor_Complete, LHS.getAddress(),
+ 0, 0);
+ }
+ }
+ }
+
+ // Initialize the vtable pointer
+ if (ClassDecl->isDynamicClass()) {
+ if (!LoadOfThis)
+ LoadOfThis = LoadCXXThis();
+ llvm::Value *VtableField;
+ llvm::Type *Ptr8Ty, *PtrPtr8Ty;
+ Ptr8Ty = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext), 0);
+ PtrPtr8Ty = llvm::PointerType::get(Ptr8Ty, 0);
+ VtableField = Builder.CreateBitCast(LoadOfThis, PtrPtr8Ty);
+ llvm::Value *vtable = GenerateVtable(ClassDecl);
+ Builder.CreateStore(vtable, VtableField);
+ }
+}
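
For a constructor such as the sketch below, the prologue emits the base initializer first, then the member initializers in declaration order; for a dynamic class, this snapshot stores the vtable pointer at the end of the prologue (types are illustrative):

struct Base   { Base(int); };
struct Member { Member(); };

struct S : Base {
  int    a;
  Member m;
  // prologue order: Base(x), then a, then m; a dynamic class would also
  // get its vtable pointer stored once the initializers have run.
  S(int x) : Base(x), a(x), m() {}
};
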
+
+/// EmitDtorEpilogue - Emit all code that comes at the end of the class's
+/// destructor. This calls destructors on members and base classes
+/// in reverse order of their construction.
+/// FIXME: This needs to take a CXXDtorType.
+void CodeGenFunction::EmitDtorEpilogue(const CXXDestructorDecl *DD,
+ CXXDtorType DtorType) {
+ const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(DD->getDeclContext());
+ assert(!ClassDecl->getNumVBases() &&
+ "FIXME: Destruction of virtual bases not supported");
+ (void)ClassDecl; // prevent warning.
+
+ for (CXXDestructorDecl::destr_const_iterator *B = DD->destr_begin(),
+ *E = DD->destr_end(); B != E; ++B) {
+ uintptr_t BaseOrMember = (*B);
+ if (DD->isMemberToDestroy(BaseOrMember)) {
+ FieldDecl *FD = DD->getMemberToDestroy(BaseOrMember);
+ QualType FieldType = getContext().getCanonicalType((FD)->getType());
+ const ConstantArrayType *Array =
+ getContext().getAsConstantArrayType(FieldType);
+ if (Array)
+ FieldType = getContext().getBaseElementType(FieldType);
+ const RecordType *RT = FieldType->getAs<RecordType>();
+ CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (FieldClassDecl->hasTrivialDestructor())
+ continue;
+ llvm::Value *LoadOfThis = LoadCXXThis();
+ LValue LHS = EmitLValueForField(LoadOfThis, FD, false, 0);
+ if (Array) {
+ const llvm::Type *BasePtr = ConvertType(FieldType);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *BaseAddrPtr =
+ Builder.CreateBitCast(LHS.getAddress(), BasePtr);
+ EmitCXXAggrDestructorCall(FieldClassDecl->getDestructor(getContext()),
+ Array, BaseAddrPtr);
+ }
+ else
+ EmitCXXDestructorCall(FieldClassDecl->getDestructor(getContext()),
+ Dtor_Complete, LHS.getAddress());
+ } else {
+ const RecordType *RT =
+ DD->getAnyBaseClassToDestroy(BaseOrMember)->getAs<RecordType>();
+ CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (BaseClassDecl->hasTrivialDestructor())
+ continue;
+ llvm::Value *V = GetAddressCXXOfBaseClass(LoadCXXThis(),
+ ClassDecl, BaseClassDecl,
+ /*NullCheckValue=*/false);
+ EmitCXXDestructorCall(BaseClassDecl->getDestructor(getContext()),
+ DtorType, V);
+ }
+ }
+ if (DD->getNumBaseOrMemberDestructions() || DD->isTrivial())
+ return;
+  // Synthesized destructor: fields and base classes with non-trivial
+  // destructors must be destroyed in reverse order of their construction.
+ llvm::SmallVector<FieldDecl *, 16> DestructedFields;
+
+ for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
+ FieldEnd = ClassDecl->field_end();
+ Field != FieldEnd; ++Field) {
+ QualType FieldType = getContext().getCanonicalType((*Field)->getType());
+ if (getContext().getAsConstantArrayType(FieldType))
+ FieldType = getContext().getBaseElementType(FieldType);
+ if (const RecordType *RT = FieldType->getAs<RecordType>()) {
+ CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (FieldClassDecl->hasTrivialDestructor())
+ continue;
+ DestructedFields.push_back(*Field);
+ }
+ }
+ if (!DestructedFields.empty())
+    for (int i = DestructedFields.size() - 1; i >= 0; --i) {
+ FieldDecl *Field = DestructedFields[i];
+ QualType FieldType = Field->getType();
+ const ConstantArrayType *Array =
+ getContext().getAsConstantArrayType(FieldType);
+ if (Array)
+ FieldType = getContext().getBaseElementType(FieldType);
+ const RecordType *RT = FieldType->getAs<RecordType>();
+ CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ llvm::Value *LoadOfThis = LoadCXXThis();
+ LValue LHS = EmitLValueForField(LoadOfThis, Field, false, 0);
+ if (Array) {
+ const llvm::Type *BasePtr = ConvertType(FieldType);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *BaseAddrPtr =
+ Builder.CreateBitCast(LHS.getAddress(), BasePtr);
+ EmitCXXAggrDestructorCall(FieldClassDecl->getDestructor(getContext()),
+ Array, BaseAddrPtr);
+ }
+ else
+ EmitCXXDestructorCall(FieldClassDecl->getDestructor(getContext()),
+ Dtor_Complete, LHS.getAddress());
+ }
+
+ llvm::SmallVector<CXXRecordDecl*, 4> DestructedBases;
+ for (CXXRecordDecl::base_class_const_iterator Base = ClassDecl->bases_begin();
+ Base != ClassDecl->bases_end(); ++Base) {
+    // FIXME: Destruction of virtual bases is not yet implemented.
+ if (Base->isVirtual())
+ continue;
+
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+ if (BaseClassDecl->hasTrivialDestructor())
+ continue;
+ DestructedBases.push_back(BaseClassDecl);
+ }
+ if (DestructedBases.empty())
+ return;
+  for (int i = DestructedBases.size() - 1; i >= 0; --i) {
+ CXXRecordDecl *BaseClassDecl = DestructedBases[i];
+ llvm::Value *V = GetAddressCXXOfBaseClass(LoadCXXThis(),
+                                              ClassDecl, BaseClassDecl,
+ /*NullCheckValue=*/false);
+ EmitCXXDestructorCall(BaseClassDecl->getDestructor(getContext()),
+ Dtor_Complete, V);
+ }
+}
+
+void CodeGenFunction::SynthesizeDefaultDestructor(const CXXDestructorDecl *Dtor,
+ CXXDtorType DtorType,
+ llvm::Function *Fn,
+ const FunctionArgList &Args) {
+
+ const CXXRecordDecl *ClassDecl = Dtor->getParent();
+ assert(!ClassDecl->hasUserDeclaredDestructor() &&
+ "SynthesizeDefaultDestructor - destructor has user declaration");
+ (void) ClassDecl;
+
+ StartFunction(GlobalDecl(Dtor, DtorType), Dtor->getResultType(), Fn, Args,
+ SourceLocation());
+ EmitDtorEpilogue(Dtor, DtorType);
+ FinishFunction();
+}
+
+// FIXME: Move this to CGCXXStmt.cpp
+void CodeGenFunction::EmitCXXTryStmt(const CXXTryStmt &S) {
+ // FIXME: We need to do more here.
+ EmitStmt(S.getTryBlock());
+}
diff --git a/lib/CodeGen/CGCXX.h b/lib/CodeGen/CGCXX.h
index 6051d9133c02..1e6adb05a0d9 100644
--- a/lib/CodeGen/CGCXX.h
+++ b/lib/CodeGen/CGCXX.h
@@ -30,7 +30,7 @@ enum CXXDtorType {
Dtor_Complete, // Complete object dtor
Dtor_Base // Base object dtor
};
-
+
} // end namespace clang
#endif // CLANG_CODEGEN_CGCXX_H
diff --git a/lib/CodeGen/CGCXXClass.cpp b/lib/CodeGen/CGCXXClass.cpp
new file mode 100644
index 000000000000..56a28fc9a007
--- /dev/null
+++ b/lib/CodeGen/CGCXXClass.cpp
@@ -0,0 +1,176 @@
+//===--- CGCXXClass.cpp - Emit LLVM Code for C++ classes ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with code generation of C++ classes
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/RecordLayout.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+static uint64_t
+ComputeNonVirtualBaseClassOffset(ASTContext &Context, CXXBasePaths &Paths,
+ unsigned Start) {
+ uint64_t Offset = 0;
+
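+  // Walk the recorded inheritance path starting at Start, summing the
+  // statically known offset of each base (converted from bits to bytes).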
+ const CXXBasePath &Path = Paths.front();
+ for (unsigned i = Start, e = Path.size(); i != e; ++i) {
+ const CXXBasePathElement& Element = Path[i];
+
+ // Get the layout.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Element.Class);
+
+ const CXXBaseSpecifier *BS = Element.Base;
+ assert(!BS->isVirtual() && "Should not see virtual bases here!");
+
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(BS->getType()->getAs<RecordType>()->getDecl());
+
+ // Add the offset.
+ Offset += Layout.getBaseClassOffset(Base) / 8;
+ }
+
+ return Offset;
+}
+
+llvm::Constant *
+CodeGenModule::GetCXXBaseClassOffset(const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *BaseClassDecl) {
+ if (ClassDecl == BaseClassDecl)
+ return 0;
+
+ CXXBasePaths Paths(/*FindAmbiguities=*/false,
+ /*RecordPaths=*/true, /*DetectVirtual=*/false);
+ if (!const_cast<CXXRecordDecl *>(ClassDecl)->
+ isDerivedFrom(const_cast<CXXRecordDecl *>(BaseClassDecl), Paths)) {
+ assert(false && "Class must be derived from the passed in base class!");
+ return 0;
+ }
+
+ uint64_t Offset = ComputeNonVirtualBaseClassOffset(getContext(), Paths, 0);
+ if (!Offset)
+ return 0;
+
+ const llvm::Type *PtrDiffTy =
+ Types.ConvertType(getContext().getPointerDiffType());
+
+ return llvm::ConstantInt::get(PtrDiffTy, Offset);
+}
+
+static llvm::Value *GetCXXBaseClassOffset(CodeGenFunction &CGF,
+ llvm::Value *BaseValue,
+ const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *BaseClassDecl) {
+ CXXBasePaths Paths(/*FindAmbiguities=*/false,
+ /*RecordPaths=*/true, /*DetectVirtual=*/true);
+ if (!const_cast<CXXRecordDecl *>(ClassDecl)->
+ isDerivedFrom(const_cast<CXXRecordDecl *>(BaseClassDecl), Paths)) {
+ assert(false && "Class must be derived from the passed in base class!");
+ return 0;
+ }
+
+ unsigned Start = 0;
+ llvm::Value *VirtualOffset = 0;
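+  // If the path goes through a virtual base, the offset to that base has to
+  // be loaded from the vtable at run time; only the rest of the path can be
+  // folded into a constant.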
+ if (const RecordType *RT = Paths.getDetectedVirtual()) {
+ const CXXRecordDecl *VBase = cast<CXXRecordDecl>(RT->getDecl());
+
+ VirtualOffset =
+ CGF.GetVirtualCXXBaseClassOffset(BaseValue, ClassDecl, VBase);
+
+ const CXXBasePath &Path = Paths.front();
+ unsigned e = Path.size();
+ for (Start = 0; Start != e; ++Start) {
+ const CXXBasePathElement& Element = Path[Start];
+
+ if (Element.Class == VBase)
+ break;
+ }
+ }
+
+ uint64_t Offset =
+ ComputeNonVirtualBaseClassOffset(CGF.getContext(), Paths, Start);
+
+ if (!Offset)
+ return VirtualOffset;
+
+ const llvm::Type *PtrDiffTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
+ llvm::Value *NonVirtualOffset = llvm::ConstantInt::get(PtrDiffTy, Offset);
+
+ if (VirtualOffset)
+ return CGF.Builder.CreateAdd(VirtualOffset, NonVirtualOffset);
+
+ return NonVirtualOffset;
+}
+
+llvm::Value *
+CodeGenFunction::GetAddressCXXOfBaseClass(llvm::Value *BaseValue,
+ const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *BaseClassDecl,
+ bool NullCheckValue) {
+ QualType BTy =
+ getContext().getCanonicalType(
+ getContext().getTypeDeclType(const_cast<CXXRecordDecl*>(BaseClassDecl)));
+ const llvm::Type *BasePtrTy = llvm::PointerType::getUnqual(ConvertType(BTy));
+
+ if (ClassDecl == BaseClassDecl) {
+ // Just cast back.
+ return Builder.CreateBitCast(BaseValue, BasePtrTy);
+ }
+
+ llvm::BasicBlock *CastNull = 0;
+ llvm::BasicBlock *CastNotNull = 0;
+ llvm::BasicBlock *CastEnd = 0;
+
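+  // When the caller allows a null pointer, branch around the adjustment so
+  // that a null derived pointer converts to a null base pointer.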
+ if (NullCheckValue) {
+ CastNull = createBasicBlock("cast.null");
+ CastNotNull = createBasicBlock("cast.notnull");
+ CastEnd = createBasicBlock("cast.end");
+
+ llvm::Value *IsNull =
+ Builder.CreateICmpEQ(BaseValue,
+ llvm::Constant::getNullValue(BaseValue->getType()));
+ Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
+ EmitBlock(CastNotNull);
+ }
+
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+
+ llvm::Value *Offset =
+ GetCXXBaseClassOffset(*this, BaseValue, ClassDecl, BaseClassDecl);
+
+ if (Offset) {
+ // Apply the offset.
+ BaseValue = Builder.CreateBitCast(BaseValue, Int8PtrTy);
+ BaseValue = Builder.CreateGEP(BaseValue, Offset, "add.ptr");
+ }
+
+ // Cast back.
+ BaseValue = Builder.CreateBitCast(BaseValue, BasePtrTy);
+
+ if (NullCheckValue) {
+ Builder.CreateBr(CastEnd);
+ EmitBlock(CastNull);
+ Builder.CreateBr(CastEnd);
+ EmitBlock(CastEnd);
+
+ llvm::PHINode *PHI = Builder.CreatePHI(BaseValue->getType());
+ PHI->reserveOperandSpace(2);
+ PHI->addIncoming(BaseValue, CastNotNull);
+ PHI->addIncoming(llvm::Constant::getNullValue(BaseValue->getType()),
+ CastNull);
+ BaseValue = PHI;
+ }
+
+ return BaseValue;
+}
diff --git a/lib/CodeGen/CGCXXExpr.cpp b/lib/CodeGen/CGCXXExpr.cpp
new file mode 100644
index 000000000000..2d62df6c58a4
--- /dev/null
+++ b/lib/CodeGen/CGCXXExpr.cpp
@@ -0,0 +1,304 @@
+//===--- CGCXXExpr.cpp - Emit LLVM Code for C++ expressions ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with code generation of C++ expressions
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+using namespace clang;
+using namespace CodeGen;
+
+static uint64_t CalculateCookiePadding(ASTContext &Ctx, const CXXNewExpr *E) {
+ if (!E->isArray())
+ return 0;
+
+ QualType T = E->getAllocatedType();
+
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return 0;
+
+ const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
+ if (!RD)
+ return 0;
+
+ // Check if the class has a trivial destructor.
+ if (RD->hasTrivialDestructor()) {
+ // FIXME: Check for a two-argument delete.
+ return 0;
+ }
+
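+  // A non-trivial destructor means the matching delete[] will need the
+  // element count, so reserve room for an array cookie in front of the
+  // elements.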
+ // Padding is the maximum of sizeof(size_t) and alignof(T)
+ return std::max(Ctx.getTypeSize(Ctx.getSizeType()),
+ static_cast<uint64_t>(Ctx.getTypeAlign(T))) / 8;
+}
+
+static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
+ const CXXNewExpr *E,
+ llvm::Value *& NumElements) {
+ QualType Type = E->getAllocatedType();
+ uint64_t TypeSizeInBytes = CGF.getContext().getTypeSize(Type) / 8;
+ const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
+
+ if (!E->isArray())
+ return llvm::ConstantInt::get(SizeTy, TypeSizeInBytes);
+
+ uint64_t CookiePadding = CalculateCookiePadding(CGF.getContext(), E);
+
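+  // If the element count is a constant expression, fold the allocation size
+  // (count * element size + cookie) at compile time.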
+ Expr::EvalResult Result;
+ if (E->getArraySize()->Evaluate(Result, CGF.getContext()) &&
+ !Result.HasSideEffects && Result.Val.isInt()) {
+
+ uint64_t AllocSize =
+ Result.Val.getInt().getZExtValue() * TypeSizeInBytes + CookiePadding;
+
+ NumElements =
+ llvm::ConstantInt::get(SizeTy, Result.Val.getInt().getZExtValue());
+
+ return llvm::ConstantInt::get(SizeTy, AllocSize);
+ }
+
+ // Emit the array size expression.
+ NumElements = CGF.EmitScalarExpr(E->getArraySize());
+
+ // Multiply with the type size.
+ llvm::Value *V =
+ CGF.Builder.CreateMul(NumElements,
+ llvm::ConstantInt::get(SizeTy, TypeSizeInBytes));
+
+ // And add the cookie padding if necessary.
+ if (CookiePadding)
+ V = CGF.Builder.CreateAdd(V, llvm::ConstantInt::get(SizeTy, CookiePadding));
+
+ return V;
+}
+
+static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements) {
+ QualType AllocType = E->getAllocatedType();
+
+ if (!E->isArray()) {
+ if (CXXConstructorDecl *Ctor = E->getConstructor()) {
+ CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, NewPtr,
+ E->constructor_arg_begin(),
+ E->constructor_arg_end());
+
+ return;
+ }
+
+ // We have a POD type.
+ if (E->getNumConstructorArgs() == 0)
+ return;
+
+ assert(E->getNumConstructorArgs() == 1 &&
+ "Can only have one argument to initializer of POD type.");
+
+ const Expr *Init = E->getConstructorArg(0);
+
+ if (!CGF.hasAggregateLLVMType(AllocType))
+ CGF.Builder.CreateStore(CGF.EmitScalarExpr(Init), NewPtr);
+ else if (AllocType->isAnyComplexType())
+ CGF.EmitComplexExprIntoAddr(Init, NewPtr,
+ AllocType.isVolatileQualified());
+ else
+ CGF.EmitAggExpr(Init, NewPtr, AllocType.isVolatileQualified());
+ return;
+ }
+
+ if (CXXConstructorDecl *Ctor = E->getConstructor())
+ CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr);
+}
+
+llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
+ QualType AllocType = E->getAllocatedType();
+ FunctionDecl *NewFD = E->getOperatorNew();
+ const FunctionProtoType *NewFTy = NewFD->getType()->getAs<FunctionProtoType>();
+
+ CallArgList NewArgs;
+
+ // The allocation size is the first argument.
+ QualType SizeTy = getContext().getSizeType();
+
+ llvm::Value *NumElements = 0;
+ llvm::Value *AllocSize = EmitCXXNewAllocSize(*this, E, NumElements);
+
+ NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy));
+
+ // Emit the rest of the arguments.
+ // FIXME: Ideally, this should just use EmitCallArgs.
+ CXXNewExpr::const_arg_iterator NewArg = E->placement_arg_begin();
+
+ // First, use the types from the function type.
+ // We start at 1 here because the first argument (the allocation size)
+ // has already been emitted.
+ for (unsigned i = 1, e = NewFTy->getNumArgs(); i != e; ++i, ++NewArg) {
+ QualType ArgType = NewFTy->getArgType(i);
+
+ assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
+ getTypePtr() ==
+ getContext().getCanonicalType(NewArg->getType()).getTypePtr() &&
+ "type mismatch in call argument!");
+
+ NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
+ ArgType));
+
+ }
+
+ // Either we've emitted all the call args, or we have a call to a
+ // variadic function.
+ assert((NewArg == E->placement_arg_end() || NewFTy->isVariadic()) &&
+ "Extra arguments in non-variadic function!");
+
+ // If we still have any arguments, emit them using the type of the argument.
+ for (CXXNewExpr::const_arg_iterator NewArgEnd = E->placement_arg_end();
+ NewArg != NewArgEnd; ++NewArg) {
+ QualType ArgType = NewArg->getType();
+ NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
+ ArgType));
+ }
+
+ // Emit the call to new.
+ RValue RV =
+ EmitCall(CGM.getTypes().getFunctionInfo(NewFTy->getResultType(), NewArgs),
+ CGM.GetAddrOfFunction(NewFD), NewArgs, NewFD);
+
+ // If an allocation function is declared with an empty exception specification
+ // it returns null to indicate failure to allocate storage. [expr.new]p13.
+ // (We don't need to check for null when there's no new initializer and
+ // we're allocating a POD type).
+ bool NullCheckResult = NewFTy->hasEmptyExceptionSpec() &&
+ !(AllocType->isPODType() && !E->hasInitializer());
+
+ llvm::BasicBlock *NewNull = 0;
+ llvm::BasicBlock *NewNotNull = 0;
+ llvm::BasicBlock *NewEnd = 0;
+
+ llvm::Value *NewPtr = RV.getScalarVal();
+
+ if (NullCheckResult) {
+ NewNull = createBasicBlock("new.null");
+ NewNotNull = createBasicBlock("new.notnull");
+ NewEnd = createBasicBlock("new.end");
+
+ llvm::Value *IsNull =
+ Builder.CreateICmpEQ(NewPtr,
+ llvm::Constant::getNullValue(NewPtr->getType()),
+ "isnull");
+
+ Builder.CreateCondBr(IsNull, NewNull, NewNotNull);
+ EmitBlock(NewNotNull);
+ }
+
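+  // If this allocation carries an array cookie, store the element count just
+  // before the first element so delete[] can recover it, then step the
+  // pointer past the cookie.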
+ if (uint64_t CookiePadding = CalculateCookiePadding(getContext(), E)) {
+ uint64_t CookieOffset =
+ CookiePadding - getContext().getTypeSize(SizeTy) / 8;
+
+ llvm::Value *NumElementsPtr =
+ Builder.CreateConstInBoundsGEP1_64(NewPtr, CookieOffset);
+
+ NumElementsPtr = Builder.CreateBitCast(NumElementsPtr,
+ ConvertType(SizeTy)->getPointerTo());
+ Builder.CreateStore(NumElements, NumElementsPtr);
+
+ // Now add the padding to the new ptr.
+ NewPtr = Builder.CreateConstInBoundsGEP1_64(NewPtr, CookiePadding);
+ }
+
+ NewPtr = Builder.CreateBitCast(NewPtr, ConvertType(E->getType()));
+
+ EmitNewInitializer(*this, E, NewPtr, NumElements);
+
+ if (NullCheckResult) {
+ Builder.CreateBr(NewEnd);
+ EmitBlock(NewNull);
+ Builder.CreateBr(NewEnd);
+ EmitBlock(NewEnd);
+
+ llvm::PHINode *PHI = Builder.CreatePHI(NewPtr->getType());
+ PHI->reserveOperandSpace(2);
+ PHI->addIncoming(NewPtr, NewNotNull);
+ PHI->addIncoming(llvm::Constant::getNullValue(NewPtr->getType()), NewNull);
+
+ NewPtr = PHI;
+ }
+
+ return NewPtr;
+}
+
+void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
+ if (E->isArrayForm()) {
+ ErrorUnsupported(E, "delete[] expression");
+ return;
+  }
+
+ // Get at the argument before we performed the implicit conversion
+ // to void*.
+ const Expr *Arg = E->getArgument();
+ while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
+ if (ICE->getCastKind() != CastExpr::CK_UserDefinedConversion &&
+ ICE->getType()->isVoidPointerType())
+ Arg = ICE->getSubExpr();
+ else
+ break;
+ }
+
+ QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
+
+ llvm::Value *Ptr = EmitScalarExpr(Arg);
+
+ // Null check the pointer.
+ llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
+ llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
+
+ llvm::Value *IsNull =
+ Builder.CreateICmpEQ(Ptr, llvm::Constant::getNullValue(Ptr->getType()),
+ "isnull");
+
+ Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
+ EmitBlock(DeleteNotNull);
+
+ // Call the destructor if necessary.
+ if (const RecordType *RT = DeleteTy->getAs<RecordType>()) {
+ if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (!RD->hasTrivialDestructor()) {
+ const CXXDestructorDecl *Dtor = RD->getDestructor(getContext());
+ if (Dtor->isVirtual()) {
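+          // A virtual destructor has to be dispatched through the vtable so
+          // that the most derived destructor runs.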
+ const llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(Dtor),
+ /*isVariadic=*/false);
+
+ llvm::Value *Callee = BuildVirtualCall(Dtor, Ptr, Ty);
+ EmitCXXMemberCall(Dtor, Callee, Ptr, 0, 0);
+ } else
+ EmitCXXDestructorCall(Dtor, Dtor_Complete, Ptr);
+ }
+ }
+ }
+
+ // Call delete.
+ FunctionDecl *DeleteFD = E->getOperatorDelete();
+ const FunctionProtoType *DeleteFTy =
+ DeleteFD->getType()->getAs<FunctionProtoType>();
+
+ CallArgList DeleteArgs;
+
+ QualType ArgTy = DeleteFTy->getArgType(0);
+ llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
+ DeleteArgs.push_back(std::make_pair(RValue::get(DeletePtr), ArgTy));
+
+ // Emit the call to delete.
+ EmitCall(CGM.getTypes().getFunctionInfo(DeleteFTy->getResultType(),
+ DeleteArgs),
+ CGM.GetAddrOfFunction(DeleteFD),
+ DeleteArgs, DeleteFD);
+
+ EmitBlock(DeleteEnd);
+}
diff --git a/lib/CodeGen/CGCXXTemp.cpp b/lib/CodeGen/CGCXXTemp.cpp
index a6e6d11505b6..4768556f6bca 100644
--- a/lib/CodeGen/CGCXXTemp.cpp
+++ b/lib/CodeGen/CGCXXTemp.cpp
@@ -15,29 +15,29 @@
using namespace clang;
using namespace CodeGen;
-void CodeGenFunction::PushCXXTemporary(const CXXTemporary *Temporary,
+void CodeGenFunction::PushCXXTemporary(const CXXTemporary *Temporary,
llvm::Value *Ptr) {
llvm::BasicBlock *DtorBlock = createBasicBlock("temp.dtor");
-
+
llvm::Value *CondPtr = 0;
-
- // Check if temporaries need to be conditional. If so, we'll create a
- // condition boolean, initialize it to 0 and
+
+ // Check if temporaries need to be conditional. If so, we'll create a
+ // condition boolean, initialize it to 0 and
if (!ConditionalTempDestructionStack.empty()) {
- CondPtr = CreateTempAlloca(llvm::Type::Int1Ty, "cond");
-
+ CondPtr = CreateTempAlloca(llvm::Type::getInt1Ty(VMContext), "cond");
+
// Initialize it to false. This initialization takes place right after
// the alloca insert point.
- llvm::StoreInst *SI =
- new llvm::StoreInst(llvm::ConstantInt::getFalse(), CondPtr);
+ llvm::StoreInst *SI =
+ new llvm::StoreInst(llvm::ConstantInt::getFalse(VMContext), CondPtr);
llvm::BasicBlock *Block = AllocaInsertPt->getParent();
Block->getInstList().insertAfter((llvm::Instruction *)AllocaInsertPt, SI);
// Now set it to true.
- Builder.CreateStore(llvm::ConstantInt::getTrue(), CondPtr);
+ Builder.CreateStore(llvm::ConstantInt::getTrue(VMContext), CondPtr);
}
-
- LiveTemporaries.push_back(CXXLiveTemporaryInfo(Temporary, Ptr, DtorBlock,
+
+ LiveTemporaries.push_back(CXXLiveTemporaryInfo(Temporary, Ptr, DtorBlock,
CondPtr));
PushCleanupBlock(DtorBlock);
@@ -45,16 +45,22 @@ void CodeGenFunction::PushCXXTemporary(const CXXTemporary *Temporary,
void CodeGenFunction::PopCXXTemporary() {
const CXXLiveTemporaryInfo& Info = LiveTemporaries.back();
-
+
CleanupBlockInfo CleanupInfo = PopCleanupBlock();
- assert(CleanupInfo.CleanupBlock == Info.DtorBlock &&
+ assert(CleanupInfo.CleanupBlock == Info.DtorBlock &&
"Cleanup block mismatch!");
- assert(!CleanupInfo.SwitchBlock &&
+ assert(!CleanupInfo.SwitchBlock &&
"Should not have a switch block for temporary cleanup!");
- assert(!CleanupInfo.EndBlock &&
+ assert(!CleanupInfo.EndBlock &&
"Should not have an end block for temporary cleanup!");
-
- EmitBlock(Info.DtorBlock);
+
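+  // If nothing ever branches to the destructor block, splice its instructions
+  // into the current block instead of emitting an extra basic block.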
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+ if (CurBB && !CurBB->getTerminator() &&
+ Info.DtorBlock->getNumUses() == 0) {
+ CurBB->getInstList().splice(CurBB->end(), Info.DtorBlock->getInstList());
+ delete Info.DtorBlock;
+ } else
+ EmitBlock(Info.DtorBlock);
llvm::BasicBlock *CondEnd = 0;
@@ -63,52 +69,80 @@ void CodeGenFunction::PopCXXTemporary() {
if (Info.CondPtr) {
llvm::BasicBlock *CondBlock = createBasicBlock("cond.dtor.call");
CondEnd = createBasicBlock("cond.dtor.end");
-
+
llvm::Value *Cond = Builder.CreateLoad(Info.CondPtr);
Builder.CreateCondBr(Cond, CondBlock, CondEnd);
EmitBlock(CondBlock);
}
-
+
EmitCXXDestructorCall(Info.Temporary->getDestructor(),
Dtor_Complete, Info.ThisPtr);
if (CondEnd) {
     // Reset the condition to false.
- Builder.CreateStore(llvm::ConstantInt::getFalse(), Info.CondPtr);
+ Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext), Info.CondPtr);
EmitBlock(CondEnd);
}
-
+
LiveTemporaries.pop_back();
}
RValue
CodeGenFunction::EmitCXXExprWithTemporaries(const CXXExprWithTemporaries *E,
llvm::Value *AggLoc,
- bool isAggLocVolatile) {
+ bool IsAggLocVolatile,
+ bool IsInitializer) {
// If we shouldn't destroy the temporaries, just emit the
// child expression.
if (!E->shouldDestroyTemporaries())
- return EmitAnyExpr(E->getSubExpr(), AggLoc, isAggLocVolatile);
+ return EmitAnyExpr(E->getSubExpr(), AggLoc, IsAggLocVolatile,
+ /*IgnoreResult=*/false, IsInitializer);
// Keep track of the current cleanup stack depth.
size_t CleanupStackDepth = CleanupEntries.size();
(void) CleanupStackDepth;
unsigned OldNumLiveTemporaries = LiveTemporaries.size();
-
- RValue RV = EmitAnyExpr(E->getSubExpr(), AggLoc, isAggLocVolatile);
-
+
+ RValue RV = EmitAnyExpr(E->getSubExpr(), AggLoc, IsAggLocVolatile,
+ /*IgnoreResult=*/false, IsInitializer);
+
// Pop temporaries.
while (LiveTemporaries.size() > OldNumLiveTemporaries)
PopCXXTemporary();
-
+
assert(CleanupEntries.size() == CleanupStackDepth &&
"Cleanup size mismatch!");
-
+
return RV;
}
-void
+LValue CodeGenFunction::EmitCXXExprWithTemporariesLValue(
+ const CXXExprWithTemporaries *E) {
+ // If we shouldn't destroy the temporaries, just emit the
+ // child expression.
+ if (!E->shouldDestroyTemporaries())
+ return EmitLValue(E->getSubExpr());
+
+ // Keep track of the current cleanup stack depth.
+ size_t CleanupStackDepth = CleanupEntries.size();
+ (void) CleanupStackDepth;
+
+ unsigned OldNumLiveTemporaries = LiveTemporaries.size();
+
+ LValue LV = EmitLValue(E->getSubExpr());
+
+ // Pop temporaries.
+ while (LiveTemporaries.size() > OldNumLiveTemporaries)
+ PopCXXTemporary();
+
+ assert(CleanupEntries.size() == CleanupStackDepth &&
+ "Cleanup size mismatch!");
+
+ return LV;
+}
+
+void
CodeGenFunction::PushConditionalTempDestruction() {
// Store the current number of live temporaries.
ConditionalTempDestructionStack.push_back(LiveTemporaries.size());
@@ -117,13 +151,13 @@ CodeGenFunction::PushConditionalTempDestruction() {
void CodeGenFunction::PopConditionalTempDestruction() {
size_t NumLiveTemporaries = ConditionalTempDestructionStack.back();
ConditionalTempDestructionStack.pop_back();
-
+
// Pop temporaries.
while (LiveTemporaries.size() > NumLiveTemporaries) {
- assert(LiveTemporaries.back().CondPtr &&
+ assert(LiveTemporaries.back().CondPtr &&
"Conditional temporary must have a cond ptr!");
PopCXXTemporary();
- }
+ }
}
-
+
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 97391bc620be..bad166f01ef5 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -33,19 +33,49 @@ using namespace CodeGen;
// FIXME: Use iterator and sidestep silly type array creation.
-const
+const
CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionNoProtoType *FTNP) {
- return getFunctionInfo(FTNP->getResultType(),
- llvm::SmallVector<QualType, 16>());
+  // FIXME: Set calling convention correctly; it needs to be associated with the
+ // type somehow.
+ return getFunctionInfo(FTNP->getResultType(),
+ llvm::SmallVector<QualType, 16>(), 0);
}
-const
+const
CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionProtoType *FTP) {
llvm::SmallVector<QualType, 16> ArgTys;
// FIXME: Kill copy.
for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
ArgTys.push_back(FTP->getArgType(i));
- return getFunctionInfo(FTP->getResultType(), ArgTys);
+  // FIXME: Set calling convention correctly; it needs to be associated with the
+ // type somehow.
+ return getFunctionInfo(FTP->getResultType(), ArgTys, 0);
+}
+
+static unsigned getCallingConventionForDecl(const Decl *D) {
+ // Set the appropriate calling convention for the Function.
+ if (D->hasAttr<StdCallAttr>())
+ return llvm::CallingConv::X86_StdCall;
+
+ if (D->hasAttr<FastCallAttr>())
+ return llvm::CallingConv::X86_FastCall;
+
+ return llvm::CallingConv::C;
+}
+
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
+ const FunctionProtoType *FTP) {
+ llvm::SmallVector<QualType, 16> ArgTys;
+
+ // Add the 'this' pointer.
+ ArgTys.push_back(Context.getPointerType(Context.getTagDeclType(RD)));
+
+ for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
+ ArgTys.push_back(FTP->getArgType(i));
+
+  // FIXME: Set calling convention correctly; it needs to be associated with the
+ // type somehow.
+ return getFunctionInfo(FTP->getResultType(), ArgTys, 0);
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
@@ -53,22 +83,32 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
// Add the 'this' pointer unless this is a static method.
if (MD->isInstance())
ArgTys.push_back(MD->getThisType(Context));
-
- const FunctionProtoType *FTP = MD->getType()->getAsFunctionProtoType();
+
+ const FunctionProtoType *FTP = MD->getType()->getAs<FunctionProtoType>();
for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
ArgTys.push_back(FTP->getArgType(i));
- return getFunctionInfo(FTP->getResultType(), ArgTys);
+ return getFunctionInfo(FTP->getResultType(), ArgTys,
+ getCallingConventionForDecl(MD));
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
if (MD->isInstance())
return getFunctionInfo(MD);
+
+ unsigned CallingConvention = getCallingConventionForDecl(FD);
+ const FunctionType *FTy = FD->getType()->getAs<FunctionType>();
+ if (const FunctionNoProtoType *FNTP = dyn_cast<FunctionNoProtoType>(FTy))
+ return getFunctionInfo(FNTP->getResultType(),
+ llvm::SmallVector<QualType, 16>(),
+ CallingConvention);
- const FunctionType *FTy = FD->getType()->getAsFunctionType();
- if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FTy))
- return getFunctionInfo(FTP);
- return getFunctionInfo(cast<FunctionNoProtoType>(FTy));
+ const FunctionProtoType *FPT = cast<FunctionProtoType>(FTy);
+ llvm::SmallVector<QualType, 16> ArgTys;
+ // FIXME: Kill copy.
+ for (unsigned i = 0, e = FPT->getNumArgs(); i != e; ++i)
+ ArgTys.push_back(FPT->getArgType(i));
+ return getFunctionInfo(FPT->getResultType(), ArgTys, CallingConvention);
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
@@ -79,34 +119,39 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
e = MD->param_end(); i != e; ++i)
ArgTys.push_back((*i)->getType());
- return getFunctionInfo(MD->getResultType(), ArgTys);
+ return getFunctionInfo(MD->getResultType(), ArgTys,
+ getCallingConventionForDecl(MD));
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
- const CallArgList &Args) {
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
+ const CallArgList &Args,
+ unsigned CallingConvention){
// FIXME: Kill copy.
llvm::SmallVector<QualType, 16> ArgTys;
- for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
+ for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
i != e; ++i)
ArgTys.push_back(i->second);
- return getFunctionInfo(ResTy, ArgTys);
+ return getFunctionInfo(ResTy, ArgTys, CallingConvention);
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
- const FunctionArgList &Args) {
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
+ const FunctionArgList &Args,
+ unsigned CallingConvention){
// FIXME: Kill copy.
llvm::SmallVector<QualType, 16> ArgTys;
- for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
i != e; ++i)
ArgTys.push_back(i->second);
- return getFunctionInfo(ResTy, ArgTys);
+ return getFunctionInfo(ResTy, ArgTys, CallingConvention);
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
- const llvm::SmallVector<QualType, 16> &ArgTys) {
+ const llvm::SmallVector<QualType, 16> &ArgTys,
+ unsigned CallingConvention){
// Lookup or create unique function info.
llvm::FoldingSetNodeID ID;
- CGFunctionInfo::Profile(ID, ResTy, ArgTys.begin(), ArgTys.end());
+ CGFunctionInfo::Profile(ID, CallingConvention, ResTy,
+ ArgTys.begin(), ArgTys.end());
void *InsertPos = 0;
CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
@@ -114,17 +159,21 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
return *FI;
// Construct the function info.
- FI = new CGFunctionInfo(ResTy, ArgTys);
+ FI = new CGFunctionInfo(CallingConvention, ResTy, ArgTys);
FunctionInfos.InsertNode(FI, InsertPos);
// Compute ABI information.
- getABIInfo().computeInfo(*FI, getContext());
+ getABIInfo().computeInfo(*FI, getContext(), TheModule.getContext());
return *FI;
}
-CGFunctionInfo::CGFunctionInfo(QualType ResTy,
- const llvm::SmallVector<QualType, 16> &ArgTys) {
+CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
+ QualType ResTy,
+ const llvm::SmallVector<QualType, 16> &ArgTys)
+ : CallingConvention(_CallingConvention),
+ EffectiveCallingConvention(_CallingConvention)
+{
NumArgs = ArgTys.size();
Args = new ArgInfo[1 + NumArgs];
Args[0].type = ResTy;
@@ -134,20 +183,20 @@ CGFunctionInfo::CGFunctionInfo(QualType ResTy,
/***/
-void CodeGenTypes::GetExpandedTypes(QualType Ty,
+void CodeGenTypes::GetExpandedTypes(QualType Ty,
std::vector<const llvm::Type*> &ArgTys) {
const RecordType *RT = Ty->getAsStructureType();
assert(RT && "Can only expand structure types.");
const RecordDecl *RD = RT->getDecl();
- assert(!RD->hasFlexibleArrayMember() &&
+ assert(!RD->hasFlexibleArrayMember() &&
"Cannot expand structure with flexible array.");
-
+
for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
i != e; ++i) {
const FieldDecl *FD = *i;
- assert(!FD->isBitField() &&
+ assert(!FD->isBitField() &&
"Cannot expand structure with bit-field members.");
-
+
QualType FT = FD->getType();
if (CodeGenFunction::hasAggregateLLVMType(FT)) {
GetExpandedTypes(FT, ArgTys);
@@ -157,19 +206,19 @@ void CodeGenTypes::GetExpandedTypes(QualType Ty,
}
}
-llvm::Function::arg_iterator
+llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
llvm::Function::arg_iterator AI) {
const RecordType *RT = Ty->getAsStructureType();
assert(RT && "Can only expand structure types.");
RecordDecl *RD = RT->getDecl();
- assert(LV.isSimple() &&
- "Unexpected non-simple lvalue during struct expansion.");
+ assert(LV.isSimple() &&
+ "Unexpected non-simple lvalue during struct expansion.");
llvm::Value *Addr = LV.getAddress();
for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
i != e; ++i) {
- FieldDecl *FD = *i;
+ FieldDecl *FD = *i;
QualType FT = FD->getType();
// FIXME: What are the right qualifiers here?
@@ -185,8 +234,8 @@ CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
return AI;
}
-void
-CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
+void
+CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
llvm::SmallVector<llvm::Value*, 16> &Args) {
const RecordType *RT = Ty->getAsStructureType();
assert(RT && "Can only expand structure types.");
@@ -196,16 +245,16 @@ CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
llvm::Value *Addr = RV.getAggregateAddr();
for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
i != e; ++i) {
- FieldDecl *FD = *i;
+ FieldDecl *FD = *i;
QualType FT = FD->getType();
-
+
// FIXME: What are the right qualifiers here?
LValue LV = EmitLValueForField(Addr, FD, false, 0);
if (CodeGenFunction::hasAggregateLLVMType(FT)) {
ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
} else {
RValue RV = EmitLoadOfLValue(LV, FT);
- assert(RV.isScalar() &&
+ assert(RV.isScalar() &&
"Unexpected non-scalar rvalue during struct expansion.");
Args.push_back(RV.getScalarVal());
}
@@ -221,7 +270,7 @@ CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
const llvm::Type *Ty,
CodeGenFunction &CGF) {
- const llvm::Type *SrcTy =
+ const llvm::Type *SrcTy =
cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
@@ -244,9 +293,9 @@ static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
// Otherwise do coercion through memory. This is stupid, but
// simple.
llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
- llvm::Value *Casted =
+ llvm::Value *Casted =
CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
- llvm::StoreInst *Store =
+ llvm::StoreInst *Store =
CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
// FIXME: Use better alignment / avoid requiring aligned store.
Store->setAlignment(1);
@@ -263,7 +312,7 @@ static void CreateCoercedStore(llvm::Value *Src,
llvm::Value *DstPtr,
CodeGenFunction &CGF) {
const llvm::Type *SrcTy = Src->getType();
- const llvm::Type *DstTy =
+ const llvm::Type *DstTy =
cast<llvm::PointerType>(DstPtr->getType())->getElementType();
uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
@@ -287,7 +336,7 @@ static void CreateCoercedStore(llvm::Value *Src,
// to that information.
llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
CGF.Builder.CreateStore(Src, Tmp);
- llvm::Value *Casted =
+ llvm::Value *Casted =
CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
// FIXME: Use better alignment / avoid requiring aligned load.
@@ -321,25 +370,25 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
case ABIArgInfo::Indirect: {
assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
- ResultType = llvm::Type::VoidTy;
+ ResultType = llvm::Type::getVoidTy(getLLVMContext());
const llvm::Type *STy = ConvertType(RetTy);
ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
break;
}
case ABIArgInfo::Ignore:
- ResultType = llvm::Type::VoidTy;
+ ResultType = llvm::Type::getVoidTy(getLLVMContext());
break;
case ABIArgInfo::Coerce:
ResultType = RetAI.getCoerceToType();
break;
}
-
- for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
+
+ for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
ie = FI.arg_end(); it != ie; ++it) {
const ABIArgInfo &AI = it->info;
-
+
switch (AI.getKind()) {
case ABIArgInfo::Ignore:
break;
@@ -359,7 +408,7 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
case ABIArgInfo::Direct:
ArgTys.push_back(ConvertType(it->type));
break;
-
+
case ABIArgInfo::Expand:
GetExpandedTypes(it->type, ArgTys);
break;
@@ -371,10 +420,13 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
const Decl *TargetDecl,
- AttributeListType &PAL) {
+ AttributeListType &PAL,
+ unsigned &CallingConv) {
unsigned FuncAttrs = 0;
unsigned RetAttrs = 0;
+ CallingConv = FI.getEffectiveCallingConvention();
+
// FIXME: handle sseregparm someday...
if (TargetDecl) {
if (TargetDecl->hasAttr<NoThrowAttr>())
@@ -385,6 +437,8 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
FuncAttrs |= llvm::Attribute::ReadNone;
else if (TargetDecl->hasAttr<PureAttr>())
FuncAttrs |= llvm::Attribute::ReadOnly;
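+    // A malloc-like function returns storage that aliases nothing else, which
+    // maps to a noalias return attribute.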
+ if (TargetDecl->hasAttr<MallocAttr>())
+ RetAttrs |= llvm::Attribute::NoAlias;
}
if (CompileOpts.DisableRedZone)
@@ -412,7 +466,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
break;
case ABIArgInfo::Indirect:
- PAL.push_back(llvm::AttributeWithIndex::get(Index,
+ PAL.push_back(llvm::AttributeWithIndex::get(Index,
llvm::Attribute::StructRet |
llvm::Attribute::NoAlias));
++Index;
@@ -426,7 +480,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
break;
case ABIArgInfo::Expand:
- assert(0 && "Invalid ABI kind for return argument");
+ assert(0 && "Invalid ABI kind for return argument");
}
if (RetAttrs)
@@ -437,12 +491,12 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
// register variable.
signed RegParm = 0;
if (TargetDecl)
- if (const RegparmAttr *RegParmAttr
+ if (const RegparmAttr *RegParmAttr
= TargetDecl->getAttr<RegparmAttr>())
RegParm = RegParmAttr->getNumParams();
unsigned PointerWidth = getContext().Target.getPointerWidth(0);
- for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
+ for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
ie = FI.arg_end(); it != ie; ++it) {
QualType ParamType = it->type;
const ABIArgInfo &AI = it->info;
@@ -453,7 +507,9 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
break;
case ABIArgInfo::Indirect:
- Attributes |= llvm::Attribute::ByVal;
+ if (AI.getIndirectByVal())
+ Attributes |= llvm::Attribute::ByVal;
+
Attributes |=
llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
// byval disables readnone and readonly.
@@ -481,10 +537,10 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
case ABIArgInfo::Ignore:
// Skip increment, no matching LLVM parameter.
- continue;
+ continue;
case ABIArgInfo::Expand: {
- std::vector<const llvm::Type*> Tys;
+ std::vector<const llvm::Type*> Tys;
// FIXME: This is rather inefficient. Do we ever actually need to do
// anything here? The result should be just reconstructed on the other
// side, so extension should be a non-issue.
@@ -493,7 +549,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
continue;
}
}
-
+
if (Attributes)
PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
++Index;
@@ -505,18 +561,31 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::Function *Fn,
const FunctionArgList &Args) {
+ // If this is an implicit-return-zero function, go ahead and
+ // initialize the return value. TODO: it might be nice to have
+ // a more general mechanism for this that didn't require synthesized
+ // return statements.
+ if (const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
+ if (FD->hasImplicitReturnZero()) {
+ QualType RetTy = FD->getResultType().getUnqualifiedType();
+ const llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
+ llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
+ Builder.CreateStore(Zero, ReturnValue);
+ }
+ }
+
// FIXME: We no longer need the types from FunctionArgList; lift up and
// simplify.
// Emit allocs for param decls. Give the LLVM Argument nodes names.
llvm::Function::arg_iterator AI = Fn->arg_begin();
-
+
// Name the struct return argument.
if (CGM.ReturnTypeUsesSret(FI)) {
AI->setName("agg.result");
++AI;
}
-
+
assert(FI.arg_size() == Args.size() &&
"Mismatch between function signature & arguments.");
CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
@@ -541,7 +610,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
V = EmitScalarConversion(V, Ty, Arg->getType());
}
}
- EmitParmDecl(*Arg, V);
+ EmitParmDecl(*Arg, V);
break;
}
@@ -565,36 +634,36 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
EmitParmDecl(*Arg, V);
break;
}
-
+
case ABIArgInfo::Expand: {
// If this structure was expanded into multiple arguments then
// we need to create a temporary and reconstruct it from the
// arguments.
std::string Name = Arg->getNameAsString();
- llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(Ty),
+ llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(Ty),
(Name + ".addr").c_str());
// FIXME: What are the right qualifiers here?
- llvm::Function::arg_iterator End =
- ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp,0), AI);
+ llvm::Function::arg_iterator End =
+ ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp, Qualifiers()), AI);
EmitParmDecl(*Arg, Temp);
// Name the arguments used in expansion and increment AI.
unsigned Index = 0;
for (; AI != End; ++AI, ++Index)
- AI->setName(Name + "." + llvm::utostr(Index));
+ AI->setName(Name + "." + llvm::Twine(Index));
continue;
}
case ABIArgInfo::Ignore:
// Initialize the local variable appropriately.
- if (hasAggregateLLVMType(Ty)) {
+ if (hasAggregateLLVMType(Ty)) {
EmitParmDecl(*Arg, CreateTempAlloca(ConvertTypeForMem(Ty)));
} else {
EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
}
-
+
// Skip increment, no matching LLVM parameter.
- continue;
+ continue;
case ABIArgInfo::Coerce: {
assert(AI != Fn->arg_end() && "Argument mismatch!");
@@ -653,16 +722,16 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
case ABIArgInfo::Ignore:
break;
-
+
case ABIArgInfo::Coerce:
RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
break;
case ABIArgInfo::Expand:
- assert(0 && "Invalid ABI kind for return argument");
+ assert(0 && "Invalid ABI kind for return argument");
}
}
-
+
if (RV) {
Builder.CreateRet(RV);
} else {
@@ -673,12 +742,12 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
if (ArgType->isReferenceType())
return EmitReferenceBindingToExpr(E, ArgType);
-
+
return EmitAnyExprToTemp(E);
}
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
- llvm::Value *Callee,
+ llvm::Value *Callee,
const CallArgList &CallArgs,
const Decl *TargetDecl) {
// FIXME: We no longer need the types from CallArgs; lift up and simplify.
@@ -688,17 +757,17 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// location that we would like to return into.
QualType RetTy = CallInfo.getReturnType();
const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
-
-
+
+
// If the call returns a temporary with struct return, create a temporary
// alloca to hold the result.
if (CGM.ReturnTypeUsesSret(CallInfo))
Args.push_back(CreateTempAlloca(ConvertTypeForMem(RetTy)));
-
+
assert(CallInfo.arg_size() == CallArgs.size() &&
"Mismatch between function signature & arguments.");
CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
- for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
+ for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
I != E; ++I, ++info_it) {
const ABIArgInfo &ArgInfo = info_it->info;
RValue RV = I->first;
@@ -711,7 +780,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (RV.isScalar())
EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
else
- StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
+ StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
} else {
Args.push_back(RV.getAggregateAddr());
}
@@ -730,7 +799,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
}
break;
-
+
case ABIArgInfo::Ignore:
break;
@@ -743,9 +812,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
} else if (RV.isComplex()) {
SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
- } else
+ } else
SrcPtr = RV.getAggregateAddr();
- Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
+ Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
*this));
break;
}
@@ -755,7 +824,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
break;
}
}
-
+
// If the callee is a bitcast of a function to a varargs pointer to function
// type, check to see if we can remove the bitcast. This handles some cases
// with unprototyped functions.
@@ -765,7 +834,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
const llvm::FunctionType *CurFT =
cast<llvm::FunctionType>(CurPT->getElementType());
const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
-
+
if (CE->getOpcode() == llvm::Instruction::BitCast &&
ActualFT->getReturnType() == CurFT->getReturnType() &&
ActualFT->getNumParams() == CurFT->getNumParams() &&
@@ -776,7 +845,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
ArgsMatch = false;
break;
}
-
+
// Strip the cast if we can get away with it. This is a nice cleanup,
// but also allows us to inline the function at -O0 if it is marked
// always_inline.
@@ -784,28 +853,27 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Callee = CalleeF;
}
}
-
+
llvm::BasicBlock *InvokeDest = getInvokeDest();
+ unsigned CallingConv;
CodeGen::AttributeListType AttributeList;
- CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList);
+ CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
AttributeList.end());
-
+
llvm::CallSite CS;
if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
} else {
llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
- CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
+ CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
Args.data(), Args.data()+Args.size());
EmitBlock(Cont);
}
CS.setAttributes(Attrs);
- if (const llvm::Function *F =
- dyn_cast<llvm::Function>(Callee->stripPointerCasts()))
- CS.setCallingConv(F->getCallingConv());
+ CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
// If the call doesn't return, finish the basic block and clear the
// insertion point; this allows the rest of IRgen to discard
@@ -813,18 +881,18 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (CS.doesNotReturn()) {
Builder.CreateUnreachable();
Builder.ClearInsertionPoint();
-
+
// FIXME: For now, emit a dummy basic block because expr emitters in
// generally are not ready to handle emitting expressions at unreachable
// points.
EnsureInsertPoint();
-
+
// Return a reasonable RValue.
return GetUndefRValue(RetTy);
- }
+ }
llvm::Instruction *CI = CS.getInstruction();
- if (Builder.isNamePreserving() && CI->getType() != llvm::Type::VoidTy)
+ if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
CI->setName("call");
switch (RetAI.getKind()) {
@@ -866,7 +934,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
case ABIArgInfo::Expand:
- assert(0 && "Invalid ABI kind for return argument");
+ assert(0 && "Invalid ABI kind for return argument");
}
assert(0 && "Unhandled ABIArgInfo::Kind");
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
index daf6f0004501..ebf801dcaad1 100644
--- a/lib/CodeGen/CGCall.h
+++ b/lib/CodeGen/CGCall.h
@@ -49,9 +49,9 @@ namespace CodeGen {
/// FunctionArgList - Type for representing both the decl and type
/// of parameters to a function. The decl must be either a
/// ParmVarDecl or ImplicitParamDecl.
- typedef llvm::SmallVector<std::pair<const VarDecl*, QualType>,
+ typedef llvm::SmallVector<std::pair<const VarDecl*, QualType>,
16> FunctionArgList;
-
+
/// CGFunctionInfo - Class to encapsulate the information about a
/// function definition.
class CGFunctionInfo : public llvm::FoldingSetNode {
@@ -60,6 +60,14 @@ namespace CodeGen {
ABIArgInfo info;
};
+ /// The LLVM::CallingConv to use for this function (as specified by the
+ /// user).
+ unsigned CallingConvention;
+
+ /// The LLVM::CallingConv to actually use for this function, which may
+ /// depend on the ABI.
+ unsigned EffectiveCallingConvention;
+
unsigned NumArgs;
ArgInfo *Args;
@@ -67,7 +75,8 @@ namespace CodeGen {
typedef const ArgInfo *const_arg_iterator;
typedef ArgInfo *arg_iterator;
- CGFunctionInfo(QualType ResTy,
+ CGFunctionInfo(unsigned CallingConvention,
+ QualType ResTy,
const llvm::SmallVector<QualType, 16> &ArgTys);
~CGFunctionInfo() { delete[] Args; }
@@ -78,21 +87,37 @@ namespace CodeGen {
unsigned arg_size() const { return NumArgs; }
+ /// getCallingConvention - Return the user specified calling
+ /// convention.
+ unsigned getCallingConvention() const { return CallingConvention; }
+
+ /// getEffectiveCallingConvention - Return the actual calling convention to
+ /// use, which may depend on the ABI.
+ unsigned getEffectiveCallingConvention() const {
+ return EffectiveCallingConvention;
+ }
+ void setEffectiveCallingConvention(unsigned Value) {
+ EffectiveCallingConvention = Value;
+ }
+
QualType getReturnType() const { return Args[0].type; }
ABIArgInfo &getReturnInfo() { return Args[0].info; }
const ABIArgInfo &getReturnInfo() const { return Args[0].info; }
void Profile(llvm::FoldingSetNodeID &ID) {
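+      // The calling convention is part of the uniquing key, so signatures that
+      // differ only in convention get distinct CGFunctionInfo nodes.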
+ ID.AddInteger(getCallingConvention());
getReturnType().Profile(ID);
for (arg_iterator it = arg_begin(), ie = arg_end(); it != ie; ++it)
it->type.Profile(ID);
}
template<class Iterator>
- static void Profile(llvm::FoldingSetNodeID &ID,
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ unsigned CallingConvention,
QualType ResTy,
Iterator begin,
Iterator end) {
+ ID.AddInteger(CallingConvention);
ResTy.Profile(ID);
for (; begin != end; ++begin)
begin->Profile(ID);
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index 2bf8a222a253..4c624205b4ca 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
@@ -19,6 +20,7 @@
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/FileManager.h"
+#include "clang/Basic/Version.h"
#include "clang/Frontend/CompileOptions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
@@ -47,6 +49,22 @@ void CGDebugInfo::setLocation(SourceLocation Loc) {
CurLoc = M->getContext().getSourceManager().getInstantiationLoc(Loc);
}
+/// getContext - Get context info for the decl.
+llvm::DIDescriptor CGDebugInfo::getContext(const VarDecl *Decl,
+ llvm::DIDescriptor &CompileUnit) {
+ if (Decl->isFileVarDecl())
+ return CompileUnit;
+ if (Decl->getDeclContext()->isFunctionOrMethod()) {
+ // Find the last subprogram in region stack.
+ for (unsigned RI = RegionStack.size(), RE = 0; RI != RE; --RI) {
+ llvm::DIDescriptor R = RegionStack[RI - 1];
+ if (R.isSubprogram())
+ return R;
+ }
+ }
+ return CompileUnit;
+}
+
/// getOrCreateCompileUnit - Get the compile unit from the cache or create a new
/// one if necessary. This returns null for invalid source locations.
llvm::DICompileUnit CGDebugInfo::getOrCreateCompileUnit(SourceLocation Loc) {
@@ -59,7 +77,7 @@ llvm::DICompileUnit CGDebugInfo::getOrCreateCompileUnit(SourceLocation Loc) {
FileName = PLoc.getFilename();
FID = PLoc.getIncludeLoc().getRawEncoding();
}
-
+
// See if this compile unit has been used before.
llvm::DICompileUnit &Unit = CompileUnitCache[FID];
if (!Unit.isNull()) return Unit;
@@ -104,7 +122,11 @@ llvm::DICompileUnit CGDebugInfo::getOrCreateCompileUnit(SourceLocation Loc) {
LangTag = llvm::dwarf::DW_LANG_C89;
}
- std::string Producer = "clang 1.0";// FIXME: clang version.
+ std::string Producer =
+#ifdef CLANG_VENDOR
+ CLANG_VENDOR
+#endif
+ "clang " CLANG_VERSION_STRING;
bool isOptimized = LO.Optimize;
const char *Flags = ""; // FIXME: Encode command line options.
@@ -112,10 +134,10 @@ llvm::DICompileUnit CGDebugInfo::getOrCreateCompileUnit(SourceLocation Loc) {
unsigned RuntimeVers = 0;
if (LO.ObjC1)
RuntimeVers = LO.ObjCNonFragileABI ? 2 : 1;
-
+
// Create new compile unit.
return Unit = DebugFactory.CreateCompileUnit(LangTag, AbsFileName.getLast(),
- AbsFileName.getDirname(),
+ AbsFileName.getDirname(),
Producer, isMain, isOptimized,
Flags, RuntimeVers);
}
@@ -143,14 +165,15 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT,
case BuiltinType::LongLong: Encoding = llvm::dwarf::DW_ATE_signed; break;
case BuiltinType::Bool: Encoding = llvm::dwarf::DW_ATE_boolean; break;
case BuiltinType::Float:
+ case BuiltinType::LongDouble:
case BuiltinType::Double: Encoding = llvm::dwarf::DW_ATE_float; break;
- }
+ }
// Bit size, align and offset of the type.
uint64_t Size = M->getContext().getTypeSize(BT);
uint64_t Align = M->getContext().getTypeAlign(BT);
uint64_t Offset = 0;
-
- return DebugFactory.CreateBasicType(Unit,
+
+ return DebugFactory.CreateBasicType(Unit,
BT->getName(M->getContext().getLangOptions()),
Unit, 0, Size, Align,
Offset, /*flags*/ 0, Encoding);
@@ -162,52 +185,72 @@ llvm::DIType CGDebugInfo::CreateType(const ComplexType *Ty,
unsigned Encoding = llvm::dwarf::DW_ATE_complex_float;
if (Ty->isComplexIntegerType())
Encoding = llvm::dwarf::DW_ATE_lo_user;
-
+
uint64_t Size = M->getContext().getTypeSize(Ty);
uint64_t Align = M->getContext().getTypeAlign(Ty);
uint64_t Offset = 0;
-
+
return DebugFactory.CreateBasicType(Unit, "complex",
Unit, 0, Size, Align,
Offset, /*flags*/ 0, Encoding);
}
-/// getOrCreateCVRType - Get the CVR qualified type from the cache or create
+/// CreateCVRType - Get the qualified type from the cache or create
/// a new one if necessary.
-llvm::DIType CGDebugInfo::CreateCVRType(QualType Ty, llvm::DICompileUnit Unit) {
+llvm::DIType CGDebugInfo::CreateQualifiedType(QualType Ty, llvm::DICompileUnit Unit) {
+ QualifierCollector Qc;
+ const Type *T = Qc.strip(Ty);
+
+ // Ignore these qualifiers for now.
+ Qc.removeObjCGCAttr();
+ Qc.removeAddressSpace();
+
// We will create one Derived type for one qualifier and recurse to handle any
// additional ones.
- llvm::DIType FromTy;
unsigned Tag;
- if (Ty.isConstQualified()) {
+ if (Qc.hasConst()) {
Tag = llvm::dwarf::DW_TAG_const_type;
- Ty.removeConst();
- FromTy = getOrCreateType(Ty, Unit);
- } else if (Ty.isVolatileQualified()) {
+ Qc.removeConst();
+ } else if (Qc.hasVolatile()) {
Tag = llvm::dwarf::DW_TAG_volatile_type;
- Ty.removeVolatile();
- FromTy = getOrCreateType(Ty, Unit);
- } else {
- assert(Ty.isRestrictQualified() && "Unknown type qualifier for debug info");
+ Qc.removeVolatile();
+ } else if (Qc.hasRestrict()) {
Tag = llvm::dwarf::DW_TAG_restrict_type;
- Ty.removeRestrict();
- FromTy = getOrCreateType(Ty, Unit);
+ Qc.removeRestrict();
+ } else {
+ assert(Qc.empty() && "Unknown type qualifier for debug info");
+ return getOrCreateType(QualType(T, 0), Unit);
}
-
+
+ llvm::DIType FromTy = getOrCreateType(Qc.apply(T), Unit);
+
// No need to fill in the Name, Line, Size, Alignment, Offset in case of
// CVR derived types.
return DebugFactory.CreateDerivedType(Tag, Unit, "", llvm::DICompileUnit(),
0, 0, 0, 0, 0, FromTy);
}
+llvm::DIType CGDebugInfo::CreateType(const ObjCObjectPointerType *Ty,
+ llvm::DICompileUnit Unit) {
+ llvm::DIType EltTy = getOrCreateType(Ty->getPointeeType(), Unit);
+
+ // Bit size, align and offset of the type.
+ uint64_t Size = M->getContext().getTypeSize(Ty);
+ uint64_t Align = M->getContext().getTypeAlign(Ty);
+
+ return DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type, Unit,
+ "", llvm::DICompileUnit(),
+ 0, Size, Align, 0, 0, EltTy);
+}
+
llvm::DIType CGDebugInfo::CreateType(const PointerType *Ty,
llvm::DICompileUnit Unit) {
llvm::DIType EltTy = getOrCreateType(Ty->getPointeeType(), Unit);
-
+
// Bit size, align and offset of the type.
uint64_t Size = M->getContext().getTypeSize(Ty);
uint64_t Align = M->getContext().getTypeAlign(Ty);
-
+
return DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type, Unit,
"", llvm::DICompileUnit(),
0, Size, Align, 0, 0, EltTy);
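A quick standalone sketch of the one-qualifier-per-call recursion CreateQualifiedType uses above (an illustrative model only -- the struct, function, and tag strings here are hypothetical stand-ins, not the clang/LLVM API). Each call strips a single qualifier, wraps the debug type of the remainder in a derived node, and recurses, so 'const volatile int' yields the chain DW_TAG_const_type -> DW_TAG_volatile_type -> int.

    #include <iostream>
    #include <memory>
    #include <string>

    // Stand-in for a debug type node: a DWARF tag plus the type it wraps.
    struct DebugNode {
      std::string Tag;                       // e.g. "DW_TAG_const_type" or a base type name
      std::shared_ptr<DebugNode> Underlying; // null for base types
    };

    struct Quals { bool Const; bool Volatile; bool Restrict; };

    // Peel one qualifier per call and recurse, mirroring the logic above.
    std::shared_ptr<DebugNode> createQualifiedType(Quals Q, const std::string &Base) {
      std::string Tag;
      if (Q.Const)         { Tag = "DW_TAG_const_type";    Q.Const = false; }
      else if (Q.Volatile) { Tag = "DW_TAG_volatile_type"; Q.Volatile = false; }
      else if (Q.Restrict) { Tag = "DW_TAG_restrict_type"; Q.Restrict = false; }
      else return std::make_shared<DebugNode>(DebugNode{Base, nullptr}); // no qualifiers left
      return std::make_shared<DebugNode>(DebugNode{Tag, createQualifiedType(Q, Base)});
    }

    int main() {
      auto T = createQualifiedType({true, true, false}, "int"); // const volatile int
      for (auto N = T; N; N = N->Underlying)
        std::cout << N->Tag << '\n';  // DW_TAG_const_type, DW_TAG_volatile_type, int
      return 0;
    }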
@@ -258,14 +301,16 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
Elements = DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
EltTys.clear();
+ unsigned Flags = llvm::DIType::FlagAppleBlock;
+
EltTy = DebugFactory.CreateCompositeType(Tag, Unit, "__block_descriptor",
- DefUnit, 0, FieldOffset, 0, 0, 0,
+ DefUnit, 0, FieldOffset, 0, 0, Flags,
llvm::DIType(), Elements);
-
+
// Bit size, align and offset of the type.
uint64_t Size = M->getContext().getTypeSize(Ty);
uint64_t Align = M->getContext().getTypeAlign(Ty);
-
+
DescTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type,
Unit, "", llvm::DICompileUnit(),
0, Size, Align, 0, 0, EltTy);
@@ -329,9 +374,9 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
Elements = DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
EltTy = DebugFactory.CreateCompositeType(Tag, Unit, "__block_literal_generic",
- DefUnit, 0, FieldOffset, 0, 0, 0,
+ DefUnit, 0, FieldOffset, 0, 0, Flags,
llvm::DIType(), Elements);
-
+
BlockLiteralGenericSet = true;
BlockLiteralGeneric
= DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type, Unit,
@@ -345,7 +390,7 @@ llvm::DIType CGDebugInfo::CreateType(const TypedefType *Ty,
// Typedefs are derived from some other type. If we have a typedef of a
// typedef, make sure to emit the whole chain.
llvm::DIType Src = getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit);
-
+
// We don't set size information, but do specify where the typedef was
// declared.
std::string TyName = Ty->getDecl()->getNameAsString();
@@ -366,7 +411,7 @@ llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
// Add the result type at least.
EltTys.push_back(getOrCreateType(Ty->getResultType(), Unit));
-
+
// Set up remainder of arguments if there is a prototype.
// FIXME: IF NOT, HOW IS THIS REPRESENTED? llvm-gcc doesn't represent '...'!
if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(Ty)) {
@@ -378,7 +423,7 @@ llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
llvm::DIArray EltTypeArray =
DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
-
+
return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_subroutine_type,
Unit, "", llvm::DICompileUnit(),
0, 0, 0, 0, 0,
@@ -389,7 +434,7 @@ llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
llvm::DICompileUnit Unit) {
RecordDecl *Decl = Ty->getDecl();
-
+
unsigned Tag;
if (Decl->isStruct())
Tag = llvm::dwarf::DW_TAG_structure_type;
@@ -412,24 +457,24 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
DefUnit = getOrCreateCompileUnit(Decl->getLocation());
Line = PLoc.getLine();
}
-
+
// Records and classes and unions can all be recursive. To handle them, we
// first generate a debug descriptor for the struct as a forward declaration.
// Then (if it is a definition) we go through and get debug info for all of
// its members. Finally, we create a descriptor for the complete type (which
// may refer to the forward decl if the struct is recursive) and replace all
// uses of the forward declaration with the final definition.
- llvm::DIType FwdDecl =
+ llvm::DICompositeType FwdDecl =
DebugFactory.CreateCompositeType(Tag, Unit, Name, DefUnit, Line, 0, 0, 0, 0,
llvm::DIType(), llvm::DIArray());
-
+
// If this is just a forward declaration, return it.
if (!Decl->getDefinition(M->getContext()))
return FwdDecl;
// Otherwise, insert it into the TypeCache so that recursive uses will find
// it.
- TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl;
+ TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl.getNode();
// Convert all the elements.
llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
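The forward-declaration step above is what keeps debug-info generation for self-referential records from recursing forever. A standalone sketch of the same break-the-cycle pattern (hypothetical names and a string-keyed cache for brevity -- not the CGDebugInfo code, which keys on the QualType pointer and later patches the placeholder via replaceAllUsesWith):

    #include <iostream>
    #include <map>
    #include <memory>
    #include <string>
    #include <vector>

    struct TypeNode {
      std::string Name;
      std::vector<std::shared_ptr<TypeNode>> Members;
    };

    std::map<std::string, std::shared_ptr<TypeNode>> Cache;

    std::shared_ptr<TypeNode> getOrCreate(const std::string &Name);

    // Model of 'struct Node { int i; Node *next; };' -- the second member
    // refers back to the record that is still being built.
    std::shared_ptr<TypeNode> createRecord(const std::string &Name,
                                           const std::vector<std::string> &Members) {
      auto Fwd = std::make_shared<TypeNode>(TypeNode{Name + " (fwd)", {}});
      Cache[Name] = Fwd;                 // seed the cache *before* converting members
      std::vector<std::shared_ptr<TypeNode>> Elts;
      for (const std::string &M : Members)
        Elts.push_back(getOrCreate(M));  // a self-reference finds Fwd instead of recursing
      auto Real = std::make_shared<TypeNode>(TypeNode{Name, Elts});
      Cache[Name] = Real;                // mirrors the TypeCache update; the real code also
      return Real;                       // RAUWs Fwd with Real so captured uses get patched
    }

    std::shared_ptr<TypeNode> getOrCreate(const std::string &Name) {
      auto It = Cache.find(Name);
      if (It != Cache.end()) return It->second;
      if (Name == "Node") return createRecord("Node", {"int", "Node"});
      auto Base = std::make_shared<TypeNode>(TypeNode{Name, {}});
      Cache[Name] = Base;
      return Base;
    }

    int main() {
      auto N = getOrCreate("Node");
      std::cout << N->Members[1]->Name << '\n';  // "Node (fwd)" -- hence the need for RAUW
      return 0;
    }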
@@ -438,7 +483,7 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
unsigned FieldNo = 0;
for (RecordDecl::field_iterator I = Decl->field_begin(),
- E = Decl->field_end();
+ E = Decl->field_end();
I != E; ++I, ++FieldNo) {
FieldDecl *Field = *I;
llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);
@@ -454,7 +499,7 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
PresumedLoc PLoc = SM.getPresumedLoc(FieldDefLoc);
llvm::DICompileUnit FieldDefUnit;
unsigned FieldLine = 0;
-
+
if (!PLoc.isInvalid()) {
FieldDefUnit = getOrCreateCompileUnit(FieldDefLoc);
FieldLine = PLoc.getLine();
@@ -464,18 +509,18 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
uint64_t FieldSize = 0;
unsigned FieldAlign = 0;
if (!FType->isIncompleteArrayType()) {
-
+
// Bit size, align and offset of the type.
FieldSize = M->getContext().getTypeSize(FType);
Expr *BitWidth = Field->getBitWidth();
if (BitWidth)
FieldSize = BitWidth->EvaluateAsInt(M->getContext()).getZExtValue();
-
+
FieldAlign = M->getContext().getTypeAlign(FType);
}
- uint64_t FieldOffset = RL.getFieldOffset(FieldNo);
-
+ uint64_t FieldOffset = RL.getFieldOffset(FieldNo);
+
// Create a DW_TAG_member node to remember the offset of this field in the
// struct. FIXME: This is an absolutely insane way to capture this
// information. When we gut debug info, this should be fixed.
@@ -485,23 +530,25 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
FieldOffset, 0, FieldTy);
EltTys.push_back(FieldTy);
}
-
+
llvm::DIArray Elements =
DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
// Bit size, align and offset of the type.
uint64_t Size = M->getContext().getTypeSize(Ty);
uint64_t Align = M->getContext().getTypeAlign(Ty);
-
- llvm::DIType RealDecl =
+
+ llvm::DICompositeType RealDecl =
DebugFactory.CreateCompositeType(Tag, Unit, Name, DefUnit, Line, Size,
Align, 0, 0, llvm::DIType(), Elements);
+ // Update TypeCache.
+ TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = RealDecl.getNode();
+
// Now that we have a real decl for the struct, replace anything using the
// old decl with the new one. This will recursively update the debug info.
- FwdDecl.getGV()->replaceAllUsesWith(RealDecl.getGV());
- FwdDecl.getGV()->eraseFromParent();
-
+ FwdDecl.replaceAllUsesWith(RealDecl);
+
return RealDecl;
}
@@ -509,7 +556,7 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
llvm::DICompileUnit Unit) {
ObjCInterfaceDecl *Decl = Ty->getDecl();
-
+
unsigned Tag = llvm::dwarf::DW_TAG_structure_type;
SourceManager &SM = M->getContext().getSourceManager();
@@ -520,7 +567,7 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
PresumedLoc PLoc = SM.getPresumedLoc(Decl->getLocation());
unsigned Line = PLoc.isInvalid() ? 0 : PLoc.getLine();
-
+
unsigned RuntimeLang = DefUnit.getLanguage();
// To handle recursive interface, we
@@ -529,27 +576,27 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
// its members. Finally, we create a descriptor for the complete type (which
// may refer to the forward decl if the struct is recursive) and replace all
// uses of the forward declaration with the final definition.
- llvm::DIType FwdDecl =
+ llvm::DICompositeType FwdDecl =
DebugFactory.CreateCompositeType(Tag, Unit, Name, DefUnit, Line, 0, 0, 0, 0,
llvm::DIType(), llvm::DIArray(),
RuntimeLang);
-
+
// If this is just a forward declaration, return it.
if (Decl->isForwardDecl())
return FwdDecl;
// Otherwise, insert it into the TypeCache so that recursive uses will find
// it.
- TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl;
+ TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl.getNode();
// Convert all the elements.
llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
ObjCInterfaceDecl *SClass = Decl->getSuperClass();
if (SClass) {
- llvm::DIType SClassTy =
+ llvm::DIType SClassTy =
getOrCreateType(M->getContext().getObjCInterfaceType(SClass), Unit);
- llvm::DIType InhTag =
+ llvm::DIType InhTag =
DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_inheritance,
Unit, "", llvm::DICompileUnit(), 0, 0, 0,
0 /* offset */, 0, SClassTy);
@@ -576,13 +623,13 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
PresumedLoc PLoc = SM.getPresumedLoc(FieldDefLoc);
unsigned FieldLine = PLoc.isInvalid() ? 0 : PLoc.getLine();
-
+
QualType FType = Field->getType();
uint64_t FieldSize = 0;
unsigned FieldAlign = 0;
if (!FType->isIncompleteArrayType()) {
-
+
// Bit size, align and offset of the type.
FieldSize = M->getContext().getTypeSize(FType);
Expr *BitWidth = Field->getBitWidth();
@@ -592,14 +639,14 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
FieldAlign = M->getContext().getTypeAlign(FType);
}
- uint64_t FieldOffset = RL.getFieldOffset(FieldNo);
-
+ uint64_t FieldOffset = RL.getFieldOffset(FieldNo);
+
unsigned Flags = 0;
if (Field->getAccessControl() == ObjCIvarDecl::Protected)
Flags = llvm::DIType::FlagProtected;
else if (Field->getAccessControl() == ObjCIvarDecl::Private)
Flags = llvm::DIType::FlagPrivate;
-
+
// Create a DW_TAG_member node to remember the offset of this field in the
// struct. FIXME: This is an absolutely insane way to capture this
// information. When we gut debug info, this should be fixed.
@@ -609,24 +656,26 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
FieldOffset, Flags, FieldTy);
EltTys.push_back(FieldTy);
}
-
+
llvm::DIArray Elements =
DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
// Bit size, align and offset of the type.
uint64_t Size = M->getContext().getTypeSize(Ty);
uint64_t Align = M->getContext().getTypeAlign(Ty);
-
- llvm::DIType RealDecl =
+
+ llvm::DICompositeType RealDecl =
DebugFactory.CreateCompositeType(Tag, Unit, Name, DefUnit, Line, Size,
Align, 0, 0, llvm::DIType(), Elements,
RuntimeLang);
+ // Update TypeCache.
+ TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = RealDecl.getNode();
+
// Now that we have a real decl for the struct, replace anything using the
// old decl with the new one. This will recursively update the debug info.
- FwdDecl.getGV()->replaceAllUsesWith(RealDecl.getGV());
- FwdDecl.getGV()->eraseFromParent();
-
+ FwdDecl.replaceAllUsesWith(RealDecl);
+
return RealDecl;
}
@@ -637,13 +686,13 @@ llvm::DIType CGDebugInfo::CreateType(const EnumType *Ty,
llvm::SmallVector<llvm::DIDescriptor, 32> Enumerators;
// Create DIEnumerator elements for each enumerator.
- for (EnumDecl::enumerator_iterator
+ for (EnumDecl::enumerator_iterator
Enum = Decl->enumerator_begin(), EnumEnd = Decl->enumerator_end();
Enum != EnumEnd; ++Enum) {
Enumerators.push_back(DebugFactory.CreateEnumerator(Enum->getNameAsString(),
Enum->getInitVal().getZExtValue()));
}
-
+
// Return a CompositeType for the enum itself.
llvm::DIArray EltArray =
DebugFactory.GetOrCreateArray(Enumerators.data(), Enumerators.size());
@@ -655,7 +704,7 @@ llvm::DIType CGDebugInfo::CreateType(const EnumType *Ty,
PresumedLoc PLoc = SM.getPresumedLoc(DefLoc);
unsigned Line = PLoc.isInvalid() ? 0 : PLoc.getLine();
-
+
// Size and align of the type.
uint64_t Size = 0;
unsigned Align = 0;
@@ -663,7 +712,7 @@ llvm::DIType CGDebugInfo::CreateType(const EnumType *Ty,
Size = M->getContext().getTypeSize(Ty);
Align = M->getContext().getTypeAlign(Ty);
}
-
+
return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_enumeration_type,
Unit, EnumName, DefUnit, Line,
Size, Align, 0, 0,
@@ -676,7 +725,7 @@ llvm::DIType CGDebugInfo::CreateType(const TagType *Ty,
return CreateType(RT, Unit);
else if (const EnumType *ET = dyn_cast<EnumType>(Ty))
return CreateType(ET, Unit);
-
+
return llvm::DIType();
}
@@ -684,8 +733,8 @@ llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
llvm::DICompileUnit Unit) {
uint64_t Size;
uint64_t Align;
-
-
+
+
// FIXME: make getTypeAlign() aware of VLAs and incomplete array types
if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(Ty)) {
Size = 0;
@@ -699,7 +748,7 @@ llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
Size = M->getContext().getTypeSize(Ty);
Align = M->getContext().getTypeAlign(Ty);
}
-
+
// Add the dimensions of the array. FIXME: This loses CV qualifiers from
// interior arrays, do we care? Why aren't nested arrays represented the
// obvious/recursive way?
@@ -708,12 +757,13 @@ llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
while ((Ty = dyn_cast<ArrayType>(EltTy))) {
uint64_t Upper = 0;
if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty))
- Upper = CAT->getSize().getZExtValue() - 1;
+ if (CAT->getSize().getZExtValue())
+ Upper = CAT->getSize().getZExtValue() - 1;
// FIXME: Verify this is right for VLAs.
Subscripts.push_back(DebugFactory.GetOrCreateSubrange(0, Upper));
EltTy = Ty->getElementType();
}
-
+
llvm::DIArray SubscriptArray =
DebugFactory.GetOrCreateArray(Subscripts.data(), Subscripts.size());
@@ -731,14 +781,29 @@ llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty,
llvm::DICompileUnit Unit) {
if (Ty.isNull())
return llvm::DIType();
-
- // Check to see if the compile unit already has created this type.
- llvm::DIType &Slot = TypeCache[Ty.getAsOpaquePtr()];
- if (!Slot.isNull()) return Slot;
- // Handle CVR qualifiers, which recursively handles what they refer to.
- if (Ty.getCVRQualifiers())
- return Slot = CreateCVRType(Ty, Unit);
+ // Check for existing entry.
+ std::map<void *, llvm::WeakVH>::iterator it =
+ TypeCache.find(Ty.getAsOpaquePtr());
+ if (it != TypeCache.end()) {
+ // Verify that the debug info still exists.
+ if (&*it->second)
+ return llvm::DIType(cast<llvm::MDNode>(it->second));
+ }
+
+ // Otherwise create the type.
+ llvm::DIType Res = CreateTypeNode(Ty, Unit);
+ TypeCache.insert(std::make_pair(Ty.getAsOpaquePtr(), Res.getNode()));
+ return Res;
+}
+
+/// CreateTypeNode - Create type metadata for a source language type.
+llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
+ llvm::DICompileUnit Unit) {
+ // Handle qualifiers, which recursively handles what they refer to.
+ if (Ty.hasQualifiers())
+ return CreateQualifiedType(Ty, Unit);
// Work out details of type.
switch (Ty->getTypeClass()) {
@@ -748,53 +813,52 @@ llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty,
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
assert(false && "Dependent types cannot show up in debug information");
-
+
+ default:
case Type::LValueReference:
case Type::RValueReference:
case Type::Vector:
case Type::ExtVector:
- case Type::ExtQual:
case Type::FixedWidthInt:
case Type::MemberPointer:
case Type::TemplateSpecialization:
case Type::QualifiedName:
// Unsupported types
return llvm::DIType();
- case Type::ObjCObjectPointer: // Encode id<p> in debug info just like id.
- return Slot = getOrCreateType(M->getContext().getObjCIdType(), Unit);
-
- case Type::ObjCQualifiedInterface: // Drop protocols from interface.
- case Type::ObjCInterface:
- return Slot = CreateType(cast<ObjCInterfaceType>(Ty), Unit);
- case Type::Builtin: return Slot = CreateType(cast<BuiltinType>(Ty), Unit);
- case Type::Complex: return Slot = CreateType(cast<ComplexType>(Ty), Unit);
- case Type::Pointer: return Slot = CreateType(cast<PointerType>(Ty), Unit);
+ case Type::ObjCObjectPointer:
+ return CreateType(cast<ObjCObjectPointerType>(Ty), Unit);
+ case Type::ObjCInterface:
+ return CreateType(cast<ObjCInterfaceType>(Ty), Unit);
+ case Type::Builtin: return CreateType(cast<BuiltinType>(Ty), Unit);
+ case Type::Complex: return CreateType(cast<ComplexType>(Ty), Unit);
+ case Type::Pointer: return CreateType(cast<PointerType>(Ty), Unit);
case Type::BlockPointer:
- return Slot = CreateType(cast<BlockPointerType>(Ty), Unit);
- case Type::Typedef: return Slot = CreateType(cast<TypedefType>(Ty), Unit);
+ return CreateType(cast<BlockPointerType>(Ty), Unit);
+ case Type::Typedef: return CreateType(cast<TypedefType>(Ty), Unit);
case Type::Record:
case Type::Enum:
- return Slot = CreateType(cast<TagType>(Ty), Unit);
+ return CreateType(cast<TagType>(Ty), Unit);
case Type::FunctionProto:
case Type::FunctionNoProto:
- return Slot = CreateType(cast<FunctionType>(Ty), Unit);
-
+ return CreateType(cast<FunctionType>(Ty), Unit);
+ case Type::Elaborated:
+ return getOrCreateType(cast<ElaboratedType>(Ty)->getUnderlyingType(),
+ Unit);
+
case Type::ConstantArray:
+ case Type::ConstantArrayWithExpr:
+ case Type::ConstantArrayWithoutExpr:
case Type::VariableArray:
case Type::IncompleteArray:
- return Slot = CreateType(cast<ArrayType>(Ty), Unit);
+ return CreateType(cast<ArrayType>(Ty), Unit);
case Type::TypeOfExpr:
- return Slot = getOrCreateType(cast<TypeOfExprType>(Ty)->getUnderlyingExpr()
- ->getType(), Unit);
+ return getOrCreateType(cast<TypeOfExprType>(Ty)->getUnderlyingExpr()
+ ->getType(), Unit);
case Type::TypeOf:
- return Slot = getOrCreateType(cast<TypeOfType>(Ty)->getUnderlyingType(),
- Unit);
+ return getOrCreateType(cast<TypeOfType>(Ty)->getUnderlyingType(), Unit);
case Type::Decltype:
- return Slot = getOrCreateType(cast<DecltypeType>(Ty)->getUnderlyingExpr()
- ->getType(), Unit);
+ return getOrCreateType(cast<DecltypeType>(Ty)->getUnderlyingType(), Unit);
}
-
- return Slot;
}
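Note the effect of switching TypeCache to llvm::WeakVH above: a cached entry whose metadata node has since been deleted reads back as null, and getOrCreateType quietly rebuilds it. A rough standalone analogy using std::weak_ptr in place of WeakVH (illustrative only; the names are hypothetical and the real map is keyed on QualType's opaque pointer):

    #include <iostream>
    #include <map>
    #include <memory>
    #include <string>

    struct MDNodeLike { std::string Desc; };   // stand-in for a metadata node

    std::map<const void *, std::weak_ptr<MDNodeLike>> TypeCache;

    std::shared_ptr<MDNodeLike> createTypeNode() {
      return std::make_shared<MDNodeLike>(MDNodeLike{"type metadata"});
    }

    std::shared_ptr<MDNodeLike> getOrCreateType(const void *Key) {
      auto It = TypeCache.find(Key);
      if (It != TypeCache.end())
        if (auto Live = It->second.lock())     // verify the debug info still exists
          return Live;
      auto Res = createTypeNode();             // otherwise (re)create the node
      TypeCache[Key] = Res;
      return Res;
    }

    int main() {
      int Key;                                 // any stable address serves as a key
      auto A = getOrCreateType(&Key);
      std::cout << (A == getOrCreateType(&Key)) << '\n';        // 1: cache hit
      A.reset();                               // the node goes away
      std::cout << (getOrCreateType(&Key) != nullptr) << '\n';  // 1: rebuilt, not a stale hit
      return 0;
    }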
/// EmitFunctionStart - Constructs the debug code for entering a function -
@@ -803,25 +867,27 @@ void CGDebugInfo::EmitFunctionStart(const char *Name, QualType ReturnType,
llvm::Function *Fn,
CGBuilderTy &Builder) {
const char *LinkageName = Name;
-
+
// Skip the asm prefix if it exists.
//
// FIXME: This should probably be the unmangled name?
if (Name[0] == '\01')
++Name;
-
+
// FIXME: Why is this using CurLoc???
llvm::DICompileUnit Unit = getOrCreateCompileUnit(CurLoc);
SourceManager &SM = M->getContext().getSourceManager();
unsigned LineNo = SM.getPresumedLoc(CurLoc).getLine();
-
+
llvm::DISubprogram SP =
DebugFactory.CreateSubprogram(Unit, Name, Name, LinkageName, Unit, LineNo,
getOrCreateType(ReturnType, Unit),
Fn->hasInternalLinkage(), true/*definition*/);
-
+
+#ifndef ATTACH_DEBUG_INFO_TO_AN_INSN
DebugFactory.InsertSubprogramStart(SP, Builder.GetInsertBlock());
-
+#endif
+
// Push function on region stack.
RegionStack.push_back(SP);
}
@@ -829,10 +895,10 @@ void CGDebugInfo::EmitFunctionStart(const char *Name, QualType ReturnType,
void CGDebugInfo::EmitStopPoint(llvm::Function *Fn, CGBuilderTy &Builder) {
if (CurLoc.isInvalid() || CurLoc.isMacroID()) return;
-
+
// Don't bother if things are the same as last time.
SourceManager &SM = M->getContext().getSourceManager();
- if (CurLoc == PrevLoc
+ if (CurLoc == PrevLoc
|| (SM.getInstantiationLineNumber(CurLoc) ==
SM.getInstantiationLineNumber(PrevLoc)
&& SM.isFromSameFile(CurLoc, PrevLoc)))
@@ -844,8 +910,19 @@ void CGDebugInfo::EmitStopPoint(llvm::Function *Fn, CGBuilderTy &Builder) {
// Get the appropriate compile unit.
llvm::DICompileUnit Unit = getOrCreateCompileUnit(CurLoc);
PresumedLoc PLoc = SM.getPresumedLoc(CurLoc);
+
+#ifdef ATTACH_DEBUG_INFO_TO_AN_INSN
+ llvm::DIDescriptor DR = RegionStack.back();
+ llvm::DIScope DS = llvm::DIScope(DR.getNode());
+ llvm::DILocation DO(NULL);
+ llvm::DILocation DL =
+ DebugFactory.CreateLocation(PLoc.getLine(), PLoc.getColumn(),
+ DS, DO);
+ Builder.SetCurrentDebugLocation(DL.getNode());
+#else
DebugFactory.InsertStopPoint(Unit, PLoc.getLine(), PLoc.getColumn(),
- Builder.GetInsertBlock());
+ Builder.GetInsertBlock());
+#endif
}
/// EmitRegionStart- Constructs the debug code for entering a declarative
@@ -854,9 +931,11 @@ void CGDebugInfo::EmitRegionStart(llvm::Function *Fn, CGBuilderTy &Builder) {
llvm::DIDescriptor D;
if (!RegionStack.empty())
D = RegionStack.back();
- D = DebugFactory.CreateBlock(D);
+ D = DebugFactory.CreateLexicalBlock(D);
RegionStack.push_back(D);
+#ifndef ATTACH_DEBUG_INFO_TO_AN_INSN
DebugFactory.InsertRegionStart(D, Builder.GetInsertBlock());
+#endif
}
/// EmitRegionEnd - Constructs the debug code for exiting a declarative
@@ -866,8 +945,10 @@ void CGDebugInfo::EmitRegionEnd(llvm::Function *Fn, CGBuilderTy &Builder) {
  // Provide a region stop point.
EmitStopPoint(Fn, Builder);
-
+
+#ifndef ATTACH_DEBUG_INFO_TO_AN_INSN
DebugFactory.InsertRegionEnd(RegionStack.back(), Builder.GetInsertBlock());
+#endif
RegionStack.pop_back();
}
@@ -884,7 +965,139 @@ void CGDebugInfo::EmitDeclare(const VarDecl *Decl, unsigned Tag,
return;
llvm::DICompileUnit Unit = getOrCreateCompileUnit(Decl->getLocation());
- llvm::DIType Ty = getOrCreateType(Decl->getType(), Unit);
+ QualType Type = Decl->getType();
+ llvm::DIType Ty = getOrCreateType(Type, Unit);
+ if (Decl->hasAttr<BlocksAttr>()) {
+ llvm::DICompileUnit DefUnit;
+ unsigned Tag = llvm::dwarf::DW_TAG_structure_type;
+
+ llvm::SmallVector<llvm::DIDescriptor, 5> EltTys;
+
+ llvm::DIType FieldTy;
+
+ QualType FType;
+ uint64_t FieldSize, FieldOffset;
+ unsigned FieldAlign;
+
+ llvm::DIArray Elements;
+ llvm::DIType EltTy;
+
+ // Build up structure for the byref. See BuildByRefType.
+ FieldOffset = 0;
+ FType = M->getContext().getPointerType(M->getContext().VoidTy);
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "__isa", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+ FieldOffset += FieldSize;
+
+ FType = M->getContext().getPointerType(M->getContext().VoidTy);
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "__forwarding", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+ FieldOffset += FieldSize;
+
+ FType = M->getContext().getFixedWidthIntType(32, true); // Int32Ty;
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "__flags", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+ FieldOffset += FieldSize;
+
+ FType = M->getContext().getFixedWidthIntType(32, true); // Int32Ty;
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "__size", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+ FieldOffset += FieldSize;
+
+ bool HasCopyAndDispose = M->BlockRequiresCopying(Type);
+ if (HasCopyAndDispose) {
+ FType = M->getContext().getPointerType(M->getContext().VoidTy);
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "__copy_helper", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+ FieldOffset += FieldSize;
+
+ FType = M->getContext().getPointerType(M->getContext().VoidTy);
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "__destroy_helper", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+ FieldOffset += FieldSize;
+ }
+
+ unsigned Align = M->getContext().getDeclAlignInBytes(Decl);
+ if (Align > M->getContext().Target.getPointerAlign(0) / 8) {
+ unsigned AlignedOffsetInBytes
+ = llvm::RoundUpToAlignment(FieldOffset/8, Align);
+ unsigned NumPaddingBytes
+ = AlignedOffsetInBytes - FieldOffset/8;
+
+ if (NumPaddingBytes > 0) {
+ llvm::APInt pad(32, NumPaddingBytes);
+ FType = M->getContext().getConstantArrayType(M->getContext().CharTy,
+ pad, ArrayType::Normal, 0);
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member,
+ Unit, "", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+ FieldOffset += FieldSize;
+ }
+ }
+
+ FType = Type;
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = Align*8;
+ std::string Name = Decl->getNameAsString();
+
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ Name, DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+ FieldOffset += FieldSize;
+
+ Elements = DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+ unsigned Flags = llvm::DIType::FlagBlockByrefStruct;
+
+ Ty = DebugFactory.CreateCompositeType(Tag, Unit, "",
+ llvm::DICompileUnit(),
+ 0, FieldOffset, 0, 0, Flags,
+ llvm::DIType(), Elements);
+ }
// Get location information.
SourceManager &SM = M->getContext().getSourceManager();
@@ -895,21 +1108,222 @@ void CGDebugInfo::EmitDeclare(const VarDecl *Decl, unsigned Tag,
else
Unit = llvm::DICompileUnit();
-
+
// Create the descriptor for the variable.
- llvm::DIVariable D =
+ llvm::DIVariable D =
DebugFactory.CreateVariable(Tag, RegionStack.back(),Decl->getNameAsString(),
Unit, Line, Ty);
// Insert an llvm.dbg.declare into the current block.
DebugFactory.InsertDeclare(Storage, D, Builder.GetInsertBlock());
}
+/// EmitDeclare - Emit local variable declaration debug info.
+void CGDebugInfo::EmitDeclare(const BlockDeclRefExpr *BDRE, unsigned Tag,
+ llvm::Value *Storage, CGBuilderTy &Builder,
+ CodeGenFunction *CGF) {
+ const ValueDecl *Decl = BDRE->getDecl();
+ assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
+
+ // Do not emit variable debug information while generating optimized code.
+ // The llvm optimizer and code generator are not yet ready to support
+ // optimized code debugging.
+ const CompileOptions &CO = M->getCompileOpts();
+ if (CO.OptimizationLevel || Builder.GetInsertBlock() == 0)
+ return;
+
+ uint64_t XOffset = 0;
+ llvm::DICompileUnit Unit = getOrCreateCompileUnit(Decl->getLocation());
+ QualType Type = Decl->getType();
+ llvm::DIType Ty = getOrCreateType(Type, Unit);
+ if (Decl->hasAttr<BlocksAttr>()) {
+ llvm::DICompileUnit DefUnit;
+ unsigned Tag = llvm::dwarf::DW_TAG_structure_type;
+
+ llvm::SmallVector<llvm::DIDescriptor, 5> EltTys;
+
+ llvm::DIType FieldTy;
+
+ QualType FType;
+ uint64_t FieldSize, FieldOffset;
+ unsigned FieldAlign;
+
+ llvm::DIArray Elements;
+ llvm::DIType EltTy;
+
+ // Build up structure for the byref. See BuildByRefType.
+ FieldOffset = 0;
+ FType = M->getContext().getPointerType(M->getContext().VoidTy);
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "__isa", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+ FieldOffset += FieldSize;
+
+ FType = M->getContext().getPointerType(M->getContext().VoidTy);
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "__forwarding", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+ FieldOffset += FieldSize;
+
+ FType = M->getContext().getFixedWidthIntType(32, true); // Int32Ty;
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "__flags", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+ FieldOffset += FieldSize;
+
+ FType = M->getContext().getFixedWidthIntType(32, true); // Int32Ty;
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "__size", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+ FieldOffset += FieldSize;
+
+ bool HasCopyAndDispose = M->BlockRequiresCopying(Type);
+ if (HasCopyAndDispose) {
+ FType = M->getContext().getPointerType(M->getContext().VoidTy);
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "__copy_helper", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+ FieldOffset += FieldSize;
+
+ FType = M->getContext().getPointerType(M->getContext().VoidTy);
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ "__destroy_helper", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+ FieldOffset += FieldSize;
+ }
+
+ unsigned Align = M->getContext().getDeclAlignInBytes(Decl);
+ if (Align > M->getContext().Target.getPointerAlign(0) / 8) {
+ unsigned AlignedOffsetInBytes
+ = llvm::RoundUpToAlignment(FieldOffset/8, Align);
+ unsigned NumPaddingBytes
+ = AlignedOffsetInBytes - FieldOffset/8;
+
+ if (NumPaddingBytes > 0) {
+ llvm::APInt pad(32, NumPaddingBytes);
+ FType = M->getContext().getConstantArrayType(M->getContext().CharTy,
+ pad, ArrayType::Normal, 0);
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = M->getContext().getTypeAlign(FType);
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member,
+ Unit, "", DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+ FieldOffset += FieldSize;
+ }
+ }
+
+ FType = Type;
+ FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = M->getContext().getTypeSize(FType);
+ FieldAlign = Align*8;
+ std::string Name = Decl->getNameAsString();
+
+ XOffset = FieldOffset;
+ FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+ Name, DefUnit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+ FieldOffset += FieldSize;
+
+ Elements = DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+ unsigned Flags = llvm::DIType::FlagBlockByrefStruct;
+
+ Ty = DebugFactory.CreateCompositeType(Tag, Unit, "",
+ llvm::DICompileUnit(),
+ 0, FieldOffset, 0, 0, Flags,
+ llvm::DIType(), Elements);
+ }
+
+ // Get location information.
+ SourceManager &SM = M->getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Decl->getLocation());
+ unsigned Line = 0;
+ if (!PLoc.isInvalid())
+ Line = PLoc.getLine();
+ else
+ Unit = llvm::DICompileUnit();
+
+ uint64_t offset = CGF->BlockDecls[Decl];
+ llvm::SmallVector<llvm::Value *, 9> addr;
+ llvm::LLVMContext &VMContext = M->getLLVMContext();
+ addr.push_back(llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
+ llvm::DIFactory::OpDeref));
+ addr.push_back(llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
+ llvm::DIFactory::OpPlus));
+ addr.push_back(llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
+ offset));
+ if (BDRE->isByRef()) {
+ addr.push_back(llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
+ llvm::DIFactory::OpDeref));
+ addr.push_back(llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
+ llvm::DIFactory::OpPlus));
+ offset = CGF->LLVMPointerWidth/8; // offset of __forwarding field
+ addr.push_back(llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
+ offset));
+ addr.push_back(llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
+ llvm::DIFactory::OpDeref));
+ addr.push_back(llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
+ llvm::DIFactory::OpPlus));
+ offset = XOffset/8; // offset of x field
+ addr.push_back(llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
+ offset));
+ }
+
+ // Create the descriptor for the variable.
+ llvm::DIVariable D =
+ DebugFactory.CreateComplexVariable(Tag, RegionStack.back(),
+ Decl->getNameAsString(), Unit, Line, Ty,
+ addr);
+ // Insert an llvm.dbg.declare into the current block.
+ DebugFactory.InsertDeclare(Storage, D, Builder.GetInsertPoint());
+}
+
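Roughly what the OpDeref/OpPlus list built above asks the debugger to do: starting from the block pointer handed to llvm.dbg.declare, load a pointer, add an offset, and repeat until the variable's live storage is reached. A standalone illustration with raw pointers (the three offsets are placeholders for the block capture offset, the __forwarding field offset, and XOffset/8; none of these values are taken from the real code):

    #include <cstdint>
    #include <cstring>

    // One (OpDeref, OpPlus) pair per step, matching the byref 'addr' sequence above.
    char *locateByrefVariable(char *BlockPtr, std::uint64_t CaptureOffset,
                              std::uint64_t ForwardingOffset, std::uint64_t XOffset) {
      char *P = BlockPtr;
      std::memcpy(&P, P, sizeof(P)); P += CaptureOffset;     // OpDeref, OpPlus
      std::memcpy(&P, P, sizeof(P)); P += ForwardingOffset;  // OpDeref, OpPlus
      std::memcpy(&P, P, sizeof(P)); P += XOffset;           // OpDeref, OpPlus
      return P;  // now points at 'x' in the byref struct, wherever it currently lives
    }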
void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *Decl,
llvm::Value *Storage,
CGBuilderTy &Builder) {
EmitDeclare(Decl, llvm::dwarf::DW_TAG_auto_variable, Storage, Builder);
}
+void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
+ const BlockDeclRefExpr *BDRE, llvm::Value *Storage, CGBuilderTy &Builder,
+ CodeGenFunction *CGF) {
+ EmitDeclare(BDRE, llvm::dwarf::DW_TAG_auto_variable, Storage, Builder, CGF);
+}
+
/// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument
/// variable declaration.
void CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *Decl, llvm::Value *AI,
@@ -920,7 +1334,7 @@ void CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *Decl, llvm::Value *AI,
/// EmitGlobalVariable - Emit information about a global variable.
-void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
+void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
const VarDecl *Decl) {
// Do not emit variable debug information while generating optimized code.
@@ -936,29 +1350,30 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
PresumedLoc PLoc = SM.getPresumedLoc(Decl->getLocation());
unsigned LineNo = PLoc.isInvalid() ? 0 : PLoc.getLine();
- std::string Name = Decl->getNameAsString();
+ std::string Name = Var->getName();
QualType T = Decl->getType();
if (T->isIncompleteArrayType()) {
-
+
// CodeGen turns int[] into int[1] so we'll do the same here.
llvm::APSInt ConstVal(32);
-
+
ConstVal = 1;
QualType ET = M->getContext().getAsArrayType(T)->getElementType();
-
- T = M->getContext().getConstantArrayType(ET, ConstVal,
+
+ T = M->getContext().getConstantArrayType(ET, ConstVal,
ArrayType::Normal, 0);
}
- DebugFactory.CreateGlobalVariable(Unit, Name, Name, "", Unit, LineNo,
+ DebugFactory.CreateGlobalVariable(getContext(Decl, Unit),
+ Name, Name, "", Unit, LineNo,
getOrCreateType(T, Unit),
Var->hasInternalLinkage(),
true/*definition*/, Var);
}
/// EmitGlobalVariable - Emit information about an objective-c interface.
-void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
+void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
ObjCInterfaceDecl *Decl) {
// Create global variable debug descriptor.
llvm::DICompileUnit Unit = getOrCreateCompileUnit(Decl->getLocation());
@@ -970,14 +1385,14 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
QualType T = M->getContext().getObjCInterfaceType(Decl);
if (T->isIncompleteArrayType()) {
-
+
// CodeGen turns int[] into int[1] so we'll do the same here.
llvm::APSInt ConstVal(32);
-
+
ConstVal = 1;
QualType ET = M->getContext().getAsArrayType(T)->getElementType();
-
- T = M->getContext().getConstantArrayType(ET, ConstVal,
+
+ T = M->getContext().getConstantArrayType(ET, ConstVal,
ArrayType::Normal, 0);
}
@@ -986,4 +1401,3 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
Var->hasInternalLinkage(),
true/*definition*/, Var);
}
-
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index de655800fa08..0a617b999240 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This is the source level debug info generator for llvm translation.
+// This is the source level debug info generator for llvm translation.
//
//===----------------------------------------------------------------------===//
@@ -15,28 +15,35 @@
#define CLANG_CODEGEN_CGDEBUGINFO_H
#include "clang/AST/Type.h"
+#include "clang/AST/Expr.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/Support/ValueHandle.h"
#include <map>
#include "CGBuilder.h"
+namespace llvm {
+ class MDNode;
+}
+
namespace clang {
class VarDecl;
class ObjCInterfaceDecl;
namespace CodeGen {
class CodeGenModule;
+ class CodeGenFunction;
-/// CGDebugInfo - This class gathers all debug information during compilation
-/// and is responsible for emitting to llvm globals or pass directly to
+/// CGDebugInfo - This class gathers all debug information during compilation
+/// and is responsible for emitting it to llvm globals or passing it directly to
/// the backend.
class CGDebugInfo {
CodeGenModule *M;
bool isMainCompileUnitCreated;
llvm::DIFactory DebugFactory;
-
+
SourceLocation CurLoc, PrevLoc;
/// CompileUnitCache - Cache of previously constructed CompileUnits.
@@ -44,8 +51,8 @@ class CGDebugInfo {
/// TypeCache - Cache of previously constructed Types.
// FIXME: Eliminate this map. Be careful of iterator invalidation.
- std::map<void *, llvm::DIType> TypeCache;
-
+ std::map<void *, llvm::WeakVH> TypeCache;
+
bool BlockLiteralGenericSet;
llvm::DIType BlockLiteralGeneric;
@@ -54,8 +61,10 @@ class CGDebugInfo {
/// Helper functions for getOrCreateType.
llvm::DIType CreateType(const BuiltinType *Ty, llvm::DICompileUnit U);
llvm::DIType CreateType(const ComplexType *Ty, llvm::DICompileUnit U);
- llvm::DIType CreateCVRType(QualType Ty, llvm::DICompileUnit U);
+ llvm::DIType CreateQualifiedType(QualType Ty, llvm::DICompileUnit U);
llvm::DIType CreateType(const TypedefType *Ty, llvm::DICompileUnit U);
+ llvm::DIType CreateType(const ObjCObjectPointerType *Ty,
+ llvm::DICompileUnit Unit);
llvm::DIType CreateType(const PointerType *Ty, llvm::DICompileUnit U);
llvm::DIType CreateType(const BlockPointerType *Ty, llvm::DICompileUnit U);
llvm::DIType CreateType(const FunctionType *Ty, llvm::DICompileUnit U);
@@ -81,12 +90,12 @@ public:
/// start of a new function.
void EmitFunctionStart(const char *Name, QualType ReturnType,
llvm::Function *Fn, CGBuilderTy &Builder);
-
+
/// EmitRegionStart - Emit a call to llvm.dbg.region.start to indicate start
- /// of a new block.
+ /// of a new block.
void EmitRegionStart(llvm::Function *Fn, CGBuilderTy &Builder);
-
- /// EmitRegionEnd - Emit call to llvm.dbg.region.end to indicate end of a
+
+ /// EmitRegionEnd - Emit call to llvm.dbg.region.end to indicate end of a
/// block.
void EmitRegionEnd(llvm::Function *Fn, CGBuilderTy &Builder);
@@ -95,23 +104,36 @@ public:
void EmitDeclareOfAutoVariable(const VarDecl *Decl, llvm::Value *AI,
CGBuilderTy &Builder);
+ /// EmitDeclareOfBlockDeclRefVariable - Emit call to llvm.dbg.declare for an
+ /// imported variable declaration in a block.
+ void EmitDeclareOfBlockDeclRefVariable(const BlockDeclRefExpr *BDRE,
+ llvm::Value *AI,
+ CGBuilderTy &Builder,
+ CodeGenFunction *CGF);
+
/// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument
/// variable declaration.
void EmitDeclareOfArgVariable(const VarDecl *Decl, llvm::Value *AI,
CGBuilderTy &Builder);
-
+
/// EmitGlobalVariable - Emit information about a global variable.
void EmitGlobalVariable(llvm::GlobalVariable *GV, const VarDecl *Decl);
/// EmitGlobalVariable - Emit information about an objective-c interface.
void EmitGlobalVariable(llvm::GlobalVariable *GV, ObjCInterfaceDecl *Decl);
-
+
private:
/// EmitDeclare - Emit call to llvm.dbg.declare for a variable declaration.
void EmitDeclare(const VarDecl *decl, unsigned Tag, llvm::Value *AI,
CGBuilderTy &Builder);
-
-
+
+ /// EmitDeclare - Emit call to llvm.dbg.declare for a variable declaration.
+ void EmitDeclare(const BlockDeclRefExpr *BDRE, unsigned Tag, llvm::Value *AI,
+ CGBuilderTy &Builder, CodeGenFunction *CGF);
+
+ /// getContext - Get context info for the decl.
+  llvm::DIDescriptor getContext(const VarDecl *Decl, llvm::DIDescriptor &CU);
+
/// getOrCreateCompileUnit - Get the compile unit from the cache or create a
/// new one if necessary.
llvm::DICompileUnit getOrCreateCompileUnit(SourceLocation Loc);
@@ -119,8 +141,12 @@ private:
/// getOrCreateType - Get the type from the cache or create a new type if
/// necessary.
llvm::DIType getOrCreateType(QualType Ty, llvm::DICompileUnit Unit);
+
+ /// CreateTypeNode - Create type metadata for a source language type.
+ llvm::DIType CreateTypeNode(QualType Ty, llvm::DICompileUnit Unit);
};
} // namespace CodeGen
} // namespace clang
+
#endif
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 2ae7e225ebbf..7feff83dee6b 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -35,22 +35,22 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::Function: // void X();
case Decl::Record: // struct/union/class X;
case Decl::Enum: // enum X;
- case Decl::EnumConstant: // enum ? { X = ? }
+ case Decl::EnumConstant: // enum ? { X = ? }
case Decl::CXXRecord: // struct/union/class X; [C++]
// None of these decls require codegen support.
return;
-
+
case Decl::Var: {
const VarDecl &VD = cast<VarDecl>(D);
- assert(VD.isBlockVarDecl() &&
+ assert(VD.isBlockVarDecl() &&
"Should not see file-scope variables inside a function!");
return EmitBlockVarDecl(VD);
}
-
+
case Decl::Typedef: { // typedef int X;
const TypedefDecl &TD = cast<TypedefDecl>(D);
QualType Ty = TD.getUnderlyingType();
-
+
if (Ty->isVariablyModifiedType())
EmitVLASize(Ty);
}
@@ -62,7 +62,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
void CodeGenFunction::EmitBlockVarDecl(const VarDecl &D) {
if (D.hasAttr<AsmLabelAttr>())
CGM.ErrorUnsupported(&D, "__asm__");
-
+
switch (D.getStorageClass()) {
case VarDecl::None:
case VarDecl::Auto:
@@ -95,27 +95,28 @@ CodeGenFunction::CreateStaticBlockVarDecl(const VarDecl &D,
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CurFuncDecl))
ContextName = CGM.getMangledName(FD);
else if (isa<ObjCMethodDecl>(CurFuncDecl))
- ContextName = std::string(CurFn->getNameStart(),
- CurFn->getNameStart() + CurFn->getNameLen());
+ ContextName = CurFn->getName();
else
assert(0 && "Unknown context for block var decl");
-
+
Name = ContextName + Separator + D.getNameAsString();
}
const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(Ty);
- return new llvm::GlobalVariable(LTy, Ty.isConstant(getContext()), Linkage,
- llvm::Constant::getNullValue(LTy), Name,
- &CGM.getModule(), D.isThreadSpecified(),
- Ty.getAddressSpace());
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), LTy,
+ Ty.isConstant(getContext()), Linkage,
+ CGM.EmitNullConstant(D.getType()), Name, 0,
+ D.isThreadSpecified(), Ty.getAddressSpace());
+ GV->setAlignment(getContext().getDeclAlignInBytes(&D));
+ return GV;
}
-void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D) {
-
+void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D) {
llvm::Value *&DMEntry = LocalDeclMap[&D];
assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
-
- llvm::GlobalVariable *GV =
+
+ llvm::GlobalVariable *GV =
CreateStaticBlockVarDecl(D, ".", llvm::GlobalValue::InternalLinkage);
// Store into LocalDeclMap before generating initializer to handle
@@ -123,14 +124,11 @@ void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D) {
DMEntry = GV;
// Make sure to evaluate VLA bounds now so that we have them for later.
+ //
+ // FIXME: Can this happen?
if (D.getType()->isVariablyModifiedType())
EmitVLASize(D.getType());
- if (D.getType()->isReferenceType()) {
- CGM.ErrorUnsupported(&D, "static declaration with reference type");
- return;
- }
-
if (D.getInit()) {
llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), D.getType(), this);
@@ -140,7 +138,7 @@ void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D) {
if (!getContext().getLangOptions().CPlusPlus)
CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
else
- GenerateStaticCXXBlockVarDeclInit(D, GV);
+ EmitStaticCXXBlockVarDeclInit(D, GV);
} else {
// The initializer may differ in type from the global. Rewrite
// the global to match the initializer. (We have to do this
@@ -148,23 +146,24 @@ void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D) {
// in the LLVM type system.)
if (GV->getType() != Init->getType()) {
llvm::GlobalVariable *OldGV = GV;
-
- GV = new llvm::GlobalVariable(Init->getType(), OldGV->isConstant(),
+
+ GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
+ OldGV->isConstant(),
OldGV->getLinkage(), Init, "",
- &CGM.getModule(), D.isThreadSpecified(),
+ 0, D.isThreadSpecified(),
D.getType().getAddressSpace());
// Steal the name of the old global
GV->takeName(OldGV);
// Replace all uses of the old global with the new global
- llvm::Constant *NewPtrForOldDecl =
+ llvm::Constant *NewPtrForOldDecl =
llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
OldGV->replaceAllUsesWith(NewPtrForOldDecl);
// Erase the old global, since it is no longer used.
OldGV->eraseFromParent();
- }
+ }
GV->setInitializer(Init);
}
@@ -174,14 +173,14 @@ void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D) {
if (const AnnotateAttr *AA = D.getAttr<AnnotateAttr>()) {
SourceManager &SM = CGM.getContext().getSourceManager();
llvm::Constant *Ann =
- CGM.EmitAnnotateAttr(GV, AA,
+ CGM.EmitAnnotateAttr(GV, AA,
SM.getInstantiationLineNumber(D.getLocation()));
CGM.AddAnnotation(Ann);
}
if (const SectionAttr *SA = D.getAttr<SectionAttr>())
GV->setSection(SA->getName());
-
+
if (D.hasAttr<UsedAttr>())
CGM.AddUsedGlobal(GV);
@@ -202,7 +201,13 @@ void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D) {
DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(GV), &D);
}
}
+
+unsigned CodeGenFunction::getByRefValueLLVMField(const ValueDecl *VD) const {
+ assert(ByRefValueInfo.count(VD) && "Did not find value!");
+ return ByRefValueInfo.find(VD)->second.second;
+}
+
/// BuildByRefType - This routine changes a __block variable declared as T x
/// into:
///
@@ -211,32 +216,91 @@ void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D) {
/// void *__forwarding;
/// int32_t __flags;
/// int32_t __size;
-/// void *__copy_helper;
-/// void *__destroy_helper;
+/// void *__copy_helper; // only if needed
+/// void *__destroy_helper; // only if needed
+/// char padding[X]; // only if needed
/// T x;
/// } x
///
-/// Align is the alignment needed in bytes for x.
-const llvm::Type *CodeGenFunction::BuildByRefType(QualType Ty,
- uint64_t Align) {
- const llvm::Type *LTy = ConvertType(Ty);
- bool needsCopyDispose = BlockRequiresCopying(Ty);
- std::vector<const llvm::Type *> Types(needsCopyDispose*2+5);
- const llvm::PointerType *PtrToInt8Ty
- = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
- Types[0] = PtrToInt8Ty;
- Types[1] = PtrToInt8Ty;
- Types[2] = llvm::Type::Int32Ty;
- Types[3] = llvm::Type::Int32Ty;
- if (needsCopyDispose) {
- Types[4] = PtrToInt8Ty;
- Types[5] = PtrToInt8Ty;
+const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
+ std::pair<const llvm::Type *, unsigned> &Info = ByRefValueInfo[D];
+ if (Info.first)
+ return Info.first;
+
+ QualType Ty = D->getType();
+
+ std::vector<const llvm::Type *> Types;
+
+ const llvm::PointerType *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+
+ llvm::PATypeHolder ByRefTypeHolder = llvm::OpaqueType::get(VMContext);
+
+ // void *__isa;
+ Types.push_back(Int8PtrTy);
+
+ // void *__forwarding;
+ Types.push_back(llvm::PointerType::getUnqual(ByRefTypeHolder));
+
+ // int32_t __flags;
+ Types.push_back(llvm::Type::getInt32Ty(VMContext));
+
+ // int32_t __size;
+ Types.push_back(llvm::Type::getInt32Ty(VMContext));
+
+ bool HasCopyAndDispose = BlockRequiresCopying(Ty);
+ if (HasCopyAndDispose) {
+ /// void *__copy_helper;
+ Types.push_back(Int8PtrTy);
+
+ /// void *__destroy_helper;
+ Types.push_back(Int8PtrTy);
}
- // FIXME: Align this on at least an Align boundary, assert if we can't.
- assert((Align <= unsigned(Target.getPointerAlign(0))/8)
- && "Can't align more than pointer yet");
- Types[needsCopyDispose*2 + 4] = LTy;
- return llvm::StructType::get(Types, false);
+
+ bool Packed = false;
+ unsigned Align = getContext().getDeclAlignInBytes(D);
+ if (Align > Target.getPointerAlign(0) / 8) {
+ // We have to insert padding.
+
+ // The struct above has 2 32-bit integers.
+ unsigned CurrentOffsetInBytes = 4 * 2;
+
+ // And either 2 or 4 pointers.
+ CurrentOffsetInBytes += (HasCopyAndDispose ? 4 : 2) *
+ CGM.getTargetData().getTypeAllocSize(Int8PtrTy);
+
+ // Align the offset.
+ unsigned AlignedOffsetInBytes =
+ llvm::RoundUpToAlignment(CurrentOffsetInBytes, Align);
+
+ unsigned NumPaddingBytes = AlignedOffsetInBytes - CurrentOffsetInBytes;
+ if (NumPaddingBytes > 0) {
+ const llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
+ // FIXME: We need a sema error for alignment larger than the minimum of the
+      // maximal stack alignment and the alignment of malloc on the system.
+ if (NumPaddingBytes > 1)
+ Ty = llvm::ArrayType::get(Ty, NumPaddingBytes);
+
+ Types.push_back(Ty);
+
+ // We want a packed struct.
+ Packed = true;
+ }
+ }
+
+ // T x;
+ Types.push_back(ConvertType(Ty));
+
+ const llvm::Type *T = llvm::StructType::get(VMContext, Types, Packed);
+
+ cast<llvm::OpaqueType>(ByRefTypeHolder.get())->refineAbstractTypeTo(T);
+ CGM.getModule().addTypeName("struct.__block_byref_" + D->getNameAsString(),
+ ByRefTypeHolder.get());
+
+ Info.first = ByRefTypeHolder.get();
+
+ Info.second = Types.size() - 1;
+
+ return Info.first;
}
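Written out as ordinary C++, the struct BuildByRefType assembles above has roughly the shape below (a conceptual sketch only: the field order follows the code, but the padding and helper fields depend on the target and on whether the payload requires copy/dispose). With copy/dispose helpers on a 32-bit target the header is 2*4 + 4*4 = 24 bytes, so a 16-byte-aligned payload would be preceded by 8 bytes of padding (24 rounded up to 32).

    #include <cstdint>

    // Hypothetical outline for a '__block' variable 'x' of type double; this is
    // not the LLVM struct type CodeGen emits, just its C++-level shape.
    struct Block_byref_x {
      void *isa;                  // __isa
      Block_byref_x *forwarding;  // __forwarding: points at the live copy (stack or heap)
      std::int32_t flags;         // __flags
      std::int32_t size;          // __size
      void *copy_helper;          // __copy_helper    -- only if the payload needs copy/dispose
      void *destroy_helper;       // __destroy_helper -- only if the payload needs copy/dispose
      // char padding[N];         // only if alignof(payload) > pointer alignment
      double x;                   // the variable itself, now stored in the struct
    };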
/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
@@ -255,10 +319,10 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
const llvm::Type *LTy = ConvertTypeForMem(Ty);
Align = getContext().getDeclAlignInBytes(&D);
if (isByRef)
- LTy = BuildByRefType(Ty, Align);
+ LTy = BuildByRefType(&D);
llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
Alloc->setName(D.getNameAsString().c_str());
-
+
if (isByRef)
Align = std::max(Align, unsigned(Target.getPointerAlign(0) / 8));
Alloc->setAlignment(Align);
@@ -267,48 +331,55 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
// Targets that don't support recursion emit locals as globals.
const char *Class =
D.getStorageClass() == VarDecl::Register ? ".reg." : ".auto.";
- DeclPtr = CreateStaticBlockVarDecl(D, Class,
+ DeclPtr = CreateStaticBlockVarDecl(D, Class,
llvm::GlobalValue
::InternalLinkage);
}
-
+
+ // FIXME: Can this happen?
if (Ty->isVariablyModifiedType())
EmitVLASize(Ty);
} else {
+ EnsureInsertPoint();
+
if (!DidCallStackSave) {
// Save the stack.
- const llvm::Type *LTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ const llvm::Type *LTy = llvm::Type::getInt8PtrTy(VMContext);
llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack");
-
+
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
llvm::Value *V = Builder.CreateCall(F);
-
+
Builder.CreateStore(V, Stack);
DidCallStackSave = true;
-
+
{
// Push a cleanup block and restore the stack there.
CleanupScope scope(*this);
-
+
V = Builder.CreateLoad(Stack, "tmp");
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
Builder.CreateCall(F, V);
}
}
-
+
// Get the element type.
- const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
+ const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
const llvm::Type *LElemPtrTy =
llvm::PointerType::get(LElemTy, D.getType().getAddressSpace());
llvm::Value *VLASize = EmitVLASize(Ty);
// Downcast the VLA size expression
- VLASize = Builder.CreateIntCast(VLASize, llvm::Type::Int32Ty, false, "tmp");
-
+ VLASize = Builder.CreateIntCast(VLASize, llvm::Type::getInt32Ty(VMContext),
+ false, "tmp");
+
// Allocate memory for the array.
- llvm::Value *VLA = Builder.CreateAlloca(llvm::Type::Int8Ty, VLASize, "vla");
+ llvm::AllocaInst *VLA =
+ Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), VLASize, "vla");
+ VLA->setAlignment(getContext().getDeclAlignInBytes(&D));
+
DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp");
}
@@ -318,35 +389,39 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
// Emit debug info for local var declaration.
if (CGDebugInfo *DI = getDebugInfo()) {
+ assert(HaveInsertPoint() && "Unexpected unreachable point!");
+
DI->setLocation(D.getLocation());
if (Target.useGlobalsForAutomaticVariables()) {
DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
- }
- else if (isByRef) {
- llvm::Value *Loc;
- bool needsCopyDispose = BlockRequiresCopying(Ty);
- Loc = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
- Loc = Builder.CreateLoad(Loc, false);
- Loc = Builder.CreateBitCast(Loc, DeclPtr->getType());
- Loc = Builder.CreateStructGEP(Loc, needsCopyDispose*2+4, "x");
- DI->EmitDeclareOfAutoVariable(&D, Loc, Builder);
} else
DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
}
// If this local has an initializer, emit it now.
- if (const Expr *Init = D.getInit()) {
+ const Expr *Init = D.getInit();
+
+ // If we are at an unreachable point, we don't need to emit the initializer
+ // unless it contains a label.
+ if (!HaveInsertPoint()) {
+ if (!ContainsLabel(Init))
+ Init = 0;
+ else
+ EnsureInsertPoint();
+ }
+
+ if (Init) {
llvm::Value *Loc = DeclPtr;
- if (isByRef) {
- bool needsCopyDispose = BlockRequiresCopying(Ty);
- Loc = Builder.CreateStructGEP(DeclPtr, needsCopyDispose*2+4, "x");
- }
+ if (isByRef)
+ Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
+ D.getNameAsString());
+
if (Ty->isReferenceType()) {
- llvm::Value *V = EmitReferenceBindingToExpr(Init, Ty).getScalarVal();
- EmitStoreOfScalar(V, Loc, false, Ty);
+ RValue RV = EmitReferenceBindingToExpr(Init, Ty, /*IsInitializer=*/true);
+ EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Ty);
} else if (!hasAggregateLLVMType(Init->getType())) {
llvm::Value *V = EmitScalarExpr(Init);
- EmitStoreOfScalar(V, Loc, D.getType().isVolatileQualified(),
+ EmitStoreOfScalar(V, Loc, D.getType().isVolatileQualified(),
D.getType());
} else if (Init->getType()->isAnyComplexType()) {
EmitComplexExprIntoAddr(Init, Loc, D.getType().isVolatileQualified());
@@ -354,10 +429,11 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
EmitAggExpr(Init, Loc, D.getType().isVolatileQualified());
}
}
+
if (isByRef) {
- const llvm::PointerType *PtrToInt8Ty
- = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ const llvm::PointerType *PtrToInt8Ty = llvm::Type::getInt8PtrTy(VMContext);
+ EnsureInsertPoint();
llvm::Value *isa_field = Builder.CreateStructGEP(DeclPtr, 0);
llvm::Value *forwarding_field = Builder.CreateStructGEP(DeclPtr, 1);
llvm::Value *flags_field = Builder.CreateStructGEP(DeclPtr, 2);
@@ -383,19 +459,18 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
int isa = 0;
if (flag&BLOCK_FIELD_IS_WEAK)
isa = 1;
- V = llvm::ConstantInt::get(llvm::Type::Int32Ty, isa);
+ V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), isa);
V = Builder.CreateIntToPtr(V, PtrToInt8Ty, "isa");
Builder.CreateStore(V, isa_field);
- V = Builder.CreateBitCast(DeclPtr, PtrToInt8Ty, "forwarding");
- Builder.CreateStore(V, forwarding_field);
+ Builder.CreateStore(DeclPtr, forwarding_field);
- V = llvm::ConstantInt::get(llvm::Type::Int32Ty, flags);
+ V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), flags);
Builder.CreateStore(V, flags_field);
const llvm::Type *V1;
V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType();
- V = llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
(CGM.getTargetData().getTypeStoreSizeInBits(V1)
/ 8));
Builder.CreateStore(V, size_field);
@@ -413,13 +488,29 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
}
}
+ // Handle CXX destruction of variables.
+ QualType DtorTy(Ty);
+ if (const ArrayType *Array = DtorTy->getAs<ArrayType>())
+ DtorTy = Array->getElementType();
+ if (const RecordType *RT = DtorTy->getAs<RecordType>())
+ if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (!ClassDecl->hasTrivialDestructor()) {
+ const CXXDestructorDecl *D = ClassDecl->getDestructor(getContext());
+        assert(D && "EmitLocalBlockVarDecl - destructor is null");
+ assert(!Ty->getAs<ArrayType>() && "FIXME - destruction of arrays NYI");
+
+ CleanupScope scope(*this);
+ EmitCXXDestructorCall(D, Dtor_Complete, DeclPtr);
+ }
+ }
+
// Handle the cleanup attribute
if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
const FunctionDecl *FD = CA->getFunctionDecl();
-
- llvm::Constant* F = CGM.GetAddrOfFunction(GlobalDecl(FD));
+
+ llvm::Constant* F = CGM.GetAddrOfFunction(FD);
assert(F && "Could not find function!");
-
+
CleanupScope scope(*this);
const CGFunctionInfo &Info = CGM.getTypes().getFunctionInfo(FD);
@@ -428,15 +519,15 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
// the type of the pointer. An example of this is
// void f(void* arg);
// __attribute__((cleanup(f))) void *g;
- //
+ //
// To fix this we insert a bitcast here.
QualType ArgTy = Info.arg_begin()->type;
DeclPtr = Builder.CreateBitCast(DeclPtr, ConvertType(ArgTy));
-
+
CallArgList Args;
- Args.push_back(std::make_pair(RValue::get(DeclPtr),
+ Args.push_back(std::make_pair(RValue::get(DeclPtr),
getContext().getPointerType(D.getType())));
-
+
EmitCall(Info, F, Args);
}
@@ -448,14 +539,14 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
}
}
-/// Emit an alloca (or GlobalValue depending on target)
+/// Emit an alloca (or GlobalValue depending on target)
/// for the specified parameter and set up LocalDeclMap.
void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg) {
// FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
"Invalid argument to EmitParmDecl");
QualType Ty = D.getType();
-
+
llvm::Value *DeclPtr;
if (!Ty->isConstantSizeType()) {
// Variable sized values always are passed by-reference.
@@ -469,7 +560,7 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg) {
Name += ".addr";
DeclPtr = CreateTempAlloca(LTy);
DeclPtr->setName(Name.c_str());
-
+
// Store the initial value into the alloca.
EmitStoreOfScalar(Arg, DeclPtr, Ty.isVolatileQualified(), Ty);
} else {
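
A rough sketch of the __block byref record being initialized above may help orient readers; the ordering assumes the standard Blocks byref layout (isa, forwarding, flags, size, optional copy/dispose helpers, then the variable), and the struct and field names below are illustrative only, not declarations from this patch:

// Hypothetical C++ view of the byref slot built for "__block int x;".
struct __byref_x {
  void *isa;                     // 0 normally, 1 when BLOCK_FIELD_IS_WEAK is set
  struct __byref_x *forwarding;  // starts out pointing at this on-stack copy (DeclPtr)
  int flags;                     // the BLOCK_FIELD_* bits computed by the surrounding code
  int size;                      // getTypeStoreSizeInBits(V1) / 8
  // Present only when the captured type requires copy/dispose helpers
  // (BlockRequiresCopying(Ty) in the removed code):
  //   void (*byref_keep)(void *dst, void *src);
  //   void (*byref_dispose)(void *);
  int x;                         // the declared variable, at getByRefValueLLVMField(&D)
};
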
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 0951019f0108..2834dfeb780a 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -46,33 +46,38 @@ llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
/// EmitAnyExpr - Emit code to compute the specified expression which can have
/// any type. The result is returned as an RValue struct. If this is an
-/// aggregate expression, the aggloc/agglocvolatile arguments indicate where
-/// the result should be returned.
-RValue CodeGenFunction::EmitAnyExpr(const Expr *E, llvm::Value *AggLoc,
- bool isAggLocVolatile, bool IgnoreResult) {
+/// aggregate expression, the aggloc/agglocvolatile arguments indicate where the
+/// result should be returned.
+RValue CodeGenFunction::EmitAnyExpr(const Expr *E, llvm::Value *AggLoc,
+ bool IsAggLocVolatile, bool IgnoreResult,
+ bool IsInitializer) {
if (!hasAggregateLLVMType(E->getType()))
return RValue::get(EmitScalarExpr(E, IgnoreResult));
else if (E->getType()->isAnyComplexType())
return RValue::getComplex(EmitComplexExpr(E, false, false,
IgnoreResult, IgnoreResult));
-
- EmitAggExpr(E, AggLoc, isAggLocVolatile, IgnoreResult);
- return RValue::getAggregate(AggLoc, isAggLocVolatile);
+
+ EmitAggExpr(E, AggLoc, IsAggLocVolatile, IgnoreResult, IsInitializer);
+ return RValue::getAggregate(AggLoc, IsAggLocVolatile);
}
-/// EmitAnyExprToTemp - Similary to EmitAnyExpr(), however, the result
-/// will always be accessible even if no aggregate location is
-/// provided.
-RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E, llvm::Value *AggLoc,
- bool isAggLocVolatile) {
- if (!AggLoc && hasAggregateLLVMType(E->getType()) &&
+/// EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will
+/// always be accessible even if no aggregate location is provided.
+RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E,
+ bool IsAggLocVolatile,
+ bool IsInitializer) {
+ llvm::Value *AggLoc = 0;
+
+ if (hasAggregateLLVMType(E->getType()) &&
!E->getType()->isAnyComplexType())
AggLoc = CreateTempAlloca(ConvertType(E->getType()), "agg.tmp");
- return EmitAnyExpr(E, AggLoc, isAggLocVolatile);
+ return EmitAnyExpr(E, AggLoc, IsAggLocVolatile, /*IgnoreResult=*/false,
+ IsInitializer);
}
RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
- QualType DestType) {
+ QualType DestType,
+ bool IsInitializer) {
RValue Val;
if (E->isLvalue(getContext()) == Expr::LV_Valid) {
// Emit the expr as an lvalue.
@@ -81,14 +86,33 @@ RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
return RValue::get(LV.getAddress());
Val = EmitLoadOfLValue(LV, E->getType());
} else {
- Val = EmitAnyExprToTemp(E);
+ // FIXME: Initializers don't work with casts yet. For example
+ // const A& a = B();
+ // if B inherits from A.
+ Val = EmitAnyExprToTemp(E, /*IsAggLocVolatile=*/false,
+ IsInitializer);
+
+ if (IsInitializer) {
+ // We might have to destroy the temporary variable.
+ if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
+ if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (!ClassDecl->hasTrivialDestructor()) {
+ const CXXDestructorDecl *Dtor =
+ ClassDecl->getDestructor(getContext());
+
+ CleanupScope scope(*this);
+ EmitCXXDestructorCall(Dtor, Dtor_Complete, Val.getAggregateAddr());
+ }
+ }
+ }
+ }
}
if (Val.isAggregate()) {
Val = RValue::get(Val.getAggregateAddr());
} else {
// Create a temporary variable that we can bind the reference to.
- llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(E->getType()),
+ llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(E->getType()),
"reftmp");
if (Val.isScalar())
EmitStoreOfScalar(Val.getScalarVal(), Temp, false, E->getType());
@@ -101,13 +125,13 @@ RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
}
-/// getAccessedFieldNo - Given an encoded value and a result number, return
-/// the input field number being accessed.
-unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
+/// getAccessedFieldNo - Given an encoded value and a result number, return the
+/// input field number being accessed.
+unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
const llvm::Constant *Elts) {
if (isa<llvm::ConstantAggregateZero>(Elts))
return 0;
-
+
return cast<llvm::ConstantInt>(Elts->getOperand(Idx))->getZExtValue();
}
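
For orientation, getAccessedFieldNo above decodes the constant index vector attached to an ext-vector element lvalue. A standalone sketch of the same rule on plain integers, with invented example values rather than anything taken from this patch:

#include <vector>

// Result element Idx of a swizzle reads input field Elts[Idx]; an all-zero
// encoding (ConstantAggregateZero in the real code) maps everything to field 0.
static unsigned getAccessedFieldNoSketch(unsigned Idx,
                                         const std::vector<unsigned> &Elts) {
  if (Elts.empty())   // stands in for the ConstantAggregateZero case
    return 0;
  return Elts[Idx];
}

// e.g. for "v.zx" the encoding would be {2, 0}: result element 0 reads field
// z (2), result element 1 reads field x (0).
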
@@ -119,7 +143,7 @@ unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
if (Ty->isVoidType()) {
return RValue::get(0);
- } else if (const ComplexType *CTy = Ty->getAsComplexType()) {
+ } else if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
const llvm::Type *EltTy = ConvertType(CTy->getElementType());
llvm::Value *U = llvm::UndefValue::get(EltTy);
return RValue::getComplex(std::make_pair(U, U));
@@ -142,38 +166,37 @@ LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
ErrorUnsupported(E, Name);
llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
return LValue::MakeAddr(llvm::UndefValue::get(Ty),
- E->getType().getCVRQualifiers(),
- getContext().getObjCGCAttrKind(E->getType()));
+ MakeQualifiers(E->getType()));
}
/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
-/// This can return one of two things: a simple address or a bitfield
-/// reference. In either case, the LLVM Value* in the LValue structure is
-/// guaranteed to be an LLVM pointer type.
+/// This can return one of two things: a simple address or a bitfield reference.
+/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
+/// an LLVM pointer type.
///
-/// If this returns a bitfield reference, nothing about the pointee type of
-/// the LLVM value is known: For example, it may not be a pointer to an
-/// integer.
+/// If this returns a bitfield reference, nothing about the pointee type of the
+/// LLVM value is known: For example, it may not be a pointer to an integer.
///
-/// If this returns a normal address, and if the lvalue's C type is fixed
-/// size, this method guarantees that the returned pointer type will point to
-/// an LLVM type of the same size of the lvalue's type. If the lvalue has a
-/// variable length type, this is not possible.
+/// If this returns a normal address, and if the lvalue's C type is fixed size,
+/// this method guarantees that the returned pointer type will point to an LLVM
+/// type of the same size of the lvalue's type. If the lvalue has a variable
+/// length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
switch (E->getStmtClass()) {
default: return EmitUnsupportedLValue(E, "l-value expression");
- case Expr::BinaryOperatorClass:
+ case Expr::BinaryOperatorClass:
return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
- case Expr::CallExprClass:
+ case Expr::CallExprClass:
+ case Expr::CXXMemberCallExprClass:
case Expr::CXXOperatorCallExprClass:
return EmitCallExprLValue(cast<CallExpr>(E));
case Expr::VAArgExprClass:
return EmitVAArgExprLValue(cast<VAArgExpr>(E));
- case Expr::DeclRefExprClass:
+ case Expr::DeclRefExprClass:
case Expr::QualifiedDeclRefExprClass:
return EmitDeclRefLValue(cast<DeclRefExpr>(E));
case Expr::ParenExprClass:return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
@@ -184,7 +207,7 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::ObjCEncodeExprClass:
return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
- case Expr::BlockDeclRefExprClass:
+ case Expr::BlockDeclRefExprClass:
return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E));
case Expr::CXXConditionDeclExprClass:
@@ -194,31 +217,34 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
case Expr::CXXBindTemporaryExprClass:
return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
+ case Expr::CXXExprWithTemporariesClass:
+ return EmitCXXExprWithTemporariesLValue(cast<CXXExprWithTemporaries>(E));
case Expr::ObjCMessageExprClass:
return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
- case Expr::ObjCIvarRefExprClass:
+ case Expr::ObjCIvarRefExprClass:
return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
case Expr::ObjCPropertyRefExprClass:
return EmitObjCPropertyRefLValue(cast<ObjCPropertyRefExpr>(E));
- case Expr::ObjCKVCRefExprClass:
- return EmitObjCKVCRefLValue(cast<ObjCKVCRefExpr>(E));
+ case Expr::ObjCImplicitSetterGetterRefExprClass:
+ return EmitObjCKVCRefLValue(cast<ObjCImplicitSetterGetterRefExpr>(E));
case Expr::ObjCSuperExprClass:
return EmitObjCSuperExprLValue(cast<ObjCSuperExpr>(E));
case Expr::StmtExprClass:
return EmitStmtExprLValue(cast<StmtExpr>(E));
- case Expr::UnaryOperatorClass:
+ case Expr::UnaryOperatorClass:
return EmitUnaryOpLValue(cast<UnaryOperator>(E));
case Expr::ArraySubscriptExprClass:
return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
case Expr::ExtVectorElementExprClass:
return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
- case Expr::MemberExprClass: return EmitMemberExpr(cast<MemberExpr>(E));
+ case Expr::MemberExprClass:
+ return EmitMemberExpr(cast<MemberExpr>(E));
case Expr::CompoundLiteralExprClass:
return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
case Expr::ConditionalOperatorClass:
- return EmitConditionalOperator(cast<ConditionalOperator>(E));
+ return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
case Expr::ChooseExprClass:
return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
case Expr::ImplicitCastExprClass:
@@ -238,55 +264,54 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
// Bool can have different representation in memory than in registers.
if (Ty->isBooleanType())
- if (V->getType() != llvm::Type::Int1Ty)
- V = Builder.CreateTrunc(V, llvm::Type::Int1Ty, "tobool");
-
+ if (V->getType() != llvm::Type::getInt1Ty(VMContext))
+ V = Builder.CreateTrunc(V, llvm::Type::getInt1Ty(VMContext), "tobool");
+
return V;
}
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
bool Volatile, QualType Ty) {
-
+
if (Ty->isBooleanType()) {
// Bool can have different representation in memory than in registers.
const llvm::Type *SrcTy = Value->getType();
const llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType());
if (DstPtr->getElementType() != SrcTy) {
- const llvm::Type *MemTy =
+ const llvm::Type *MemTy =
llvm::PointerType::get(SrcTy, DstPtr->getAddressSpace());
Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp");
}
}
-
- Builder.CreateStore(Value, Addr, Volatile);
+ Builder.CreateStore(Value, Addr, Volatile);
}
-/// EmitLoadOfLValue - Given an expression that represents a value lvalue,
-/// this method emits the address of the lvalue, then loads the result as an
-/// rvalue, returning the rvalue.
+/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
+/// method emits the address of the lvalue, then loads the result as an rvalue,
+/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) {
if (LV.isObjCWeak()) {
- // load of a __weak object.
+ // load of a __weak object.
llvm::Value *AddrWeakObj = LV.getAddress();
- llvm::Value *read_weak = CGM.getObjCRuntime().EmitObjCWeakRead(*this,
+ llvm::Value *read_weak = CGM.getObjCRuntime().EmitObjCWeakRead(*this,
AddrWeakObj);
return RValue::get(read_weak);
}
-
+
if (LV.isSimple()) {
llvm::Value *Ptr = LV.getAddress();
const llvm::Type *EltTy =
cast<llvm::PointerType>(Ptr->getType())->getElementType();
-
+
// Simple scalar l-value.
if (EltTy->isSingleValueType())
- return RValue::get(EmitLoadOfScalar(Ptr, LV.isVolatileQualified(),
+ return RValue::get(EmitLoadOfScalar(Ptr, LV.isVolatileQualified(),
ExprType));
-
+
assert(ExprType->isFunctionType() && "Unknown scalar value");
return RValue::get(Ptr);
}
-
+
if (LV.isVectorElt()) {
llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(),
LV.isVolatileQualified(), "tmp");
@@ -315,59 +340,58 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
unsigned BitfieldSize = LV.getBitfieldSize();
llvm::Value *Ptr = LV.getBitfieldAddr();
- const llvm::Type *EltTy =
+ const llvm::Type *EltTy =
cast<llvm::PointerType>(Ptr->getType())->getElementType();
unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy);
- // In some cases the bitfield may straddle two memory locations.
- // Currently we load the entire bitfield, then do the magic to
- // sign-extend it if necessary. This results in somewhat more code
- // than necessary for the common case (one load), since two shifts
- // accomplish both the masking and sign extension.
+ // In some cases the bitfield may straddle two memory locations. Currently we
+ // load the entire bitfield, then do the magic to sign-extend it if
+ // necessary. This results in somewhat more code than necessary for the common
+ // case (one load), since two shifts accomplish both the masking and sign
+ // extension.
unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit);
llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "tmp");
-
+
// Shift to proper location.
if (StartBit)
- Val = Builder.CreateLShr(Val, llvm::ConstantInt::get(EltTy, StartBit),
+ Val = Builder.CreateLShr(Val, llvm::ConstantInt::get(EltTy, StartBit),
"bf.lo");
-
+
// Mask off unused bits.
- llvm::Constant *LowMask =
- llvm::ConstantInt::get(llvm::APInt::getLowBitsSet(EltTySize, LowBits));
+ llvm::Constant *LowMask = llvm::ConstantInt::get(VMContext,
+ llvm::APInt::getLowBitsSet(EltTySize, LowBits));
Val = Builder.CreateAnd(Val, LowMask, "bf.lo.cleared");
-
+
// Fetch the high bits if necessary.
if (LowBits < BitfieldSize) {
unsigned HighBits = BitfieldSize - LowBits;
- llvm::Value *HighPtr =
- Builder.CreateGEP(Ptr, llvm::ConstantInt::get(llvm::Type::Int32Ty, 1),
- "bf.ptr.hi");
- llvm::Value *HighVal = Builder.CreateLoad(HighPtr,
+ llvm::Value *HighPtr = Builder.CreateGEP(Ptr, llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), 1), "bf.ptr.hi");
+ llvm::Value *HighVal = Builder.CreateLoad(HighPtr,
LV.isVolatileQualified(),
"tmp");
-
+
// Mask off unused bits.
- llvm::Constant *HighMask =
- llvm::ConstantInt::get(llvm::APInt::getLowBitsSet(EltTySize, HighBits));
+ llvm::Constant *HighMask = llvm::ConstantInt::get(VMContext,
+ llvm::APInt::getLowBitsSet(EltTySize, HighBits));
HighVal = Builder.CreateAnd(HighVal, HighMask, "bf.lo.cleared");
// Shift to proper location and or in to bitfield value.
- HighVal = Builder.CreateShl(HighVal,
+ HighVal = Builder.CreateShl(HighVal,
llvm::ConstantInt::get(EltTy, LowBits));
Val = Builder.CreateOr(Val, HighVal, "bf.val");
}
// Sign extend if necessary.
if (LV.isBitfieldSigned()) {
- llvm::Value *ExtraBits = llvm::ConstantInt::get(EltTy,
+ llvm::Value *ExtraBits = llvm::ConstantInt::get(EltTy,
EltTySize - BitfieldSize);
- Val = Builder.CreateAShr(Builder.CreateShl(Val, ExtraBits),
+ Val = Builder.CreateAShr(Builder.CreateShl(Val, ExtraBits),
ExtraBits, "bf.val.sext");
}
- // The bitfield type and the normal type differ when the storage sizes
- // differ (currently just _Bool).
+ // The bitfield type and the normal type differ when the storage sizes differ
+ // (currently just _Bool).
Val = Builder.CreateIntCast(Val, ConvertType(ExprType), false, "tmp");
return RValue::get(Val);
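
The IR-builder form of the straddling load above is easier to follow as plain integer arithmetic. A minimal sketch, assuming 32-bit storage units and treating StartBit/BitfieldSize as illustrative parameters rather than values from this patch:

#include <algorithm>
#include <cstdint>

// Load BitfieldSize bits starting at StartBit from 32-bit storage units,
// mirroring the low-part / high-part / sign-extend steps above.
int32_t loadBitfieldSketch(const uint32_t *Ptr, unsigned StartBit,
                           unsigned BitfieldSize, bool IsSigned) {
  const unsigned EltTySize = 32;
  unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit);

  // Shift the low part into place and mask off bits that are not ours.
  uint32_t LowMask = LowBits == 32 ? ~0u : ((1u << LowBits) - 1);
  uint32_t Val = (Ptr[0] >> StartBit) & LowMask;

  // Fetch the high part only if the field spills into the next unit.
  if (LowBits < BitfieldSize) {
    unsigned HighBits = BitfieldSize - LowBits;
    uint32_t High = Ptr[1] & ((1u << HighBits) - 1);
    Val |= High << LowBits;
  }

  // Sign-extend: shift the field up to the top bit, arithmetic-shift back.
  if (IsSigned) {
    unsigned Extra = EltTySize - BitfieldSize;
    return (int32_t)(Val << Extra) >> Extra;
  }
  return (int32_t)Val;
}
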
@@ -389,27 +413,29 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
QualType ExprType) {
llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(),
LV.isVolatileQualified(), "tmp");
-
+
const llvm::Constant *Elts = LV.getExtVectorElts();
-
- // If the result of the expression is a non-vector type, we must be
- // extracting a single element. Just codegen as an extractelement.
- const VectorType *ExprVT = ExprType->getAsVectorType();
+
+ // If the result of the expression is a non-vector type, we must be extracting
+ // a single element. Just codegen as an extractelement.
+ const VectorType *ExprVT = ExprType->getAs<VectorType>();
if (!ExprVT) {
unsigned InIdx = getAccessedFieldNo(0, Elts);
- llvm::Value *Elt = llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx);
+ llvm::Value *Elt = llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), InIdx);
return RValue::get(Builder.CreateExtractElement(Vec, Elt, "tmp"));
}
// Always use shuffle vector to try to retain the original program structure
unsigned NumResultElts = ExprVT->getNumElements();
-
+
llvm::SmallVector<llvm::Constant*, 4> Mask;
for (unsigned i = 0; i != NumResultElts; ++i) {
unsigned InIdx = getAccessedFieldNo(i, Elts);
- Mask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx));
+ Mask.push_back(llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), InIdx));
}
-
+
llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
Vec = Builder.CreateShuffleVector(Vec,
llvm::UndefValue::get(Vec->getType()),
@@ -422,7 +448,7 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
-void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
+void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
QualType Ty) {
if (!Dst.isSimple()) {
if (Dst.isVectorElt()) {
@@ -434,7 +460,7 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
Builder.CreateStore(Vec, Dst.getVectorAddr(),Dst.isVolatileQualified());
return;
}
-
+
// If this is an update of extended vector elements, insert them as
// appropriate.
if (Dst.isExtVectorElt())
@@ -451,58 +477,60 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
assert(0 && "Unknown LValue type");
}
-
+
if (Dst.isObjCWeak() && !Dst.isNonGC()) {
- // load of a __weak object.
+ // load of a __weak object.
llvm::Value *LvalueDst = Dst.getAddress();
llvm::Value *src = Src.getScalarVal();
CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
return;
}
-
+
if (Dst.isObjCStrong() && !Dst.isNonGC()) {
- // load of a __strong object.
+ // load of a __strong object.
llvm::Value *LvalueDst = Dst.getAddress();
llvm::Value *src = Src.getScalarVal();
-#if 0
- // FIXME. We cannot positively determine if we have an 'ivar' assignment,
- // object assignment or an unknown assignment. For now, generate call to
- // objc_assign_strongCast assignment which is a safe, but consevative
- // assumption.
- if (Dst.isObjCIvar())
- CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, LvalueDst);
- else
- CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst);
-#endif
- if (Dst.isGlobalObjCRef())
+ if (Dst.isObjCIvar()) {
+ assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
+ const llvm::Type *ResultType = ConvertType(getContext().LongTy);
+ llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
+ llvm::Value *dst = RHS;
+ RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
+ llvm::Value *LHS =
+ Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
+ llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
+ CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
+ BytesBetween);
+ }
+ else if (Dst.isGlobalObjCRef())
CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst);
else
CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
return;
}
-
+
assert(Src.isScalar() && "Can't emit an agg store with this method");
EmitStoreOfScalar(Src.getScalarVal(), Dst.getAddress(),
Dst.isVolatileQualified(), Ty);
}
void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
- QualType Ty,
+ QualType Ty,
llvm::Value **Result) {
unsigned StartBit = Dst.getBitfieldStartBit();
unsigned BitfieldSize = Dst.getBitfieldSize();
llvm::Value *Ptr = Dst.getBitfieldAddr();
- const llvm::Type *EltTy =
+ const llvm::Type *EltTy =
cast<llvm::PointerType>(Ptr->getType())->getElementType();
unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy);
- // Get the new value, cast to the appropriate type and masked to
- // exactly the size of the bit-field.
+ // Get the new value, cast to the appropriate type and masked to exactly the
+ // size of the bit-field.
llvm::Value *SrcVal = Src.getScalarVal();
llvm::Value *NewVal = Builder.CreateIntCast(SrcVal, EltTy, false, "tmp");
- llvm::Constant *Mask =
- llvm::ConstantInt::get(llvm::APInt::getLowBitsSet(EltTySize, BitfieldSize));
+ llvm::Constant *Mask = llvm::ConstantInt::get(VMContext,
+ llvm::APInt::getLowBitsSet(EltTySize, BitfieldSize));
NewVal = Builder.CreateAnd(NewVal, Mask, "bf.value");
// Return the new value of the bit-field, if requested.
@@ -517,61 +545,60 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
unsigned SrcTySize = CGM.getTargetData().getTypeSizeInBits(SrcTy);
llvm::Value *ExtraBits = llvm::ConstantInt::get(SrcTy,
SrcTySize - BitfieldSize);
- SrcTrunc = Builder.CreateAShr(Builder.CreateShl(SrcTrunc, ExtraBits),
+ SrcTrunc = Builder.CreateAShr(Builder.CreateShl(SrcTrunc, ExtraBits),
ExtraBits, "bf.reload.sext");
}
*Result = SrcTrunc;
}
- // In some cases the bitfield may straddle two memory locations.
- // Emit the low part first and check to see if the high needs to be
- // done.
+ // In some cases the bitfield may straddle two memory locations. Emit the low
+ // part first and check to see if the high needs to be done.
unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit);
llvm::Value *LowVal = Builder.CreateLoad(Ptr, Dst.isVolatileQualified(),
"bf.prev.low");
// Compute the mask for zero-ing the low part of this bitfield.
- llvm::Constant *InvMask =
- llvm::ConstantInt::get(~llvm::APInt::getBitsSet(EltTySize, StartBit,
- StartBit + LowBits));
-
+ llvm::Constant *InvMask =
+ llvm::ConstantInt::get(VMContext,
+ ~llvm::APInt::getBitsSet(EltTySize, StartBit, StartBit + LowBits));
+
// Compute the new low part as
// LowVal = (LowVal & InvMask) | (NewVal << StartBit),
// with the shift of NewVal implicitly stripping the high bits.
- llvm::Value *NewLowVal =
- Builder.CreateShl(NewVal, llvm::ConstantInt::get(EltTy, StartBit),
- "bf.value.lo");
+ llvm::Value *NewLowVal =
+ Builder.CreateShl(NewVal, llvm::ConstantInt::get(EltTy, StartBit),
+ "bf.value.lo");
LowVal = Builder.CreateAnd(LowVal, InvMask, "bf.prev.lo.cleared");
LowVal = Builder.CreateOr(LowVal, NewLowVal, "bf.new.lo");
-
+
// Write back.
Builder.CreateStore(LowVal, Ptr, Dst.isVolatileQualified());
// If the low part doesn't cover the bitfield emit a high part.
if (LowBits < BitfieldSize) {
unsigned HighBits = BitfieldSize - LowBits;
- llvm::Value *HighPtr =
- Builder.CreateGEP(Ptr, llvm::ConstantInt::get(llvm::Type::Int32Ty, 1),
- "bf.ptr.hi");
- llvm::Value *HighVal = Builder.CreateLoad(HighPtr,
+ llvm::Value *HighPtr = Builder.CreateGEP(Ptr, llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), 1), "bf.ptr.hi");
+ llvm::Value *HighVal = Builder.CreateLoad(HighPtr,
Dst.isVolatileQualified(),
"bf.prev.hi");
-
+
// Compute the mask for zero-ing the high part of this bitfield.
- llvm::Constant *InvMask =
- llvm::ConstantInt::get(~llvm::APInt::getLowBitsSet(EltTySize, HighBits));
-
+ llvm::Constant *InvMask =
+ llvm::ConstantInt::get(VMContext, ~llvm::APInt::getLowBitsSet(EltTySize,
+ HighBits));
+
// Compute the new high part as
// HighVal = (HighVal & InvMask) | (NewVal lshr LowBits),
// where the high bits of NewVal have already been cleared and the
// shift stripping the low bits.
- llvm::Value *NewHighVal =
- Builder.CreateLShr(NewVal, llvm::ConstantInt::get(EltTy, LowBits),
- "bf.value.high");
+ llvm::Value *NewHighVal =
+ Builder.CreateLShr(NewVal, llvm::ConstantInt::get(EltTy, LowBits),
+ "bf.value.high");
HighVal = Builder.CreateAnd(HighVal, InvMask, "bf.prev.hi.cleared");
HighVal = Builder.CreateOr(HighVal, NewHighVal, "bf.new.hi");
-
+
// Write back.
Builder.CreateStore(HighVal, HighPtr, Dst.isVolatileQualified());
}
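
The store path is the mirror image of the load: truncate the new value to the field width, clear the destination bits with an inverted mask, then OR the value in, spilling the top bits into the next storage unit when the field straddles. The same recipe on plain 32-bit integers, again with made-up parameters for illustration (volatility and the optional reload of the result are omitted):

#include <algorithm>
#include <cstdint>

// Store BitfieldSize bits of NewVal at StartBit inside 32-bit storage units,
// following the low-part / high-part masking done above.
void storeBitfieldSketch(uint32_t *Ptr, unsigned StartBit,
                         unsigned BitfieldSize, uint32_t NewVal) {
  const unsigned EltTySize = 32;

  // Truncate the source to exactly the field width ("bf.value").
  uint32_t FieldMask = BitfieldSize == 32 ? ~0u : ((1u << BitfieldSize) - 1);
  NewVal &= FieldMask;

  // Low part: clear [StartBit, StartBit+LowBits) and OR in the shifted value;
  // the shift implicitly drops any bits destined for the next unit.
  unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit);
  uint32_t LowMask = LowBits == 32 ? ~0u : ((1u << LowBits) - 1);
  Ptr[0] = (Ptr[0] & ~(LowMask << StartBit)) | (NewVal << StartBit);

  // High part: clear the low HighBits of the next unit and OR in the rest.
  if (LowBits < BitfieldSize) {
    unsigned HighBits = BitfieldSize - LowBits;
    Ptr[1] = (Ptr[1] & ~((1u << HighBits) - 1)) | (NewVal >> LowBits);
  }
}
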
@@ -597,29 +624,29 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
Dst.isVolatileQualified(), "tmp");
const llvm::Constant *Elts = Dst.getExtVectorElts();
-
+
llvm::Value *SrcVal = Src.getScalarVal();
-
- if (const VectorType *VTy = Ty->getAsVectorType()) {
+
+ if (const VectorType *VTy = Ty->getAs<VectorType>()) {
unsigned NumSrcElts = VTy->getNumElements();
unsigned NumDstElts =
cast<llvm::VectorType>(Vec->getType())->getNumElements();
if (NumDstElts == NumSrcElts) {
- // Use shuffle vector is the src and destination are the same number
- // of elements and restore the vector mask since it is on the side
- // it will be stored.
+      // Use shuffle vector if the src and destination are the same number of
+ // elements and restore the vector mask since it is on the side it will be
+ // stored.
llvm::SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
for (unsigned i = 0; i != NumSrcElts; ++i) {
unsigned InIdx = getAccessedFieldNo(i, Elts);
- Mask[InIdx] = llvm::ConstantInt::get(llvm::Type::Int32Ty, i);
+ Mask[InIdx] = llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), i);
}
-
+
llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
Vec = Builder.CreateShuffleVector(SrcVal,
llvm::UndefValue::get(Vec->getType()),
MaskV, "tmp");
- }
- else if (NumDstElts > NumSrcElts) {
+ } else if (NumDstElts > NumSrcElts) {
// Extended the source vector to the same length and then shuffle it
// into the destination.
// FIXME: since we're shuffling with undef, can we just use the indices
@@ -627,96 +654,153 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
llvm::SmallVector<llvm::Constant*, 4> ExtMask;
unsigned i;
for (i = 0; i != NumSrcElts; ++i)
- ExtMask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, i));
+ ExtMask.push_back(llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), i));
for (; i != NumDstElts; ++i)
- ExtMask.push_back(llvm::UndefValue::get(llvm::Type::Int32Ty));
+ ExtMask.push_back(llvm::UndefValue::get(
+ llvm::Type::getInt32Ty(VMContext)));
llvm::Value *ExtMaskV = llvm::ConstantVector::get(&ExtMask[0],
ExtMask.size());
- llvm::Value *ExtSrcVal =
+ llvm::Value *ExtSrcVal =
Builder.CreateShuffleVector(SrcVal,
llvm::UndefValue::get(SrcVal->getType()),
ExtMaskV, "tmp");
// build identity
llvm::SmallVector<llvm::Constant*, 4> Mask;
for (unsigned i = 0; i != NumDstElts; ++i) {
- Mask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, i));
+ Mask.push_back(llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), i));
}
// modify when what gets shuffled in
for (unsigned i = 0; i != NumSrcElts; ++i) {
unsigned Idx = getAccessedFieldNo(i, Elts);
- Mask[Idx] =llvm::ConstantInt::get(llvm::Type::Int32Ty, i+NumDstElts);
+ Mask[Idx] = llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), i+NumDstElts);
}
llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV, "tmp");
- }
- else {
+ } else {
// We should never shorten the vector
assert(0 && "unexpected shorten vector length");
}
} else {
// If the Src is a scalar (not a vector) it must be updating one element.
unsigned InIdx = getAccessedFieldNo(0, Elts);
- llvm::Value *Elt = llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx);
+ llvm::Value *Elt = llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), InIdx);
Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp");
}
-
+
Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified());
}
+// setObjCGCLValueClass - sets the class of the lvalue for the purpose of
+// generating write-barrier API calls. It is currently a global, ivar,
+// or neither.
+static
+void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E, LValue &LV) {
+ if (Ctx.getLangOptions().getGCMode() == LangOptions::NonGC)
+ return;
+
+ if (isa<ObjCIvarRefExpr>(E)) {
+ LV.SetObjCIvar(LV, true);
+ ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
+ LV.setBaseIvarExp(Exp->getBase());
+ LV.SetObjCArray(LV, E->getType()->isArrayType());
+ return;
+ }
+ if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
+ if ((VD->isBlockVarDecl() && !VD->hasLocalStorage()) ||
+ VD->isFileVarDecl())
+ LV.SetGlobalObjCRef(LV, true);
+ }
+ LV.SetObjCArray(LV, E->getType()->isArrayType());
+ }
+ else if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E))
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
+ else if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
+ if (LV.isObjCIvar()) {
+ // If cast is to a structure pointer, follow gcc's behavior and make it
+ // a non-ivar write-barrier.
+ QualType ExpTy = E->getType();
+ if (ExpTy->isPointerType())
+ ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
+ if (ExpTy->isRecordType())
+ LV.SetObjCIvar(LV, false);
+ }
+ }
+ else if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E))
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
+ else if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E))
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
+ else if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
+ if (LV.isObjCIvar() && !LV.isObjCArray())
+      // Using array syntax to assign to what an ivar points to is not the
+      // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
+ LV.SetObjCIvar(LV, false);
+ else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
+      // Using array syntax to assign to what a global points to is not the
+      // same as assigning to the global itself. {id *G;} G[i] = 0;
+ LV.SetGlobalObjCRef(LV, false);
+ }
+ else if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
+  // We don't know if the member is an 'ivar', but this flag is looked at
+ // only in the context of LV.isObjCIvar().
+ LV.SetObjCArray(LV, E->getType()->isArrayType());
+ }
+}
+
LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
const VarDecl *VD = dyn_cast<VarDecl>(E->getDecl());
-
+
if (VD && (VD->isBlockVarDecl() || isa<ParmVarDecl>(VD) ||
isa<ImplicitParamDecl>(VD))) {
LValue LV;
- bool NonGCable = VD->hasLocalStorage() &&
+ bool NonGCable = VD->hasLocalStorage() &&
!VD->hasAttr<BlocksAttr>();
if (VD->hasExternalStorage()) {
llvm::Value *V = CGM.GetAddrOfGlobalVar(VD);
if (VD->getType()->isReferenceType())
V = Builder.CreateLoad(V, "tmp");
- LV = LValue::MakeAddr(V, E->getType().getCVRQualifiers(),
- getContext().getObjCGCAttrKind(E->getType()));
- }
- else {
+ LV = LValue::MakeAddr(V, MakeQualifiers(E->getType()));
+ } else {
llvm::Value *V = LocalDeclMap[VD];
assert(V && "DeclRefExpr not entered in LocalDeclMap?");
+
+ Qualifiers Quals = MakeQualifiers(E->getType());
// local variables do not get their gc attribute set.
- QualType::GCAttrTypes attr = QualType::GCNone;
// local static?
- if (!NonGCable)
- attr = getContext().getObjCGCAttrKind(E->getType());
+ if (NonGCable) Quals.removeObjCGCAttr();
+
if (VD->hasAttr<BlocksAttr>()) {
- bool needsCopyDispose = BlockRequiresCopying(VD->getType());
- const llvm::Type *PtrStructTy = V->getType();
- const llvm::Type *Ty = PtrStructTy;
- Ty = llvm::PointerType::get(Ty, 0);
V = Builder.CreateStructGEP(V, 1, "forwarding");
- V = Builder.CreateBitCast(V, Ty);
V = Builder.CreateLoad(V, false);
- V = Builder.CreateBitCast(V, PtrStructTy);
- V = Builder.CreateStructGEP(V, needsCopyDispose*2 + 4, "x");
+ V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD),
+ VD->getNameAsString());
}
if (VD->getType()->isReferenceType())
V = Builder.CreateLoad(V, "tmp");
- LV = LValue::MakeAddr(V, E->getType().getCVRQualifiers(), attr);
+ LV = LValue::MakeAddr(V, Quals);
}
LValue::SetObjCNonGC(LV, NonGCable);
+ setObjCGCLValueClass(getContext(), E, LV);
return LV;
} else if (VD && VD->isFileVarDecl()) {
llvm::Value *V = CGM.GetAddrOfGlobalVar(VD);
if (VD->getType()->isReferenceType())
V = Builder.CreateLoad(V, "tmp");
- LValue LV = LValue::MakeAddr(V, E->getType().getCVRQualifiers(),
- getContext().getObjCGCAttrKind(E->getType()));
- if (LV.isObjCStrong())
- LV.SetGlobalObjCRef(LV, true);
+ LValue LV = LValue::MakeAddr(V, MakeQualifiers(E->getType()));
+ setObjCGCLValueClass(getContext(), E, LV);
return LV;
} else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(E->getDecl())) {
- llvm::Value* V = CGM.GetAddrOfFunction(GlobalDecl(FD));
+ llvm::Value* V = CGM.GetAddrOfFunction(FD);
if (!FD->hasPrototype()) {
if (const FunctionProtoType *Proto =
- FD->getType()->getAsFunctionProtoType()) {
+ FD->getType()->getAs<FunctionProtoType>()) {
// Ugly case: for a K&R-style definition, the type of the definition
// isn't the same as the type of a use. Correct for this with a
// bitcast.
@@ -726,15 +810,12 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
V = Builder.CreateBitCast(V, ConvertType(NoProtoType), "tmp");
}
}
- return LValue::MakeAddr(V, E->getType().getCVRQualifiers(),
- getContext().getObjCGCAttrKind(E->getType()));
- }
- else if (const ImplicitParamDecl *IPD =
+ return LValue::MakeAddr(V, MakeQualifiers(E->getType()));
+ } else if (const ImplicitParamDecl *IPD =
dyn_cast<ImplicitParamDecl>(E->getDecl())) {
llvm::Value *V = LocalDeclMap[IPD];
assert(V && "BlockVarDecl not entered in LocalDeclMap?");
- return LValue::MakeAddr(V, E->getType().getCVRQualifiers(),
- getContext().getObjCGCAttrKind(E->getType()));
+ return LValue::MakeAddr(V, MakeQualifiers(E->getType()));
}
assert(0 && "Unimp declref");
//an invalid LValue, but the assert will
@@ -743,27 +824,26 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
}
LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
- return LValue::MakeAddr(GetAddrOfBlockDecl(E),
- E->getType().getCVRQualifiers(),
- getContext().getObjCGCAttrKind(E->getType()));
+ return LValue::MakeAddr(GetAddrOfBlockDecl(E), MakeQualifiers(E->getType()));
}
LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
// __extension__ doesn't affect lvalue-ness.
if (E->getOpcode() == UnaryOperator::Extension)
return EmitLValue(E->getSubExpr());
-
+
QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
switch (E->getOpcode()) {
default: assert(0 && "Unknown unary operator lvalue!");
case UnaryOperator::Deref:
{
- QualType T =
- E->getSubExpr()->getType()->getAsPointerType()->getPointeeType();
- LValue LV = LValue::MakeAddr(EmitScalarExpr(E->getSubExpr()),
- ExprTy->getAsPointerType()->getPointeeType()
- .getCVRQualifiers(),
- getContext().getObjCGCAttrKind(T));
+ QualType T = E->getSubExpr()->getType()->getPointeeType();
+ assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
+
+ Qualifiers Quals = MakeQualifiers(T);
+ Quals.setAddressSpace(ExprTy.getAddressSpace());
+
+ LValue LV = LValue::MakeAddr(EmitScalarExpr(E->getSubExpr()), Quals);
// We should not generate __weak write barrier on indirect reference
// of a pointer to object; as in void foo (__weak id *param); *param = 0;
// But, we continue to generate __strong write barrier on indirect write
@@ -780,16 +860,18 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
unsigned Idx = E->getOpcode() == UnaryOperator::Imag;
return LValue::MakeAddr(Builder.CreateStructGEP(LV.getAddress(),
Idx, "idx"),
- ExprTy.getCVRQualifiers());
+ MakeQualifiers(ExprTy));
}
}
LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
- return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromLiteral(E), 0);
+ return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromLiteral(E),
+ Qualifiers());
}
LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
- return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromObjCEncode(E), 0);
+ return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromObjCEncode(E),
+ Qualifiers());
}
@@ -806,32 +888,25 @@ LValue CodeGenFunction::EmitPredefinedFunctionName(unsigned Type) {
GlobalVarName = "__FUNCTION__.";
break;
case PredefinedExpr::PrettyFunction:
- // FIXME:: Demangle C++ method names
GlobalVarName = "__PRETTY_FUNCTION__.";
break;
}
- // FIXME: This isn't right at all. The logic for computing this should go
- // into a method on PredefinedExpr. This would allow sema and codegen to be
- // consistent for things like sizeof(__func__) etc.
- std::string FunctionName;
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
- FunctionName = CGM.getMangledName(FD);
- } else {
- // Just get the mangled name; skipping the asm prefix if it
- // exists.
- FunctionName = CurFn->getName();
- if (FunctionName[0] == '\01')
- FunctionName = FunctionName.substr(1, std::string::npos);
- }
+ llvm::StringRef FnName = CurFn->getName();
+ if (FnName.startswith("\01"))
+ FnName = FnName.substr(1);
+ GlobalVarName += FnName;
- GlobalVarName += FunctionName;
- llvm::Constant *C =
+ std::string FunctionName =
+ PredefinedExpr::ComputeName(getContext(), (PredefinedExpr::IdentType)Type,
+ CurCodeDecl);
+
+ llvm::Constant *C =
CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
- return LValue::MakeAddr(C, 0);
+ return LValue::MakeAddr(C, Qualifiers());
}
-LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
+LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
switch (E->getIdentType()) {
default:
return EmitUnsupportedLValue(E, "predefined expression");
@@ -854,68 +929,78 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// Emit the vector as an lvalue to get its address.
LValue LHS = EmitLValue(E->getBase());
assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
- Idx = Builder.CreateIntCast(Idx, llvm::Type::Int32Ty, IdxSigned, "vidx");
+ Idx = Builder.CreateIntCast(Idx,
+ llvm::Type::getInt32Ty(VMContext), IdxSigned, "vidx");
return LValue::MakeVectorElt(LHS.getAddress(), Idx,
- E->getBase()->getType().getCVRQualifiers());
+ E->getBase()->getType().getCVRQualifiers());
}
-
+
// The base must be a pointer, which is not an aggregate. Emit it.
llvm::Value *Base = EmitScalarExpr(E->getBase());
-
+
// Extend or truncate the index type to 32 or 64-bits.
unsigned IdxBitwidth = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
if (IdxBitwidth != LLVMPointerWidth)
- Idx = Builder.CreateIntCast(Idx, llvm::IntegerType::get(LLVMPointerWidth),
+ Idx = Builder.CreateIntCast(Idx,
+ llvm::IntegerType::get(VMContext, LLVMPointerWidth),
IdxSigned, "idxprom");
- // We know that the pointer points to a type of the correct size,
- // unless the size is a VLA or Objective-C interface.
+ // We know that the pointer points to a type of the correct size, unless the
+ // size is a VLA or Objective-C interface.
llvm::Value *Address = 0;
- if (const VariableArrayType *VAT =
+ if (const VariableArrayType *VAT =
getContext().getAsVariableArrayType(E->getType())) {
- llvm::Value *VLASize = VLASizeMap[VAT];
-
+ llvm::Value *VLASize = GetVLASize(VAT);
+
Idx = Builder.CreateMul(Idx, VLASize);
-
+
QualType BaseType = getContext().getBaseElementType(VAT);
-
+
uint64_t BaseTypeSize = getContext().getTypeSize(BaseType) / 8;
Idx = Builder.CreateUDiv(Idx,
- llvm::ConstantInt::get(Idx->getType(),
+ llvm::ConstantInt::get(Idx->getType(),
BaseTypeSize));
- Address = Builder.CreateGEP(Base, Idx, "arrayidx");
- } else if (const ObjCInterfaceType *OIT =
+ Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
+ } else if (const ObjCInterfaceType *OIT =
dyn_cast<ObjCInterfaceType>(E->getType())) {
- llvm::Value *InterfaceSize =
+ llvm::Value *InterfaceSize =
llvm::ConstantInt::get(Idx->getType(),
getContext().getTypeSize(OIT) / 8);
-
+
Idx = Builder.CreateMul(Idx, InterfaceSize);
- llvm::Type *i8PTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
- Address = Builder.CreateGEP(Builder.CreateBitCast(Base, i8PTy),
+ const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
+ Address = Builder.CreateGEP(Builder.CreateBitCast(Base, i8PTy),
Idx, "arrayidx");
Address = Builder.CreateBitCast(Address, Base->getType());
} else {
- Address = Builder.CreateGEP(Base, Idx, "arrayidx");
+ Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
}
-
- QualType T = E->getBase()->getType()->getAsPointerType()->getPointeeType();
- LValue LV = LValue::MakeAddr(Address,
- T.getCVRQualifiers(),
- getContext().getObjCGCAttrKind(T));
+
+ QualType T = E->getBase()->getType()->getPointeeType();
+ assert(!T.isNull() &&
+ "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");
+
+ Qualifiers Quals = MakeQualifiers(T);
+ Quals.setAddressSpace(E->getBase()->getType().getAddressSpace());
+
+ LValue LV = LValue::MakeAddr(Address, Quals);
if (getContext().getLangOptions().ObjC1 &&
- getContext().getLangOptions().getGCMode() != LangOptions::NonGC)
+ getContext().getLangOptions().getGCMode() != LangOptions::NonGC) {
LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext()));
+ setObjCGCLValueClass(getContext(), E, LV);
+ }
return LV;
}
-static
-llvm::Constant *GenerateConstantVector(llvm::SmallVector<unsigned, 4> &Elts) {
+static
+llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext,
+ llvm::SmallVector<unsigned, 4> &Elts) {
llvm::SmallVector<llvm::Constant *, 4> CElts;
-
+
for (unsigned i = 0, e = Elts.size(); i != e; ++i)
- CElts.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, Elts[i]));
+ CElts.push_back(llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), Elts[i]));
return llvm::ConstantVector::get(&CElts[0], CElts.size());
}
@@ -930,9 +1015,11 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
assert(E->getBase()->getType()->isVectorType());
Base = EmitLValue(E->getBase());
} else {
- const PointerType *PT = E->getBase()->getType()->getAsPointerType();
+ const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
llvm::Value *Ptr = EmitScalarExpr(E->getBase());
- Base = LValue::MakeAddr(Ptr, PT->getPointeeType().getCVRQualifiers());
+ Qualifiers Quals = MakeQualifiers(PT->getPointeeType());
+ Quals.removeObjCGCAttr();
+ Base = LValue::MakeAddr(Ptr, Quals);
}
// Encode the element access list into a vector of unsigned indices.
@@ -940,9 +1027,9 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
E->getEncodedElementAccess(Indices);
if (Base.isSimple()) {
- llvm::Constant *CV = GenerateConstantVector(Indices);
+ llvm::Constant *CV = GenerateConstantVector(VMContext, Indices);
return LValue::MakeExtVectorElt(Base.getAddress(), CV,
- Base.getQualifiers());
+ Base.getVRQualifiers());
}
assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
@@ -951,68 +1038,69 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
if (isa<llvm::ConstantAggregateZero>(BaseElts))
- CElts.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0));
+ CElts.push_back(llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), 0));
else
CElts.push_back(BaseElts->getOperand(Indices[i]));
}
llvm::Constant *CV = llvm::ConstantVector::get(&CElts[0], CElts.size());
return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV,
- Base.getQualifiers());
+ Base.getVRQualifiers());
}
LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
bool isUnion = false;
- bool isIvar = false;
bool isNonGC = false;
Expr *BaseExpr = E->getBase();
llvm::Value *BaseValue = NULL;
- unsigned CVRQualifiers=0;
+ Qualifiers BaseQuals;
// If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
if (E->isArrow()) {
BaseValue = EmitScalarExpr(BaseExpr);
- const PointerType *PTy =
- BaseExpr->getType()->getAsPointerType();
+ const PointerType *PTy =
+ BaseExpr->getType()->getAs<PointerType>();
if (PTy->getPointeeType()->isUnionType())
isUnion = true;
- CVRQualifiers = PTy->getPointeeType().getCVRQualifiers();
- } else if (isa<ObjCPropertyRefExpr>(BaseExpr) ||
- isa<ObjCKVCRefExpr>(BaseExpr)) {
+ BaseQuals = PTy->getPointeeType().getQualifiers();
+ } else if (isa<ObjCPropertyRefExpr>(BaseExpr->IgnoreParens()) ||
+ isa<ObjCImplicitSetterGetterRefExpr>(
+ BaseExpr->IgnoreParens())) {
RValue RV = EmitObjCPropertyGet(BaseExpr);
BaseValue = RV.getAggregateAddr();
if (BaseExpr->getType()->isUnionType())
isUnion = true;
- CVRQualifiers = BaseExpr->getType().getCVRQualifiers();
+ BaseQuals = BaseExpr->getType().getQualifiers();
} else {
LValue BaseLV = EmitLValue(BaseExpr);
- if (BaseLV.isObjCIvar())
- isIvar = true;
if (BaseLV.isNonGC())
isNonGC = true;
// FIXME: this isn't right for bitfields.
BaseValue = BaseLV.getAddress();
- if (BaseExpr->getType()->isUnionType())
+ QualType BaseTy = BaseExpr->getType();
+ if (BaseTy->isUnionType())
isUnion = true;
- CVRQualifiers = BaseExpr->getType().getCVRQualifiers();
+ BaseQuals = BaseTy.getQualifiers();
}
FieldDecl *Field = dyn_cast<FieldDecl>(E->getMemberDecl());
// FIXME: Handle non-field member expressions
assert(Field && "No code generation for non-field member references");
LValue MemExpLV = EmitLValueForField(BaseValue, Field, isUnion,
- CVRQualifiers);
- LValue::SetObjCIvar(MemExpLV, isIvar);
+ BaseQuals.getCVRQualifiers());
LValue::SetObjCNonGC(MemExpLV, isNonGC);
+ setObjCGCLValueClass(getContext(), E, MemExpLV);
return MemExpLV;
}
LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue,
FieldDecl* Field,
unsigned CVRQualifiers) {
- unsigned idx = CGM.getTypes().getLLVMFieldNo(Field);
+ CodeGenTypes::BitFieldInfo Info = CGM.getTypes().getBitFieldInfo(Field);
+
// FIXME: CodeGenTypes should expose a method to get the appropriate type for
// FieldTy (the appropriate type is ABI-dependent).
- const llvm::Type *FieldTy =
+ const llvm::Type *FieldTy =
CGM.getTypes().ConvertTypeForMem(Field->getType());
const llvm::PointerType *BaseTy =
cast<llvm::PointerType>(BaseValue->getType());
@@ -1020,13 +1108,12 @@ LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue,
BaseValue = Builder.CreateBitCast(BaseValue,
llvm::PointerType::get(FieldTy, AS),
"tmp");
- llvm::Value *V = Builder.CreateGEP(BaseValue,
- llvm::ConstantInt::get(llvm::Type::Int32Ty, idx),
- "tmp");
-
- CodeGenTypes::BitFieldInfo bitFieldInfo =
- CGM.getTypes().getBitFieldInfo(Field);
- return LValue::MakeBitfield(V, bitFieldInfo.Begin, bitFieldInfo.Size,
+
+ llvm::Value *Idx =
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Info.FieldNo);
+ llvm::Value *V = Builder.CreateGEP(BaseValue, Idx, "tmp");
+
+ return LValue::MakeBitfield(V, Info.Start, Info.Size,
Field->getType()->isSignedIntegerType(),
Field->getType().getCVRQualifiers()|CVRQualifiers);
}
@@ -1034,46 +1121,34 @@ LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue,
LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue,
FieldDecl* Field,
bool isUnion,
- unsigned CVRQualifiers)
-{
+ unsigned CVRQualifiers) {
if (Field->isBitField())
return EmitLValueForBitfield(BaseValue, Field, CVRQualifiers);
-
+
unsigned idx = CGM.getTypes().getLLVMFieldNo(Field);
llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp");
// Match union field type.
if (isUnion) {
- const llvm::Type *FieldTy =
+ const llvm::Type *FieldTy =
CGM.getTypes().ConvertTypeForMem(Field->getType());
- const llvm::PointerType * BaseTy =
+ const llvm::PointerType * BaseTy =
cast<llvm::PointerType>(BaseValue->getType());
unsigned AS = BaseTy->getAddressSpace();
- V = Builder.CreateBitCast(V,
- llvm::PointerType::get(FieldTy, AS),
+ V = Builder.CreateBitCast(V,
+ llvm::PointerType::get(FieldTy, AS),
"tmp");
}
if (Field->getType()->isReferenceType())
V = Builder.CreateLoad(V, "tmp");
- QualType::GCAttrTypes attr = QualType::GCNone;
- if (CGM.getLangOptions().ObjC1 &&
- CGM.getLangOptions().getGCMode() != LangOptions::NonGC) {
- QualType Ty = Field->getType();
- attr = Ty.getObjCGCAttr();
- if (attr != QualType::GCNone) {
- // __weak attribute on a field is ignored.
- if (attr == QualType::Weak)
- attr = QualType::GCNone;
- }
- else if (getContext().isObjCObjectPointerType(Ty))
- attr = QualType::Strong;
- }
- LValue LV =
- LValue::MakeAddr(V,
- Field->getType().getCVRQualifiers()|CVRQualifiers,
- attr);
- return LV;
+ Qualifiers Quals = MakeQualifiers(Field->getType());
+ Quals.addCVRQualifiers(CVRQualifiers);
+ // __weak attribute on a field is ignored.
+ if (Quals.getObjCGCAttr() == Qualifiers::Weak)
+ Quals.removeObjCGCAttr();
+
+ return LValue::MakeAddr(V, Quals);
}
LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr* E){
@@ -1081,7 +1156,7 @@ LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr* E){
llvm::Value *DeclPtr = CreateTempAlloca(LTy, ".compoundliteral");
const Expr* InitExpr = E->getInitializer();
- LValue Result = LValue::MakeAddr(DeclPtr, E->getType().getCVRQualifiers());
+ LValue Result = LValue::MakeAddr(DeclPtr, MakeQualifiers(E->getType()));
if (E->getType()->isComplexType()) {
EmitComplexExprIntoAddr(InitExpr, DeclPtr, false);
@@ -1094,22 +1169,51 @@ LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr* E){
return Result;
}
-LValue CodeGenFunction::EmitConditionalOperator(const ConditionalOperator* E) {
- // We don't handle vectors yet.
- if (E->getType()->isVectorType())
- return EmitUnsupportedLValue(E, "conditional operator");
+LValue
+CodeGenFunction::EmitConditionalOperatorLValue(const ConditionalOperator* E) {
+ if (E->isLvalue(getContext()) == Expr::LV_Valid) {
+ llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
+ llvm::BasicBlock *ContBlock = createBasicBlock("cond.end");
+
+ llvm::Value *Cond = EvaluateExprAsBool(E->getCond());
+ Builder.CreateCondBr(Cond, LHSBlock, RHSBlock);
+
+ EmitBlock(LHSBlock);
+
+ LValue LHS = EmitLValue(E->getLHS());
+ if (!LHS.isSimple())
+ return EmitUnsupportedLValue(E, "conditional operator");
+
+ llvm::Value *Temp = CreateTempAlloca(LHS.getAddress()->getType(),
+ "condtmp");
+
+ Builder.CreateStore(LHS.getAddress(), Temp);
+ EmitBranch(ContBlock);
+
+ EmitBlock(RHSBlock);
+ LValue RHS = EmitLValue(E->getRHS());
+ if (!RHS.isSimple())
+ return EmitUnsupportedLValue(E, "conditional operator");
+
+ Builder.CreateStore(RHS.getAddress(), Temp);
+ EmitBranch(ContBlock);
+ EmitBlock(ContBlock);
+
+ Temp = Builder.CreateLoad(Temp, "lv");
+ return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
+ }
+
// ?: here should be an aggregate.
- assert((hasAggregateLLVMType(E->getType()) &&
+ assert((hasAggregateLLVMType(E->getType()) &&
!E->getType()->isAnyComplexType()) &&
"Unexpected conditional operator!");
llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
EmitAggExpr(E, Temp, false);
- return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(),
- getContext().getObjCGCAttrKind(E->getType()));
-
+ return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
}
/// EmitCastLValue - Casts are never lvalues. If a cast is needed by the code
@@ -1118,21 +1222,47 @@ LValue CodeGenFunction::EmitConditionalOperator(const ConditionalOperator* E) {
/// all the reasons that casts are permitted with aggregate result, including
/// noop aggregate casts, and cast from scalar to union.
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
- // If this is an aggregate-to-aggregate cast, just use the input's address as
- // the lvalue.
- if (getContext().hasSameUnqualifiedType(E->getType(),
- E->getSubExpr()->getType()))
+ switch (E->getCastKind()) {
+ default:
+ // If this is an lvalue cast, treat it as a no-op.
+ // FIXME: We shouldn't need to check for this explicitly!
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
+ if (ICE->isLvalueCast())
+ return EmitLValue(E->getSubExpr());
+
+ assert(0 && "Unhandled cast!");
+
+ case CastExpr::CK_NoOp:
+ case CastExpr::CK_ConstructorConversion:
+ case CastExpr::CK_UserDefinedConversion:
return EmitLValue(E->getSubExpr());
-
- // Otherwise, we must have a cast from scalar to union.
- assert(E->getType()->isUnionType() && "Expected scalar-to-union cast");
-
- // Casts are only lvalues when the source and destination types are the same.
- llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
- EmitAnyExpr(E->getSubExpr(), Temp, false);
- return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(),
- getContext().getObjCGCAttrKind(E->getType()));
+ case CastExpr::CK_DerivedToBase: {
+ const RecordType *DerivedClassTy =
+ E->getSubExpr()->getType()->getAs<RecordType>();
+ CXXRecordDecl *DerivedClassDecl =
+ cast<CXXRecordDecl>(DerivedClassTy->getDecl());
+
+ const RecordType *BaseClassTy = E->getType()->getAs<RecordType>();
+ CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseClassTy->getDecl());
+
+ LValue LV = EmitLValue(E->getSubExpr());
+
+ // Perform the derived-to-base conversion
+ llvm::Value *Base =
+ GetAddressCXXOfBaseClass(LV.getAddress(), DerivedClassDecl,
+ BaseClassDecl, /*NullCheckValue=*/false);
+
+ return LValue::MakeAddr(Base, MakeQualifiers(E->getType()));
+ }
+
+ case CastExpr::CK_ToUnion: {
+ llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
+ EmitAnyExpr(E->getSubExpr(), Temp, false);
+
+ return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
+ }
+ }
}
//===--------------------------------------------------------------------===//
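
The conditional-operator lvalue support added above boils down to branching, stashing whichever address was chosen in a temporary, and reloading it at the join point. A hand-written C++ equivalent of that lowering; the function names and comments are annotations for this sketch, not code from the patch:

// The construct the new path accepts: assigning through an lvalue ?: .
void assignThroughConditional(bool cond, int &a, int &b) {
  (cond ? a : b) = 42;
}

// Roughly what EmitConditionalOperatorLValue generates for the line above.
void assignThroughConditionalLowered(bool cond, int &a, int &b) {
  int *condtmp;      // CreateTempAlloca(LHS.getAddress()->getType(), "condtmp")
  if (cond)          // "cond.true" block
    condtmp = &a;    // EmitLValue(E->getLHS()), store its address
  else               // "cond.false" block
    condtmp = &b;    // EmitLValue(E->getRHS()), store its address
  *condtmp = 42;     // "cond.end" block: load "lv", then store through it
}
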
@@ -1147,13 +1277,13 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E) {
if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
return EmitCXXMemberCallExpr(CE);
-
+
const Decl *TargetDecl = 0;
if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) {
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
TargetDecl = DRE->getDecl();
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(TargetDecl))
- if (unsigned builtinID = FD->getBuiltinID(getContext()))
+ if (unsigned builtinID = FD->getBuiltinID())
return EmitBuiltinExpr(FD, builtinID, E);
}
}
@@ -1161,7 +1291,17 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E) {
if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E))
if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
return EmitCXXOperatorMemberCallExpr(CE, MD);
-
+
+ if (isa<CXXPseudoDestructorExpr>(E->getCallee())) {
+ // C++ [expr.pseudo]p1:
+ // The result shall only be used as the operand for the function call
+ // operator (), and the result of such a call has type void. The only
+ // effect is the evaluation of the postfix-expression before the dot or
+ // arrow.
+ EmitScalarExpr(E->getCallee());
+ return RValue::get(0);
+ }
+
llvm::Value *Callee = EmitScalarExpr(E->getCallee());
return EmitCall(Callee, E->getCallee()->getType(),
E->arg_begin(), E->arg_end(), TargetDecl);
@@ -1173,7 +1313,7 @@ LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
EmitAnyExpr(E->getLHS());
return EmitLValue(E->getRHS());
}
-
+
// Can only get l-value for binary operator expressions which are a
// simple assignment of aggregate type.
if (E->getOpcode() != BinaryOperator::Assign)
@@ -1182,8 +1322,7 @@ LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
EmitAggExpr(E, Temp, false);
// FIXME: Are these qualifiers correct?
- return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(),
- getContext().getObjCGCAttrKind(E->getType()));
+ return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
}
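
// Illustrative sketch, not from the patch: the two binary operators this
// function accepts as l-values, the comma operator and a simple assignment
// of aggregate type (the latter is emitted into a temporary, hence the FIXME
// about qualifiers). Names are invented for the example.
struct Agg { int x; };

static int use_assignment_lvalue(Agg &a, const Agg &b) {
  return (a = b).x;           // aggregate assignment used as an l-value
}

static int use_comma_lvalue(int &x, int &y) {
  return (x = 1, y);          // comma: the l-value comes from the RHS
}
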
LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
@@ -1193,21 +1332,18 @@ LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
assert(E->getCallReturnType()->isReferenceType() &&
"Can't have a scalar return unless the return type is a "
"reference type!");
-
- return LValue::MakeAddr(RV.getScalarVal(), E->getType().getCVRQualifiers(),
- getContext().getObjCGCAttrKind(E->getType()));
+
+ return LValue::MakeAddr(RV.getScalarVal(), MakeQualifiers(E->getType()));
}
-
- return LValue::MakeAddr(RV.getAggregateAddr(),
- E->getType().getCVRQualifiers(),
- getContext().getObjCGCAttrKind(E->getType()));
+
+ return LValue::MakeAddr(RV.getAggregateAddr(), MakeQualifiers(E->getType()));
}
LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
// FIXME: This shouldn't require another copy.
llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType()));
EmitAggExpr(E, Temp, false);
- return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers());
+ return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
}
LValue
@@ -1219,15 +1355,15 @@ CodeGenFunction::EmitCXXConditionDeclLValue(const CXXConditionDeclExpr *E) {
LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(E->getType()), "tmp");
EmitCXXConstructExpr(Temp, E);
- return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers());
+ return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
}
LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
LValue LV = EmitLValue(E->getSubExpr());
-
+
PushCXXTemporary(E->getTemporary(), LV.getAddress());
-
+
return LV;
}
@@ -1235,9 +1371,7 @@ LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
// Can only get l-value for message expression returning aggregate type
RValue RV = EmitObjCMessageExpr(E);
// FIXME: can this be volatile?
- return LValue::MakeAddr(RV.getAggregateAddr(),
- E->getType().getCVRQualifiers(),
- getContext().getObjCGCAttrKind(E->getType()));
+ return LValue::MakeAddr(RV.getAggregateAddr(), MakeQualifiers(E->getType()));
}
llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
@@ -1257,35 +1391,39 @@ LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
// FIXME: A lot of the code below could be shared with EmitMemberExpr.
llvm::Value *BaseValue = 0;
const Expr *BaseExpr = E->getBase();
- unsigned CVRQualifiers = 0;
+ Qualifiers BaseQuals;
QualType ObjectTy;
if (E->isArrow()) {
BaseValue = EmitScalarExpr(BaseExpr);
- const PointerType *PTy = BaseExpr->getType()->getAsPointerType();
- ObjectTy = PTy->getPointeeType();
- CVRQualifiers = ObjectTy.getCVRQualifiers();
+ ObjectTy = BaseExpr->getType()->getPointeeType();
+ BaseQuals = ObjectTy.getQualifiers();
} else {
LValue BaseLV = EmitLValue(BaseExpr);
// FIXME: this isn't right for bitfields.
BaseValue = BaseLV.getAddress();
ObjectTy = BaseExpr->getType();
- CVRQualifiers = ObjectTy.getCVRQualifiers();
+ BaseQuals = ObjectTy.getQualifiers();
}
- return EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(), CVRQualifiers);
+ LValue LV =
+ EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
+ BaseQuals.getCVRQualifiers());
+ setObjCGCLValueClass(getContext(), E, LV);
+ return LV;
}
-LValue
+LValue
CodeGenFunction::EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E) {
- // This is a special l-value that just issues sends when we load or
- // store through it.
+ // This is a special l-value that just issues sends when we load or store
+ // through it.
return LValue::MakePropertyRef(E, E->getType().getCVRQualifiers());
}
-LValue
-CodeGenFunction::EmitObjCKVCRefLValue(const ObjCKVCRefExpr *E) {
- // This is a special l-value that just issues sends when we load or
- // store through it.
+LValue
+CodeGenFunction::EmitObjCKVCRefLValue(
+ const ObjCImplicitSetterGetterRefExpr *E) {
+ // This is a special l-value that just issues sends when we load or store
+ // through it.
return LValue::MakeKVCRef(E, E->getType().getCVRQualifiers());
}
@@ -1295,31 +1433,36 @@ CodeGenFunction::EmitObjCSuperExprLValue(const ObjCSuperExpr *E) {
}
LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
-
+
// Can only get l-value for message expression returning aggregate type
RValue RV = EmitAnyExprToTemp(E);
// FIXME: can this be volatile?
- return LValue::MakeAddr(RV.getAggregateAddr(),
- E->getType().getCVRQualifiers(),
- getContext().getObjCGCAttrKind(E->getType()));
+ return LValue::MakeAddr(RV.getAggregateAddr(), MakeQualifiers(E->getType()));
}
-RValue CodeGenFunction::EmitCall(llvm::Value *Callee, QualType CalleeType,
+RValue CodeGenFunction::EmitCall(llvm::Value *Callee, QualType CalleeType,
CallExpr::const_arg_iterator ArgBeg,
CallExpr::const_arg_iterator ArgEnd,
const Decl *TargetDecl) {
- // Get the actual function type. The callee type will always be a
- // pointer to function type or a block pointer type.
- assert(CalleeType->isFunctionPointerType() &&
+ // Get the actual function type. The callee type will always be a pointer to
+ // function type or a block pointer type.
+ assert(CalleeType->isFunctionPointerType() &&
"Call must have function pointer type!");
- QualType FnType = CalleeType->getAsPointerType()->getPointeeType();
- QualType ResultType = FnType->getAsFunctionType()->getResultType();
+ QualType FnType = CalleeType->getAs<PointerType>()->getPointeeType();
+ QualType ResultType = FnType->getAs<FunctionType>()->getResultType();
CallArgList Args;
- EmitCallArgs(Args, FnType->getAsFunctionProtoType(), ArgBeg, ArgEnd);
-
- return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args),
+ EmitCallArgs(Args, FnType->getAs<FunctionProtoType>(), ArgBeg, ArgEnd);
+
+ // FIXME: We should not need to do this, it should be part of the function
+ // type.
+ unsigned CallingConvention = 0;
+ if (const llvm::Function *F =
+ dyn_cast<llvm::Function>(Callee->stripPointerCasts()))
+ CallingConvention = F->getCallingConv();
+ return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
+ CallingConvention),
Callee, Args, TargetDecl);
}
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 412a06594f53..0866ff893c4e 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -13,6 +13,7 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "CGObjCRuntime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtVisitor.h"
@@ -35,12 +36,14 @@ class VISIBILITY_HIDDEN AggExprEmitter : public StmtVisitor<AggExprEmitter> {
llvm::Value *DestPtr;
bool VolatileDest;
bool IgnoreResult;
-
+ bool IsInitializer;
+ bool RequiresGCollection;
public:
AggExprEmitter(CodeGenFunction &cgf, llvm::Value *destPtr, bool v,
- bool ignore)
+ bool ignore, bool isinit, bool requiresGCollection)
: CGF(cgf), Builder(CGF.Builder),
- DestPtr(destPtr), VolatileDest(v), IgnoreResult(ignore) {
+ DestPtr(destPtr), VolatileDest(v), IgnoreResult(ignore),
+ IsInitializer(isinit), RequiresGCollection(requiresGCollection) {
}
//===--------------------------------------------------------------------===//
@@ -59,7 +62,7 @@ public:
//===--------------------------------------------------------------------===//
// Visitor Methods
//===--------------------------------------------------------------------===//
-
+
void VisitStmt(Stmt *S) {
CGF.ErrorUnsupported(S, "aggregate expression");
}
@@ -72,35 +75,36 @@ public:
void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
void VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
- EmitAggLoadOfLValue(E);
+ EmitAggLoadOfLValue(E);
}
void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
EmitAggLoadOfLValue(E);
}
void VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
- EmitAggLoadOfLValue(E);
+ EmitAggLoadOfLValue(E);
}
void VisitPredefinedExpr(const PredefinedExpr *E) {
- EmitAggLoadOfLValue(E);
+ EmitAggLoadOfLValue(E);
}
-
+
// Operators.
- void VisitCStyleCastExpr(CStyleCastExpr *E);
- void VisitImplicitCastExpr(ImplicitCastExpr *E);
+ void VisitCastExpr(CastExpr *E);
void VisitCallExpr(const CallExpr *E);
void VisitStmtExpr(const StmtExpr *E);
void VisitBinaryOperator(const BinaryOperator *BO);
void VisitBinAssign(const BinaryOperator *E);
void VisitBinComma(const BinaryOperator *E);
+ void VisitUnaryAddrOf(const UnaryOperator *E);
void VisitObjCMessageExpr(ObjCMessageExpr *E);
void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
EmitAggLoadOfLValue(E);
}
void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E);
- void VisitObjCKVCRefExpr(ObjCKVCRefExpr *E);
-
+ void VisitObjCImplicitSetterGetterRefExpr(ObjCImplicitSetterGetterRefExpr *E);
+
void VisitConditionalOperator(const ConditionalOperator *CO);
+ void VisitChooseExpr(const ChooseExpr *CE);
void VisitInitListExpr(InitListExpr *E);
void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
Visit(DAE->getExpr());
@@ -143,6 +147,12 @@ void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
DestPtr = CGF.CreateTempAlloca(CGF.ConvertType(E->getType()), "agg.tmp");
}
+ if (RequiresGCollection) {
+ CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
+ DestPtr, Src.getAggregateAddr(),
+ E->getType());
+ return;
+ }
// If the result of the assignment is used, copy the LHS there also.
// FIXME: Pass VolatileDest as well. I think we also need to merge volatile
// from the source as well, as we can't eliminate it if either operand
@@ -164,25 +174,80 @@ void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
// Visitor Methods
//===----------------------------------------------------------------------===//
-void AggExprEmitter::VisitCStyleCastExpr(CStyleCastExpr *E) {
- // GCC union extension
- if (E->getSubExpr()->getType()->isScalarType()) {
+void AggExprEmitter::VisitCastExpr(CastExpr *E) {
+ switch (E->getCastKind()) {
+ default: assert(0 && "Unhandled cast kind!");
+
+ case CastExpr::CK_ToUnion: {
+ // GCC union extension
QualType PtrTy =
- CGF.getContext().getPointerType(E->getSubExpr()->getType());
+ CGF.getContext().getPointerType(E->getSubExpr()->getType());
llvm::Value *CastPtr = Builder.CreateBitCast(DestPtr,
CGF.ConvertType(PtrTy));
- EmitInitializationToLValue(E->getSubExpr(), LValue::MakeAddr(CastPtr, 0));
- return;
+ EmitInitializationToLValue(E->getSubExpr(),
+ LValue::MakeAddr(CastPtr, Qualifiers()));
+ break;
}
- Visit(E->getSubExpr());
-}
+ // FIXME: Remove the CK_Unknown check here.
+ case CastExpr::CK_Unknown:
+ case CastExpr::CK_NoOp:
+ case CastExpr::CK_UserDefinedConversion:
+ case CastExpr::CK_ConstructorConversion:
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
+ E->getType()) &&
+ "Implicit cast types must be compatible");
+ Visit(E->getSubExpr());
+ break;
+
+ case CastExpr::CK_NullToMemberPointer: {
+ const llvm::Type *PtrDiffTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
-void AggExprEmitter::VisitImplicitCastExpr(ImplicitCastExpr *E) {
- assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
- E->getType()) &&
- "Implicit cast types must be compatible");
- Visit(E->getSubExpr());
+ llvm::Value *NullValue = llvm::Constant::getNullValue(PtrDiffTy);
+ llvm::Value *Ptr = Builder.CreateStructGEP(DestPtr, 0, "ptr");
+ Builder.CreateStore(NullValue, Ptr, VolatileDest);
+
+ llvm::Value *Adj = Builder.CreateStructGEP(DestPtr, 1, "adj");
+ Builder.CreateStore(NullValue, Adj, VolatileDest);
+
+ break;
+ }
+
+ case CastExpr::CK_BaseToDerivedMemberPointer: {
+ QualType SrcType = E->getSubExpr()->getType();
+
+ llvm::Value *Src = CGF.CreateTempAlloca(CGF.ConvertTypeForMem(SrcType),
+ "tmp");
+ CGF.EmitAggExpr(E->getSubExpr(), Src, SrcType.isVolatileQualified());
+
+ llvm::Value *SrcPtr = Builder.CreateStructGEP(Src, 0, "src.ptr");
+ SrcPtr = Builder.CreateLoad(SrcPtr);
+
+ llvm::Value *SrcAdj = Builder.CreateStructGEP(Src, 1, "src.adj");
+ SrcAdj = Builder.CreateLoad(SrcAdj);
+
+ llvm::Value *DstPtr = Builder.CreateStructGEP(DestPtr, 0, "dst.ptr");
+ Builder.CreateStore(SrcPtr, DstPtr, VolatileDest);
+
+ llvm::Value *DstAdj = Builder.CreateStructGEP(DestPtr, 1, "dst.adj");
+
+ // Now See if we need to update the adjustment.
+  // Now see if we need to update the adjustment.
+ const CXXRecordDecl *SrcDecl =
+ cast<CXXRecordDecl>(SrcType->getAs<MemberPointerType>()->
+ getClass()->getAs<RecordType>()->getDecl());
+ const CXXRecordDecl *DstDecl =
+ cast<CXXRecordDecl>(E->getType()->getAs<MemberPointerType>()->
+ getClass()->getAs<RecordType>()->getDecl());
+
+ llvm::Constant *Adj = CGF.CGM.GetCXXBaseClassOffset(DstDecl, SrcDecl);
+ if (Adj)
+ SrcAdj = Builder.CreateAdd(SrcAdj, Adj, "adj");
+
+ Builder.CreateStore(SrcAdj, DstAdj, VolatileDest);
+ break;
+ }
+ }
}
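
// Illustrative sketch, not from the patch: source-level forms of the
// member-pointer casts handled above. A null pointer-to-member stores zero
// in both words; converting a base-class member pointer to a derived-class
// one may add the base-class offset to the adjustment word. Names are
// invented for the example.
struct PadM  { int pad; };
struct BaseM { int f() { return 1; } };
struct DerivedM : PadM, BaseM { };

static int call_through(DerivedM &d) {
  int (DerivedM::*null_pm)() = nullptr;   // CK_NullToMemberPointer: {0, 0}
  int (DerivedM::*pm)() = &BaseM::f;      // CK_BaseToDerivedMemberPointer
  return (pm && !null_pm) ? (d.*pm)() : 0;
}
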
void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
@@ -190,7 +255,7 @@ void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
EmitAggLoadOfLValue(E);
return;
}
-
+
RValue RV = CGF.EmitCallExpr(E);
EmitFinalDestCopy(E, RV);
}
@@ -205,14 +270,49 @@ void AggExprEmitter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
EmitFinalDestCopy(E, RV);
}
-void AggExprEmitter::VisitObjCKVCRefExpr(ObjCKVCRefExpr *E) {
+void AggExprEmitter::VisitObjCImplicitSetterGetterRefExpr(
+ ObjCImplicitSetterGetterRefExpr *E) {
RValue RV = CGF.EmitObjCPropertyGet(E);
EmitFinalDestCopy(E, RV);
}
void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
CGF.EmitAnyExpr(E->getLHS(), 0, false, true);
- CGF.EmitAggExpr(E->getRHS(), DestPtr, VolatileDest);
+ CGF.EmitAggExpr(E->getRHS(), DestPtr, VolatileDest,
+ /*IgnoreResult=*/false, IsInitializer);
+}
+
+void AggExprEmitter::VisitUnaryAddrOf(const UnaryOperator *E) {
+ // We have a member function pointer.
+ const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
+ assert(MPT->getPointeeType()->isFunctionProtoType() &&
+ "Unexpected member pointer type!");
+
+ const QualifiedDeclRefExpr *DRE = cast<QualifiedDeclRefExpr>(E->getSubExpr());
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(DRE->getDecl());
+
+ const llvm::Type *PtrDiffTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
+
+ llvm::Value *DstPtr = Builder.CreateStructGEP(DestPtr, 0, "dst.ptr");
+ llvm::Value *FuncPtr;
+
+ if (MD->isVirtual()) {
+ int64_t Index =
+ CGF.CGM.getVtableInfo().getMethodVtableIndex(MD);
+
+ FuncPtr = llvm::ConstantInt::get(PtrDiffTy, Index + 1);
+ } else {
+ FuncPtr = llvm::ConstantExpr::getPtrToInt(CGF.CGM.GetAddrOfFunction(MD),
+ PtrDiffTy);
+ }
+ Builder.CreateStore(FuncPtr, DstPtr, VolatileDest);
+
+ llvm::Value *AdjPtr = Builder.CreateStructGEP(DestPtr, 1, "dst.adj");
+
+ // The adjustment will always be 0.
+ Builder.CreateStore(llvm::ConstantInt::get(PtrDiffTy, 0), AdjPtr,
+ VolatileDest);
}
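
// Illustrative sketch, not from the patch, of the two-word value built above
// for &Class::method: a function address (or, for virtual methods, vtable
// index + 1) plus an adjustment that is always zero here. Plain mock-up
// types, not the clang/LLVM ones.
#include <cstdint>

struct MemberFnPtrWords {
  std::intptr_t ptr;   // function address, or vtable index + 1 for virtuals
  std::intptr_t adj;   // 'this' adjustment; 0 when taking the address directly
};

static MemberFnPtrWords make_non_virtual(void (*fn)()) {
  return { reinterpret_cast<std::intptr_t>(fn), 0 };
}

static MemberFnPtrWords make_virtual(std::intptr_t vtable_index) {
  return { vtable_index + 1, 0 };
}
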
void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
@@ -238,19 +338,25 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
if (!AggLoc)
AggLoc = CGF.CreateTempAlloca(CGF.ConvertType(E->getRHS()->getType()));
CGF.EmitAggExpr(E->getRHS(), AggLoc, VolatileDest);
- CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(),
+ CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(),
RValue::getAggregate(AggLoc, VolatileDest));
- }
- else if (LHS.isKVCRef()) {
+ } else if (LHS.isKVCRef()) {
llvm::Value *AggLoc = DestPtr;
if (!AggLoc)
AggLoc = CGF.CreateTempAlloca(CGF.ConvertType(E->getRHS()->getType()));
CGF.EmitAggExpr(E->getRHS(), AggLoc, VolatileDest);
- CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(),
+ CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(),
RValue::getAggregate(AggLoc, VolatileDest));
} else {
+ bool RequiresGCollection = false;
+ if (CGF.getContext().getLangOptions().NeXTRuntime) {
+ QualType LHSTy = E->getLHS()->getType();
+ if (const RecordType *FDTTy = LHSTy.getTypePtr()->getAs<RecordType>())
+ RequiresGCollection = FDTTy->getDecl()->hasObjectMember();
+ }
// Codegen the RHS so that it stores directly into the LHS.
- CGF.EmitAggExpr(E->getRHS(), LHS.getAddress(), LHS.isVolatileQualified());
+ CGF.EmitAggExpr(E->getRHS(), LHS.getAddress(), LHS.isVolatileQualified(),
+ false, false, RequiresGCollection);
EmitFinalDestCopy(E, LHS, true);
}
}
@@ -259,30 +365,34 @@ void AggExprEmitter::VisitConditionalOperator(const ConditionalOperator *E) {
llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
-
+
llvm::Value *Cond = CGF.EvaluateExprAsBool(E->getCond());
Builder.CreateCondBr(Cond, LHSBlock, RHSBlock);
-
+
CGF.PushConditionalTempDestruction();
CGF.EmitBlock(LHSBlock);
-
+
// Handle the GNU extension for missing LHS.
assert(E->getLHS() && "Must have LHS for aggregate value");
Visit(E->getLHS());
CGF.PopConditionalTempDestruction();
CGF.EmitBranch(ContBlock);
-
+
CGF.PushConditionalTempDestruction();
CGF.EmitBlock(RHSBlock);
-
+
Visit(E->getRHS());
CGF.PopConditionalTempDestruction();
CGF.EmitBranch(ContBlock);
-
+
CGF.EmitBlock(ContBlock);
}
+void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
+ Visit(CE->getChosenSubExpr(CGF.getContext()));
+}
+
void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
@@ -292,28 +402,30 @@ void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
return;
}
- EmitFinalDestCopy(VE, LValue::MakeAddr(ArgPtr, 0));
+ EmitFinalDestCopy(VE, LValue::MakeAddr(ArgPtr, Qualifiers()));
}
void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
llvm::Value *Val = DestPtr;
-
+
if (!Val) {
// Create a temporary variable.
Val = CGF.CreateTempAlloca(CGF.ConvertTypeForMem(E->getType()), "tmp");
// FIXME: volatile
CGF.EmitAggExpr(E->getSubExpr(), Val, false);
- } else
+ } else
Visit(E->getSubExpr());
-
- CGF.PushCXXTemporary(E->getTemporary(), Val);
+
+ // Don't make this a live temporary if we're emitting an initializer expr.
+ if (!IsInitializer)
+ CGF.PushCXXTemporary(E->getTemporary(), Val);
}
void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
llvm::Value *Val = DestPtr;
-
+
if (!Val) {
// Create a temporary variable.
Val = CGF.CreateTempAlloca(CGF.ConvertTypeForMem(E->getType()), "tmp");
@@ -323,7 +435,7 @@ AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
}
void AggExprEmitter::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
- CGF.EmitCXXExprWithTemporaries(E, DestPtr, VolatileDest);
+ CGF.EmitCXXExprWithTemporaries(E, DestPtr, VolatileDest, IsInitializer);
}
void AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV) {
@@ -359,7 +471,7 @@ void AggExprEmitter::EmitNullInitializationToLValue(LValue LV, QualType T) {
void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
- // FIXME: Disabled while we figure out what to do about
+ // FIXME: Disabled while we figure out what to do about
// test/CodeGen/bitfield.c
//
// If we can, prefer a copy from a global; this is a lot less code for long
@@ -387,7 +499,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
cast<llvm::PointerType>(DestPtr->getType());
const llvm::ArrayType *AType =
cast<llvm::ArrayType>(APType->getElementType());
-
+
uint64_t NumInitElements = E->getNumInits();
if (E->getNumInits() > 0) {
@@ -402,29 +514,30 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
uint64_t NumArrayElements = AType->getNumElements();
QualType ElementType = CGF.getContext().getCanonicalType(E->getType());
ElementType = CGF.getContext().getAsArrayType(ElementType)->getElementType();
-
- unsigned CVRqualifier = ElementType.getCVRQualifiers();
+
+ // FIXME: were we intentionally ignoring address spaces and GC attributes?
+ Qualifiers Quals = CGF.MakeQualifiers(ElementType);
for (uint64_t i = 0; i != NumArrayElements; ++i) {
llvm::Value *NextVal = Builder.CreateStructGEP(DestPtr, i, ".array");
if (i < NumInitElements)
EmitInitializationToLValue(E->getInit(i),
- LValue::MakeAddr(NextVal, CVRqualifier));
+ LValue::MakeAddr(NextVal, Quals));
else
- EmitNullInitializationToLValue(LValue::MakeAddr(NextVal, CVRqualifier),
+ EmitNullInitializationToLValue(LValue::MakeAddr(NextVal, Quals),
ElementType);
}
return;
}
-
+
assert(E->getType()->isRecordType() && "Only support structs/unions here!");
-
+
// Do struct initialization; this code just sets each individual member
  // to the appropriate value. This makes bitfield support automatic;
// the disadvantage is that the generated code is more difficult for
// the optimizer, especially with bitfields.
unsigned NumInitElements = E->getNumInits();
- RecordDecl *SD = E->getType()->getAsRecordType()->getDecl();
+ RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
unsigned CurInitVal = 0;
if (E->getType()->isUnionType()) {
@@ -432,7 +545,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// specified by the initializer list.
if (!E->getInitializedFieldInUnion()) {
// Empty union; we have nothing to do.
-
+
#ifndef NDEBUG
// Make sure that it's really an empty and not a failure of
// semantic analysis.
@@ -458,7 +571,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
return;
}
-
+
// Here we iterate over the fields; this makes it simpler to both
// default-initialize fields and skip over unnamed fields.
for (RecordDecl::field_iterator Field = SD->field_begin(),
@@ -494,13 +607,16 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
/// the value of the aggregate expression is not needed. If VolatileDest is
/// true, DestPtr cannot be 0.
void CodeGenFunction::EmitAggExpr(const Expr *E, llvm::Value *DestPtr,
- bool VolatileDest, bool IgnoreResult) {
+ bool VolatileDest, bool IgnoreResult,
+ bool IsInitializer,
+ bool RequiresGCollection) {
assert(E && hasAggregateLLVMType(E->getType()) &&
"Invalid aggregate expression to emit");
assert ((DestPtr != 0 || VolatileDest == false)
&& "volatile aggregate can't be 0");
-
- AggExprEmitter(*this, DestPtr, VolatileDest, IgnoreResult)
+
+ AggExprEmitter(*this, DestPtr, VolatileDest, IgnoreResult, IsInitializer,
+ RequiresGCollection)
.Visit(const_cast<Expr*>(E));
}
@@ -514,7 +630,7 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
llvm::Value *SrcPtr, QualType Ty,
bool isVolatile) {
assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
-
+
// Aggregate assignment turns into llvm.memcpy. This is almost valid per
// C99 6.5.16.1p3, which states "If the value being stored in an object is
// read from another object that overlaps in anyway the storage of the first
@@ -525,18 +641,19 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
// equal, but other compilers do this optimization, and almost every memcpy
// implementation handles this case safely. If there is a libc that does not
// safely handle this, we can add a target hook.
- const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
if (DestPtr->getType() != BP)
DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
if (SrcPtr->getType() != BP)
SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");
-
+
// Get size and alignment info for this aggregate.
std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
-
+
// FIXME: Handle variable sized types.
- const llvm::Type *IntPtr = llvm::IntegerType::get(LLVMPointerWidth);
-
+ const llvm::Type *IntPtr =
+ llvm::IntegerType::get(VMContext, LLVMPointerWidth);
+
// FIXME: If we have a volatile struct, the optimizer can remove what might
// appear to be `extra' memory ops:
//
@@ -553,6 +670,6 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
DestPtr, SrcPtr,
// TypeInfo.first describes size in bits.
llvm::ConstantInt::get(IntPtr, TypeInfo.first/8),
- llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
TypeInfo.second/8));
}
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index 3555c8c9b691..9e81e4fbeabe 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -46,7 +46,7 @@ public:
IgnoreRealAssign(irn), IgnoreImagAssign(iin) {
}
-
+
//===--------------------------------------------------------------------===//
// Utilities
//===--------------------------------------------------------------------===//
@@ -82,23 +82,23 @@ public:
if (LV.isPropertyRef())
return CGF.EmitObjCPropertyGet(LV.getPropertyRefExpr()).getComplexVal();
-
+
assert(LV.isKVCRef() && "Unknown LValue type!");
return CGF.EmitObjCPropertyGet(LV.getKVCRefExpr()).getComplexVal();
}
-
+
/// EmitLoadOfComplex - Given a pointer to a complex value, emit code to load
/// the real and imaginary pieces.
ComplexPairTy EmitLoadOfComplex(llvm::Value *SrcPtr, bool isVolatile);
-
+
/// EmitStoreOfComplex - Store the specified real/imag parts into the
/// specified value pointer.
void EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *ResPtr, bool isVol);
-
+
/// EmitComplexToComplexCast - Emit a cast from complex value Val to DestType.
ComplexPairTy EmitComplexToComplexCast(ComplexPairTy Val, QualType SrcType,
QualType DestType);
-
+
//===--------------------------------------------------------------------===//
// Visitor Methods
//===--------------------------------------------------------------------===//
@@ -111,16 +111,17 @@ public:
ComplexPairTy VisitExpr(Expr *S);
ComplexPairTy VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr());}
ComplexPairTy VisitImaginaryLiteral(const ImaginaryLiteral *IL);
-
+
// l-values.
ComplexPairTy VisitDeclRefExpr(const Expr *E) { return EmitLoadOfLValue(E); }
- ComplexPairTy VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ ComplexPairTy VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
return EmitLoadOfLValue(E);
}
ComplexPairTy VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
return EmitLoadOfLValue(E);
}
- ComplexPairTy VisitObjCKVCRefExpr(ObjCKVCRefExpr *E) {
+ ComplexPairTy VisitObjCImplicitSetterGetterRefExpr(
+ ObjCImplicitSetterGetterRefExpr *E) {
return EmitLoadOfLValue(E);
}
ComplexPairTy VisitObjCMessageExpr(ObjCMessageExpr *E) {
@@ -130,7 +131,7 @@ public:
ComplexPairTy VisitMemberExpr(const Expr *E) { return EmitLoadOfLValue(E); }
// FIXME: CompoundLiteralExpr
-
+
ComplexPairTy EmitCast(Expr *Op, QualType DestTy);
ComplexPairTy VisitImplicitCastExpr(ImplicitCastExpr *E) {
// Unlike for scalars, we don't have to worry about function->ptr demotion
@@ -180,23 +181,24 @@ public:
}
ComplexPairTy VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) {
assert(E->getType()->isAnyComplexType() && "Expected complex type!");
- QualType Elem = E->getType()->getAsComplexType()->getElementType();
+ QualType Elem = E->getType()->getAs<ComplexType>()->getElementType();
llvm::Constant *Null = llvm::Constant::getNullValue(CGF.ConvertType(Elem));
return ComplexPairTy(Null, Null);
}
ComplexPairTy VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
assert(E->getType()->isAnyComplexType() && "Expected complex type!");
- QualType Elem = E->getType()->getAsComplexType()->getElementType();
- llvm::Constant *Null = llvm::Constant::getNullValue(CGF.ConvertType(Elem));
+ QualType Elem = E->getType()->getAs<ComplexType>()->getElementType();
+ llvm::Constant *Null =
+ llvm::Constant::getNullValue(CGF.ConvertType(Elem));
return ComplexPairTy(Null, Null);
}
-
+
struct BinOpInfo {
ComplexPairTy LHS;
ComplexPairTy RHS;
QualType Ty; // Computation Type.
- };
-
+ };
+
BinOpInfo EmitBinOps(const BinaryOperator *E);
ComplexPairTy EmitCompoundAssign(const CompoundAssignOperator *E,
ComplexPairTy (ComplexExprEmitter::*Func)
@@ -206,7 +208,7 @@ public:
ComplexPairTy EmitBinSub(const BinOpInfo &Op);
ComplexPairTy EmitBinMul(const BinOpInfo &Op);
ComplexPairTy EmitBinDiv(const BinOpInfo &Op);
-
+
ComplexPairTy VisitBinMul(const BinaryOperator *E) {
return EmitBinMul(EmitBinOps(E));
}
@@ -219,7 +221,7 @@ public:
ComplexPairTy VisitBinDiv(const BinaryOperator *E) {
return EmitBinDiv(EmitBinOps(E));
}
-
+
// Compound assignments.
ComplexPairTy VisitBinAddAssign(const CompoundAssignOperator *E) {
return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinAdd);
@@ -233,7 +235,7 @@ public:
ComplexPairTy VisitBinDivAssign(const CompoundAssignOperator *E) {
return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinDiv);
}
-
+
// GCC rejects rem/and/or/xor for integer complex.
// Logical and/or always return int, never complex.
@@ -241,7 +243,7 @@ public:
ComplexPairTy VisitBinAssign (const BinaryOperator *E);
ComplexPairTy VisitBinComma (const BinaryOperator *E);
-
+
ComplexPairTy VisitConditionalOperator(const ConditionalOperator *CO);
ComplexPairTy VisitChooseExpr(ChooseExpr *CE);
@@ -259,27 +261,34 @@ public:
/// load the real and imaginary pieces, returning them as Real/Imag.
ComplexPairTy ComplexExprEmitter::EmitLoadOfComplex(llvm::Value *SrcPtr,
bool isVolatile) {
- llvm::SmallString<64> Name(SrcPtr->getNameStart(),
- SrcPtr->getNameStart()+SrcPtr->getNameLen());
-
+ llvm::SmallString<64> Name(SrcPtr->getName().begin(),
+ SrcPtr->getName().end());
+
llvm::Value *Real=0, *Imag=0;
if (!IgnoreReal) {
+ // FIXME: Clean this up once builder takes Twine/StringRef.
Name += ".realp";
- llvm::Value *RealPtr = Builder.CreateStructGEP(SrcPtr, 0, Name.c_str());
+ llvm::Value *RealPtr = Builder.CreateStructGEP(SrcPtr, 0,
+ Name.str().str().c_str());
Name.pop_back(); // .realp -> .real
- Real = Builder.CreateLoad(RealPtr, isVolatile, Name.c_str());
+ // FIXME: Clean this up once builder takes Twine/StringRef.
+ Real = Builder.CreateLoad(RealPtr, isVolatile,
+ Name.str().str().c_str());
Name.resize(Name.size()-4); // .real -> .imagp
}
-
+
if (!IgnoreImag) {
Name += "imagp";
-
- llvm::Value *ImagPtr = Builder.CreateStructGEP(SrcPtr, 1, Name.c_str());
+
+ // FIXME: Clean this up once builder takes Twine/StringRef.
+ llvm::Value *ImagPtr = Builder.CreateStructGEP(SrcPtr, 1,
+ Name.str().str().c_str());
Name.pop_back(); // .imagp -> .imag
- Imag = Builder.CreateLoad(ImagPtr, isVolatile, Name.c_str());
+ // FIXME: Clean this up once builder takes Twine/StringRef.
+ Imag = Builder.CreateLoad(ImagPtr, isVolatile, Name.str().str().c_str());
}
return ComplexPairTy(Real, Imag);
}
@@ -290,7 +299,7 @@ void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *Ptr,
bool isVolatile) {
llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, "real");
llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, "imag");
-
+
Builder.CreateStore(Val.first, RealPtr, isVolatile);
Builder.CreateStore(Val.second, ImagPtr, isVolatile);
}
@@ -303,8 +312,8 @@ void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *Ptr,
ComplexPairTy ComplexExprEmitter::VisitExpr(Expr *E) {
CGF.ErrorUnsupported(E, "complex expression");
- const llvm::Type *EltTy =
- CGF.ConvertType(E->getType()->getAsComplexType()->getElementType());
+ const llvm::Type *EltTy =
+ CGF.ConvertType(E->getType()->getAs<ComplexType>()->getElementType());
llvm::Value *U = llvm::UndefValue::get(EltTy);
return ComplexPairTy(U, U);
}
@@ -312,7 +321,8 @@ ComplexPairTy ComplexExprEmitter::VisitExpr(Expr *E) {
ComplexPairTy ComplexExprEmitter::
VisitImaginaryLiteral(const ImaginaryLiteral *IL) {
llvm::Value *Imag = CGF.EmitScalarExpr(IL->getSubExpr());
- return ComplexPairTy(llvm::Constant::getNullValue(Imag->getType()), Imag);
+ return
+ ComplexPairTy(llvm::Constant::getNullValue(Imag->getType()), Imag);
}
@@ -332,8 +342,8 @@ ComplexPairTy ComplexExprEmitter::EmitComplexToComplexCast(ComplexPairTy Val,
QualType SrcType,
QualType DestType) {
// Get the src/dest element type.
- SrcType = SrcType->getAsComplexType()->getElementType();
- DestType = DestType->getAsComplexType()->getElementType();
+ SrcType = SrcType->getAs<ComplexType>()->getElementType();
+ DestType = DestType->getAs<ComplexType>()->getElementType();
// C99 6.3.1.6: When a value of complex type is converted to another
// complex type, both the real and imaginary parts follow the conversion
@@ -347,7 +357,7 @@ ComplexPairTy ComplexExprEmitter::EmitCast(Expr *Op, QualType DestTy) {
// Two cases here: cast from (complex to complex) and (scalar to complex).
if (Op->getType()->isAnyComplexType())
return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy);
-
+
// C99 6.3.1.7: When a value of real type is converted to a complex type, the
// real part of the complex result value is determined by the rules of
// conversion to the corresponding real type and the imaginary part of the
@@ -355,9 +365,9 @@ ComplexPairTy ComplexExprEmitter::EmitCast(Expr *Op, QualType DestTy) {
llvm::Value *Elt = CGF.EmitScalarExpr(Op);
// Convert the input element to the element type of the complex.
- DestTy = DestTy->getAsComplexType()->getElementType();
+ DestTy = DestTy->getAs<ComplexType>()->getElementType();
Elt = CGF.EmitScalarConversion(Elt, Op->getType(), DestTy);
-
+
// Return (realval, 0).
return ComplexPairTy(Elt, llvm::Constant::getNullValue(Elt->getType()));
}
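
// Illustrative sketch, not from the patch, of the C99 6.3.1.7 rule cited
// above: a real value converted to a complex type becomes the real part, and
// the imaginary part is zero.
#include <complex>

static std::complex<double> widen_to_complex(int x) {
  return std::complex<double>(static_cast<double>(x), 0.0);   // (realval, 0)
}
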
@@ -367,31 +377,30 @@ ComplexPairTy ComplexExprEmitter::VisitPrePostIncDec(const UnaryOperator *E,
LValue LV = CGF.EmitLValue(E->getSubExpr());
ComplexPairTy InVal = EmitLoadOfComplex(LV.getAddress(),
LV.isVolatileQualified());
-
+
llvm::Value *NextVal;
if (isa<llvm::IntegerType>(InVal.first->getType())) {
uint64_t AmountVal = isInc ? 1 : -1;
NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
-
+
// Add the inc/dec to the real part.
NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
-
} else {
- QualType ElemTy = E->getType()->getAsComplexType()->getElementType();
+ QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
llvm::APFloat FVal(CGF.getContext().getFloatTypeSemantics(ElemTy), 1);
if (!isInc)
FVal.changeSign();
- NextVal = llvm::ConstantFP::get(FVal);
-
+ NextVal = llvm::ConstantFP::get(CGF.getLLVMContext(), FVal);
+
// Add the inc/dec to the real part.
NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
}
-
+
ComplexPairTy IncVal(NextVal, InVal.second);
-
+
// Store the updated result through the lvalue.
EmitStoreOfComplex(IncVal, LV.getAddress(), LV.isVolatileQualified());
-
+
// If this is a postinc, return the value read from memory, otherwise use the
// updated value.
return isPre ? IncVal : InVal;
@@ -403,7 +412,7 @@ ComplexPairTy ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
TestAndClearIgnoreRealAssign();
TestAndClearIgnoreImagAssign();
ComplexPairTy Op = Visit(E->getSubExpr());
-
+
llvm::Value *ResR, *ResI;
if (Op.first->getType()->isFloatingPoint()) {
ResR = Builder.CreateFNeg(Op.first, "neg.r");
@@ -427,13 +436,13 @@ ComplexPairTy ComplexExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
ResI = Builder.CreateFNeg(Op.second, "conj.i");
else
ResI = Builder.CreateNeg(Op.second, "conj.i");
-
+
return ComplexPairTy(Op.first, ResI);
}
ComplexPairTy ComplexExprEmitter::EmitBinAdd(const BinOpInfo &Op) {
llvm::Value *ResR, *ResI;
-
+
if (Op.LHS.first->getType()->isFloatingPoint()) {
ResR = Builder.CreateFAdd(Op.LHS.first, Op.RHS.first, "add.r");
ResI = Builder.CreateFAdd(Op.LHS.second, Op.RHS.second, "add.i");
@@ -460,12 +469,12 @@ ComplexPairTy ComplexExprEmitter::EmitBinSub(const BinOpInfo &Op) {
ComplexPairTy ComplexExprEmitter::EmitBinMul(const BinOpInfo &Op) {
using llvm::Value;
Value *ResR, *ResI;
-
+
if (Op.LHS.first->getType()->isFloatingPoint()) {
Value *ResRl = Builder.CreateFMul(Op.LHS.first, Op.RHS.first, "mul.rl");
Value *ResRr = Builder.CreateFMul(Op.LHS.second, Op.RHS.second,"mul.rr");
ResR = Builder.CreateFSub(ResRl, ResRr, "mul.r");
-
+
Value *ResIl = Builder.CreateFMul(Op.LHS.second, Op.RHS.first, "mul.il");
Value *ResIr = Builder.CreateFMul(Op.LHS.first, Op.RHS.second, "mul.ir");
ResI = Builder.CreateFAdd(ResIl, ResIr, "mul.i");
@@ -473,7 +482,7 @@ ComplexPairTy ComplexExprEmitter::EmitBinMul(const BinOpInfo &Op) {
Value *ResRl = Builder.CreateMul(Op.LHS.first, Op.RHS.first, "mul.rl");
Value *ResRr = Builder.CreateMul(Op.LHS.second, Op.RHS.second,"mul.rr");
ResR = Builder.CreateSub(ResRl, ResRr, "mul.r");
-
+
Value *ResIl = Builder.CreateMul(Op.LHS.second, Op.RHS.first, "mul.il");
Value *ResIr = Builder.CreateMul(Op.LHS.first, Op.RHS.second, "mul.ir");
ResI = Builder.CreateAdd(ResIl, ResIr, "mul.i");
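
// Illustrative sketch, not from the patch, of the expansion above:
// (a+bi)*(c+di) = (ac - bd) + (bc + ad)i, using plain doubles instead of
// IRBuilder calls.
static void cplx_mul(double a, double b, double c, double d,
                     double &resR, double &resI) {
  resR = a * c - b * d;   // mul.r
  resI = b * c + a * d;   // mul.i
}
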
@@ -484,7 +493,7 @@ ComplexPairTy ComplexExprEmitter::EmitBinMul(const BinOpInfo &Op) {
ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) {
llvm::Value *LHSr = Op.LHS.first, *LHSi = Op.LHS.second;
llvm::Value *RHSr = Op.RHS.first, *RHSi = Op.RHS.second;
-
+
llvm::Value *DSTr, *DSTi;
if (Op.LHS.first->getType()->isFloatingPoint()) {
@@ -492,15 +501,15 @@ ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) {
llvm::Value *Tmp1 = Builder.CreateFMul(LHSr, RHSr, "tmp"); // a*c
llvm::Value *Tmp2 = Builder.CreateFMul(LHSi, RHSi, "tmp"); // b*d
llvm::Value *Tmp3 = Builder.CreateFAdd(Tmp1, Tmp2, "tmp"); // ac+bd
-
+
llvm::Value *Tmp4 = Builder.CreateFMul(RHSr, RHSr, "tmp"); // c*c
llvm::Value *Tmp5 = Builder.CreateFMul(RHSi, RHSi, "tmp"); // d*d
llvm::Value *Tmp6 = Builder.CreateFAdd(Tmp4, Tmp5, "tmp"); // cc+dd
-
+
llvm::Value *Tmp7 = Builder.CreateFMul(LHSi, RHSr, "tmp"); // b*c
llvm::Value *Tmp8 = Builder.CreateFMul(LHSr, RHSi, "tmp"); // a*d
llvm::Value *Tmp9 = Builder.CreateFSub(Tmp7, Tmp8, "tmp"); // bc-ad
-
+
DSTr = Builder.CreateFDiv(Tmp3, Tmp6, "tmp");
DSTi = Builder.CreateFDiv(Tmp9, Tmp6, "tmp");
} else {
@@ -508,16 +517,16 @@ ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) {
llvm::Value *Tmp1 = Builder.CreateMul(LHSr, RHSr, "tmp"); // a*c
llvm::Value *Tmp2 = Builder.CreateMul(LHSi, RHSi, "tmp"); // b*d
llvm::Value *Tmp3 = Builder.CreateAdd(Tmp1, Tmp2, "tmp"); // ac+bd
-
+
llvm::Value *Tmp4 = Builder.CreateMul(RHSr, RHSr, "tmp"); // c*c
llvm::Value *Tmp5 = Builder.CreateMul(RHSi, RHSi, "tmp"); // d*d
llvm::Value *Tmp6 = Builder.CreateAdd(Tmp4, Tmp5, "tmp"); // cc+dd
-
+
llvm::Value *Tmp7 = Builder.CreateMul(LHSi, RHSr, "tmp"); // b*c
llvm::Value *Tmp8 = Builder.CreateMul(LHSr, RHSi, "tmp"); // a*d
llvm::Value *Tmp9 = Builder.CreateSub(Tmp7, Tmp8, "tmp"); // bc-ad
-
- if (Op.Ty->getAsComplexType()->getElementType()->isUnsignedIntegerType()) {
+
+ if (Op.Ty->getAs<ComplexType>()->getElementType()->isUnsignedIntegerType()) {
DSTr = Builder.CreateUDiv(Tmp3, Tmp6, "tmp");
DSTi = Builder.CreateUDiv(Tmp9, Tmp6, "tmp");
} else {
@@ -525,11 +534,11 @@ ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) {
DSTi = Builder.CreateSDiv(Tmp9, Tmp6, "tmp");
}
}
-
+
return ComplexPairTy(DSTr, DSTi);
}
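
// Illustrative sketch, not from the patch, of the division expansion above:
// (a+bi)/(c+di) = ((ac+bd) + (bc-ad)i) / (cc+dd), with plain doubles standing
// in for the emitted IR.
static void cplx_div(double a, double b, double c, double d,
                     double &resR, double &resI) {
  double denom = c * c + d * d;       // cc + dd
  resR = (a * c + b * d) / denom;     // (ac + bd) / (cc + dd)
  resI = (b * c - a * d) / denom;     // (bc - ad) / (cc + dd)
}
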
-ComplexExprEmitter::BinOpInfo
+ComplexExprEmitter::BinOpInfo
ComplexExprEmitter::EmitBinOps(const BinaryOperator *E) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
@@ -554,27 +563,27 @@ EmitCompoundAssign(const CompoundAssignOperator *E,
QualType LHSTy = E->getLHS()->getType(), RHSTy = E->getRHS()->getType();
BinOpInfo OpInfo;
-
+
// Load the RHS and LHS operands.
// __block variables need to have the rhs evaluated first, plus this should
// improve codegen a little. It is possible for the RHS to be complex or
// scalar.
OpInfo.Ty = E->getComputationResultType();
OpInfo.RHS = EmitCast(E->getRHS(), OpInfo.Ty);
-
+
LValue LHSLV = CGF.EmitLValue(E->getLHS());
// We know the LHS is a complex lvalue.
- OpInfo.LHS=EmitLoadOfComplex(LHSLV.getAddress(),LHSLV.isVolatileQualified());
+ OpInfo.LHS=EmitLoadOfComplex(LHSLV.getAddress(), LHSLV.isVolatileQualified());
OpInfo.LHS=EmitComplexToComplexCast(OpInfo.LHS, LHSTy, OpInfo.Ty);
-
+
// Expand the binary operator.
ComplexPairTy Result = (this->*Func)(OpInfo);
-
+
// Truncate the result back to the LHS type.
Result = EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy);
-
+
// Store the result value into the LHS lvalue.
EmitStoreOfComplex(Result, LHSLV.getAddress(), LHSLV.isVolatileQualified());
// And now return the LHS
@@ -598,7 +607,7 @@ ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
// Compute the address to store into.
LValue LHS = CGF.EmitLValue(E->getLHS());
-
+
// Store into it, if simple.
if (LHS.isSimple()) {
EmitStoreOfComplex(Val, LHS.getAddress(), LHS.isVolatileQualified());
@@ -610,7 +619,7 @@ ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
IgnoreImagAssign = ignimag;
return EmitLoadOfComplex(LHS.getAddress(), LHS.isVolatileQualified());
}
-
+
// Otherwise we must have a property setter (no complex vector/bitfields).
if (LHS.isPropertyRef())
CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(), RValue::getComplex(Val));
@@ -641,27 +650,27 @@ VisitConditionalOperator(const ConditionalOperator *E) {
llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
-
+
llvm::Value *Cond = CGF.EvaluateExprAsBool(E->getCond());
Builder.CreateCondBr(Cond, LHSBlock, RHSBlock);
-
+
CGF.EmitBlock(LHSBlock);
-
+
// Handle the GNU extension for missing LHS.
assert(E->getLHS() && "Must have LHS for complex value");
ComplexPairTy LHS = Visit(E->getLHS());
LHSBlock = Builder.GetInsertBlock();
CGF.EmitBranch(ContBlock);
-
+
CGF.EmitBlock(RHSBlock);
-
+
ComplexPairTy RHS = Visit(E->getRHS());
RHSBlock = Builder.GetInsertBlock();
CGF.EmitBranch(ContBlock);
-
+
CGF.EmitBlock(ContBlock);
-
+
// Create a PHI node for the real part.
llvm::PHINode *RealPN = Builder.CreatePHI(LHS.first->getType(), "cond.r");
RealPN->reserveOperandSpace(2);
@@ -673,7 +682,7 @@ VisitConditionalOperator(const ConditionalOperator *E) {
ImagPN->reserveOperandSpace(2);
ImagPN->addIncoming(LHS.second, LHSBlock);
ImagPN->addIncoming(RHS.second, RHSBlock);
-
+
return ComplexPairTy(RealPN, ImagPN);
}
@@ -692,7 +701,7 @@ ComplexPairTy ComplexExprEmitter::VisitInitListExpr(InitListExpr *E) {
return Visit(E->getInit(0));
  // Empty init list initializes to null
- QualType Ty = E->getType()->getAsComplexType()->getElementType();
+ QualType Ty = E->getType()->getAs<ComplexType>()->getElementType();
const llvm::Type* LTy = CGF.ConvertType(Ty);
llvm::Value* zeroConstant = llvm::Constant::getNullValue(LTy);
return ComplexPairTy(zeroConstant, zeroConstant);
@@ -704,8 +713,8 @@ ComplexPairTy ComplexExprEmitter::VisitVAArgExpr(VAArgExpr *E) {
if (!ArgPtr) {
CGF.ErrorUnsupported(E, "complex va_arg expression");
- const llvm::Type *EltTy =
- CGF.ConvertType(E->getType()->getAsComplexType()->getElementType());
+ const llvm::Type *EltTy =
+ CGF.ConvertType(E->getType()->getAs<ComplexType>()->getElementType());
llvm::Value *U = llvm::UndefValue::get(EltTy);
return ComplexPairTy(U, U);
}
@@ -724,7 +733,7 @@ ComplexPairTy CodeGenFunction::EmitComplexExpr(const Expr *E, bool IgnoreReal,
bool IgnoreImag, bool IgnoreRealAssign, bool IgnoreImagAssign) {
assert(E && E->getType()->isAnyComplexType() &&
"Invalid complex expression to emit");
-
+
return ComplexExprEmitter(*this, IgnoreReal, IgnoreImag, IgnoreRealAssign,
IgnoreImagAssign)
.Visit(const_cast<Expr*>(E));
@@ -750,7 +759,7 @@ void CodeGenFunction::StoreComplexToAddr(ComplexPairTy V,
}
/// LoadComplexFromAddr - Load a complex number from the specified address.
-ComplexPairTy CodeGenFunction::LoadComplexFromAddr(llvm::Value *SrcAddr,
+ComplexPairTy CodeGenFunction::LoadComplexFromAddr(llvm::Value *SrcAddr,
bool SrcIsVolatile) {
return ComplexExprEmitter(*this).EmitLoadOfComplex(SrcAddr, SrcIsVolatile);
}
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index 37c9c366fee6..7f540c3c0688 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -16,6 +16,7 @@
#include "CGObjCRuntime.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "llvm/Constants.h"
@@ -27,45 +28,541 @@ using namespace clang;
using namespace CodeGen;
namespace {
-class VISIBILITY_HIDDEN ConstExprEmitter :
+
+class VISIBILITY_HIDDEN ConstStructBuilder {
+ CodeGenModule &CGM;
+ CodeGenFunction *CGF;
+
+ bool Packed;
+
+ unsigned NextFieldOffsetInBytes;
+
+ unsigned LLVMStructAlignment;
+
+ std::vector<llvm::Constant *> Elements;
+
+ ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
+ : CGM(CGM), CGF(CGF), Packed(false), NextFieldOffsetInBytes(0),
+ LLVMStructAlignment(1) { }
+
+ bool AppendField(const FieldDecl *Field, uint64_t FieldOffset,
+ const Expr *InitExpr) {
+ uint64_t FieldOffsetInBytes = FieldOffset / 8;
+
+ assert(NextFieldOffsetInBytes <= FieldOffsetInBytes
+ && "Field offset mismatch!");
+
+ // Emit the field.
+ llvm::Constant *C = CGM.EmitConstantExpr(InitExpr, Field->getType(), CGF);
+ if (!C)
+ return false;
+
+ unsigned FieldAlignment = getAlignment(C);
+
+ // Round up the field offset to the alignment of the field type.
+ uint64_t AlignedNextFieldOffsetInBytes =
+ llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);
+
+ if (AlignedNextFieldOffsetInBytes > FieldOffsetInBytes) {
+ assert(!Packed && "Alignment is wrong even with a packed struct!");
+
+ // Convert the struct to a packed struct.
+ ConvertStructToPacked();
+
+ AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
+ }
+
+ if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
+ // We need to append padding.
+ AppendPadding(FieldOffsetInBytes - NextFieldOffsetInBytes);
+
+ assert(NextFieldOffsetInBytes == FieldOffsetInBytes &&
+ "Did not add enough padding!");
+
+ AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
+ }
+
+ // Add the field.
+ Elements.push_back(C);
+ NextFieldOffsetInBytes = AlignedNextFieldOffsetInBytes + getSizeInBytes(C);
+
+ if (Packed)
+ assert(LLVMStructAlignment == 1 && "Packed struct not byte-aligned!");
+ else
+ LLVMStructAlignment = std::max(LLVMStructAlignment, FieldAlignment);
+
+ return true;
+ }
+
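
// Illustrative sketch, not from the patch, of the placement rule in
// AppendField above: round the running offset up to the field's alignment;
// if that overshoots the required offset the struct must be converted to a
// packed layout, and if it undershoots, i8 padding is inserted first.
#include <cstdint>

static std::uint64_t round_up(std::uint64_t offset, std::uint64_t align) {
  return (offset + align - 1) / align * align;   // same result as RoundUpToAlignment
}
// e.g. with the next free byte at 5, a 4-byte-aligned field required at
// offset 6 forces the packed path (round_up(5, 4) == 8 > 6), while one
// required at offset 12 simply gets 7 bytes of padding first.
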
+ bool AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
+ const Expr *InitExpr) {
+ llvm::ConstantInt *CI =
+ cast_or_null<llvm::ConstantInt>(CGM.EmitConstantExpr(InitExpr,
+ Field->getType(),
+ CGF));
+ // FIXME: Can this ever happen?
+ if (!CI)
+ return false;
+
+ if (FieldOffset > NextFieldOffsetInBytes * 8) {
+ // We need to add padding.
+ uint64_t NumBytes =
+ llvm::RoundUpToAlignment(FieldOffset -
+ NextFieldOffsetInBytes * 8, 8) / 8;
+
+ AppendPadding(NumBytes);
+ }
+
+ uint64_t FieldSize =
+ Field->getBitWidth()->EvaluateAsInt(CGM.getContext()).getZExtValue();
+
+ llvm::APInt FieldValue = CI->getValue();
+
+ // Promote the size of FieldValue if necessary
+ // FIXME: This should never occur, but currently it can because initializer
+ // constants are cast to bool, and because clang is not enforcing bitfield
+ // width limits.
+ if (FieldSize > FieldValue.getBitWidth())
+ FieldValue.zext(FieldSize);
+
+ // Truncate the size of FieldValue to the bit field size.
+ if (FieldSize < FieldValue.getBitWidth())
+ FieldValue.trunc(FieldSize);
+
+ if (FieldOffset < NextFieldOffsetInBytes * 8) {
+ // Either part of the field or the entire field can go into the previous
+ // byte.
+ assert(!Elements.empty() && "Elements can't be empty!");
+
+ unsigned BitsInPreviousByte =
+ NextFieldOffsetInBytes * 8 - FieldOffset;
+
+ bool FitsCompletelyInPreviousByte =
+ BitsInPreviousByte >= FieldValue.getBitWidth();
+
+ llvm::APInt Tmp = FieldValue;
+
+ if (!FitsCompletelyInPreviousByte) {
+ unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;
+
+ if (CGM.getTargetData().isBigEndian()) {
+ Tmp = Tmp.lshr(NewFieldWidth);
+ Tmp.trunc(BitsInPreviousByte);
+
+ // We want the remaining high bits.
+ FieldValue.trunc(NewFieldWidth);
+ } else {
+ Tmp.trunc(BitsInPreviousByte);
+
+ // We want the remaining low bits.
+ FieldValue = FieldValue.lshr(BitsInPreviousByte);
+ FieldValue.trunc(NewFieldWidth);
+ }
+ }
+
+ Tmp.zext(8);
+ if (CGM.getTargetData().isBigEndian()) {
+ if (FitsCompletelyInPreviousByte)
+ Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
+ } else {
+ Tmp = Tmp.shl(8 - BitsInPreviousByte);
+ }
+
+ // Or in the bits that go into the previous byte.
+ Tmp |= cast<llvm::ConstantInt>(Elements.back())->getValue();
+ Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);
+
+ if (FitsCompletelyInPreviousByte)
+ return true;
+ }
+
+ while (FieldValue.getBitWidth() > 8) {
+ llvm::APInt Tmp;
+
+ if (CGM.getTargetData().isBigEndian()) {
+ // We want the high bits.
+ Tmp = FieldValue;
+ Tmp = Tmp.lshr(Tmp.getBitWidth() - 8);
+ Tmp.trunc(8);
+ } else {
+ // We want the low bits.
+ Tmp = FieldValue;
+ Tmp.trunc(8);
+
+ FieldValue = FieldValue.lshr(8);
+ }
+
+ Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
+ NextFieldOffsetInBytes++;
+
+ FieldValue.trunc(FieldValue.getBitWidth() - 8);
+ }
+
+ assert(FieldValue.getBitWidth() > 0 &&
+ "Should have at least one bit left!");
+ assert(FieldValue.getBitWidth() <= 8 &&
+ "Should not have more than a byte left!");
+
+ if (FieldValue.getBitWidth() < 8) {
+ if (CGM.getTargetData().isBigEndian()) {
+ unsigned BitWidth = FieldValue.getBitWidth();
+
+ FieldValue.zext(8);
+ FieldValue = FieldValue << (8 - BitWidth);
+ } else
+ FieldValue.zext(8);
+ }
+
+ // Append the last element.
+ Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
+ FieldValue));
+ NextFieldOffsetInBytes++;
+ return true;
+ }
+
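
// Illustrative sketch, not from the patch, of the little-endian half of
// AppendBitField above: bits of a field value are OR-ed into the partially
// filled trailing byte and the rest are appended eight at a time. Standalone
// mock-up; the real code works on llvm::APInt and also handles big-endian
// targets.
#include <cstdint>
#include <vector>

static void append_bits_le(std::vector<std::uint8_t> &bytes,
                           std::uint64_t bitOffset, unsigned bitWidth,
                           std::uint64_t value) {
  for (unsigned i = 0; i != bitWidth; ++i, ++bitOffset) {
    if (bitOffset / 8 >= bytes.size())
      bytes.push_back(0);                                   // start a new byte
    if ((value >> i) & 1)
      bytes[bitOffset / 8] |= std::uint8_t(1u << (bitOffset % 8));
  }
}
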
+ void AppendPadding(uint64_t NumBytes) {
+ if (!NumBytes)
+ return;
+
+ const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
+ if (NumBytes > 1)
+ Ty = llvm::ArrayType::get(Ty, NumBytes);
+
+ llvm::Constant *C = llvm::Constant::getNullValue(Ty);
+ Elements.push_back(C);
+ assert(getAlignment(C) == 1 && "Padding must have 1 byte alignment!");
+
+ NextFieldOffsetInBytes += getSizeInBytes(C);
+ }
+
+ void AppendTailPadding(uint64_t RecordSize) {
+ assert(RecordSize % 8 == 0 && "Invalid record size!");
+
+ uint64_t RecordSizeInBytes = RecordSize / 8;
+ assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");
+
+ unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
+ AppendPadding(NumPadBytes);
+ }
+
+ void ConvertStructToPacked() {
+ std::vector<llvm::Constant *> PackedElements;
+ uint64_t ElementOffsetInBytes = 0;
+
+ for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
+ llvm::Constant *C = Elements[i];
+
+ unsigned ElementAlign =
+ CGM.getTargetData().getABITypeAlignment(C->getType());
+ uint64_t AlignedElementOffsetInBytes =
+ llvm::RoundUpToAlignment(ElementOffsetInBytes, ElementAlign);
+
+ if (AlignedElementOffsetInBytes > ElementOffsetInBytes) {
+ // We need some padding.
+ uint64_t NumBytes =
+ AlignedElementOffsetInBytes - ElementOffsetInBytes;
+
+ const llvm::Type *Ty = llvm::Type::getInt8Ty(CGF->getLLVMContext());
+ if (NumBytes > 1)
+ Ty = llvm::ArrayType::get(Ty, NumBytes);
+
+ llvm::Constant *Padding = llvm::Constant::getNullValue(Ty);
+ PackedElements.push_back(Padding);
+ ElementOffsetInBytes += getSizeInBytes(Padding);
+ }
+
+ PackedElements.push_back(C);
+ ElementOffsetInBytes += getSizeInBytes(C);
+ }
+
+ assert(ElementOffsetInBytes == NextFieldOffsetInBytes &&
+ "Packing the struct changed its size!");
+
+ Elements = PackedElements;
+ LLVMStructAlignment = 1;
+ Packed = true;
+ }
+
+ bool Build(InitListExpr *ILE) {
+ RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+
+ unsigned FieldNo = 0;
+ unsigned ElementNo = 0;
+ for (RecordDecl::field_iterator Field = RD->field_begin(),
+ FieldEnd = RD->field_end();
+ ElementNo < ILE->getNumInits() && Field != FieldEnd;
+ ++Field, ++FieldNo) {
+ if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
+ continue;
+
+ if (Field->isBitField()) {
+ if (!Field->getIdentifier())
+ continue;
+
+ if (!AppendBitField(*Field, Layout.getFieldOffset(FieldNo),
+ ILE->getInit(ElementNo)))
+ return false;
+ } else {
+ if (!AppendField(*Field, Layout.getFieldOffset(FieldNo),
+ ILE->getInit(ElementNo)))
+ return false;
+ }
+
+ ElementNo++;
+ }
+
+ uint64_t LayoutSizeInBytes = Layout.getSize() / 8;
+
+ if (NextFieldOffsetInBytes > LayoutSizeInBytes) {
+ // If the struct is bigger than the size of the record type,
+ // we must have a flexible array member at the end.
+ assert(RD->hasFlexibleArrayMember() &&
+ "Must have flexible array member if struct is bigger than type!");
+
+ // No tail padding is necessary.
+ return true;
+ }
+
+ uint64_t LLVMSizeInBytes = llvm::RoundUpToAlignment(NextFieldOffsetInBytes,
+ LLVMStructAlignment);
+
+ // Check if we need to convert the struct to a packed struct.
+ if (NextFieldOffsetInBytes <= LayoutSizeInBytes &&
+ LLVMSizeInBytes > LayoutSizeInBytes) {
+ assert(!Packed && "Size mismatch!");
+
+ ConvertStructToPacked();
+ assert(NextFieldOffsetInBytes == LayoutSizeInBytes &&
+ "Converting to packed did not help!");
+ }
+
+ // Append tail padding if necessary.
+ AppendTailPadding(Layout.getSize());
+
+ assert(Layout.getSize() / 8 == NextFieldOffsetInBytes &&
+ "Tail padding mismatch!");
+
+ return true;
+ }
+
+ unsigned getAlignment(const llvm::Constant *C) const {
+ if (Packed)
+ return 1;
+
+ return CGM.getTargetData().getABITypeAlignment(C->getType());
+ }
+
+ uint64_t getSizeInBytes(const llvm::Constant *C) const {
+ return CGM.getTargetData().getTypeAllocSize(C->getType());
+ }
+
+public:
+ static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
+ InitListExpr *ILE) {
+ ConstStructBuilder Builder(CGM, CGF);
+
+ if (!Builder.Build(ILE))
+ return 0;
+
+ llvm::Constant *Result =
+ llvm::ConstantStruct::get(CGM.getLLVMContext(),
+ Builder.Elements, Builder.Packed);
+
+ assert(llvm::RoundUpToAlignment(Builder.NextFieldOffsetInBytes,
+ Builder.getAlignment(Result)) ==
+ Builder.getSizeInBytes(Result) && "Size mismatch!");
+
+ return Result;
+ }
+};
+
+class VISIBILITY_HIDDEN ConstExprEmitter :
public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
CodeGenModule &CGM;
CodeGenFunction *CGF;
+ llvm::LLVMContext &VMContext;
public:
ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
- : CGM(cgm), CGF(cgf) {
+ : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) {
}
-
+
//===--------------------------------------------------------------------===//
// Visitor Methods
//===--------------------------------------------------------------------===//
-
+
llvm::Constant *VisitStmt(Stmt *S) {
return 0;
}
-
- llvm::Constant *VisitParenExpr(ParenExpr *PE) {
- return Visit(PE->getSubExpr());
+
+ llvm::Constant *VisitParenExpr(ParenExpr *PE) {
+ return Visit(PE->getSubExpr());
}
-
+
llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
return Visit(E->getInitializer());
}
-
+
+ llvm::Constant *EmitMemberFunctionPointer(CXXMethodDecl *MD) {
+ assert(MD->isInstance() && "Member function must not be static!");
+
+ const llvm::Type *PtrDiffTy =
+ CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
+
+ llvm::Constant *Values[2];
+
+ // Get the function pointer (or index if this is a virtual function).
+ if (MD->isVirtual()) {
+ int64_t Index = CGM.getVtableInfo().getMethodVtableIndex(MD);
+
+ Values[0] = llvm::ConstantInt::get(PtrDiffTy, Index + 1);
+ } else {
+ llvm::Constant *FuncPtr = CGM.GetAddrOfFunction(MD);
+
+ Values[0] = llvm::ConstantExpr::getPtrToInt(FuncPtr, PtrDiffTy);
+ }
+
+ // The adjustment will always be 0.
+ Values[1] = llvm::ConstantInt::get(PtrDiffTy, 0);
+
+ return llvm::ConstantStruct::get(CGM.getLLVMContext(),
+ Values, 2, /*Packed=*/false);
+ }
+
+ llvm::Constant *VisitUnaryAddrOf(UnaryOperator *E) {
+ if (const MemberPointerType *MPT =
+ E->getType()->getAs<MemberPointerType>()) {
+ QualType T = MPT->getPointeeType();
+ if (T->isFunctionProtoType()) {
+ QualifiedDeclRefExpr *DRE = cast<QualifiedDeclRefExpr>(E->getSubExpr());
+
+ return EmitMemberFunctionPointer(cast<CXXMethodDecl>(DRE->getDecl()));
+ }
+
+ // FIXME: Should we handle other member pointer types here too,
+ // or should they be handled by Expr::Evaluate?
+ }
+
+ return 0;
+ }
+
+ llvm::Constant *VisitBinSub(BinaryOperator *E) {
+ // This must be a pointer/pointer subtraction. This only happens for
+ // address of label.
+ if (!isa<AddrLabelExpr>(E->getLHS()->IgnoreParenNoopCasts(CGM.getContext())) ||
+ !isa<AddrLabelExpr>(E->getRHS()->IgnoreParenNoopCasts(CGM.getContext())))
+ return 0;
+
+ llvm::Constant *LHS = CGM.EmitConstantExpr(E->getLHS(),
+ E->getLHS()->getType(), CGF);
+ llvm::Constant *RHS = CGM.EmitConstantExpr(E->getRHS(),
+ E->getRHS()->getType(), CGF);
+
+ const llvm::Type *ResultType = ConvertType(E->getType());
+ LHS = llvm::ConstantExpr::getPtrToInt(LHS, ResultType);
+ RHS = llvm::ConstantExpr::getPtrToInt(RHS, ResultType);
+
+ // No need to divide by element size, since addr of label is always void*,
+ // which has size 1 in GNUish.
+ return llvm::ConstantExpr::getSub(LHS, RHS);
+ }
+
llvm::Constant *VisitCastExpr(CastExpr* E) {
- // GCC cast to union extension
- if (E->getType()->isUnionType()) {
+ switch (E->getCastKind()) {
+ case CastExpr::CK_ToUnion: {
+ // GCC cast to union extension
+ assert(E->getType()->isUnionType() &&
+ "Destination type is not union type!");
const llvm::Type *Ty = ConvertType(E->getType());
Expr *SubExpr = E->getSubExpr();
- return EmitUnion(CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF),
- Ty);
+
+ llvm::Constant *C =
+ CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF);
+ if (!C)
+ return 0;
+
+ // Build a struct with the union sub-element as the first member,
+ // and padded to the appropriate size
+ std::vector<llvm::Constant*> Elts;
+ std::vector<const llvm::Type*> Types;
+ Elts.push_back(C);
+ Types.push_back(C->getType());
+ unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
+ unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(Ty);
+
+ assert(CurSize <= TotalSize && "Union size mismatch!");
+ if (unsigned NumPadBytes = TotalSize - CurSize) {
+ const llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
+ if (NumPadBytes > 1)
+ Ty = llvm::ArrayType::get(Ty, NumPadBytes);
+
+ Elts.push_back(llvm::Constant::getNullValue(Ty));
+ Types.push_back(Ty);
+ }
+
+ llvm::StructType* STy =
+ llvm::StructType::get(C->getType()->getContext(), Types, false);
+ return llvm::ConstantStruct::get(STy, Elts);
+ }
+ case CastExpr::CK_NullToMemberPointer:
+ return CGM.EmitNullConstant(E->getType());
+
+ case CastExpr::CK_BaseToDerivedMemberPointer: {
+ Expr *SubExpr = E->getSubExpr();
+
+ const MemberPointerType *SrcTy =
+ SubExpr->getType()->getAs<MemberPointerType>();
+ const MemberPointerType *DestTy =
+ E->getType()->getAs<MemberPointerType>();
+
+ const CXXRecordDecl *BaseClass =
+ cast<CXXRecordDecl>(cast<RecordType>(SrcTy->getClass())->getDecl());
+ const CXXRecordDecl *DerivedClass =
+ cast<CXXRecordDecl>(cast<RecordType>(DestTy->getClass())->getDecl());
+
+ if (SrcTy->getPointeeType()->isFunctionProtoType()) {
+ llvm::Constant *C =
+ CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF);
+ if (!C)
+ return 0;
+
+ llvm::ConstantStruct *CS = cast<llvm::ConstantStruct>(C);
+
+ // Check if we need to update the adjustment.
+ if (llvm::Constant *Offset = CGM.GetCXXBaseClassOffset(DerivedClass,
+ BaseClass)) {
+ llvm::Constant *Values[2];
+
+ Values[0] = CS->getOperand(0);
+ Values[1] = llvm::ConstantExpr::getAdd(CS->getOperand(1), Offset);
+ return llvm::ConstantStruct::get(CGM.getLLVMContext(), Values, 2,
+ /*Packed=*/false);
+ }
+
+ return CS;
+ }
+ }
+
+ default: {
+ // FIXME: This should be handled by the CK_NoOp cast kind.
+ // Explicit and implicit no-op casts
+ QualType Ty = E->getType(), SubTy = E->getSubExpr()->getType();
+ if (CGM.getContext().hasSameUnqualifiedType(Ty, SubTy))
+ return Visit(E->getSubExpr());
+
+ // Handle integer->integer casts for address-of-label differences.
+ if (Ty->isIntegerType() && SubTy->isIntegerType() &&
+ CGF) {
+ llvm::Value *Src = Visit(E->getSubExpr());
+ if (Src == 0) return 0;
+
+ // Use EmitScalarConversion to perform the conversion.
+ return cast<llvm::Constant>(CGF->EmitScalarConversion(Src, SubTy, Ty));
+ }
+
+ return 0;
}
- // Explicit and implicit no-op casts
- QualType Ty = E->getType(), SubTy = E->getSubExpr()->getType();
- if (CGM.getContext().hasSameUnqualifiedType(Ty, SubTy)) {
- return Visit(E->getSubExpr());
}
- return 0;
}
llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
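
The EmitMemberFunctionPointer path and the CK_BaseToDerivedMemberPointer case in this hunk both treat a pointer to member function as a pair of ptrdiff_t-sized fields: the function address (or an encoded vtable index) followed by a this-adjustment, with the derived-to-base conversion only rewriting the second field. A minimal standard C++ sketch of that two-word layout, assuming an Itanium-style ABI and C++11 or later; the helper is illustrative and the printed values are implementation details:

#include <cstddef>
#include <cstdio>
#include <cstring>

struct Base {
  void plain() {}          // non-virtual: first word holds the function address
  virtual void virt() {}   // virtual: first word encodes a vtable lookup
};

template <typename PMF>
static void dump(const char *name, PMF pmf) {
  std::ptrdiff_t words[2] = {0, 0};
  static_assert(sizeof(pmf) == sizeof(words),
                "expects the two-word member-function-pointer layout");
  std::memcpy(words, &pmf, sizeof(pmf));
  std::printf("%-12s ptr/index = %td, this-adjustment = %td\n",
              name, words[0], words[1]);
}

int main() {
  dump("&Base::plain", &Base::plain);  // adjustment is 0, as the constant above assumes
  dump("&Base::virt", &Base::virt);
  return 0;
}
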
@@ -79,7 +576,7 @@ public:
unsigned NumInitElements = ILE->getNumInits();
// FIXME: Check for wide strings
// FIXME: Check for NumInitElements exactly equal to 1??
- if (NumInitElements > 0 &&
+ if (NumInitElements > 0 &&
(isa<StringLiteral>(ILE->getInit(0)) ||
isa<ObjCEncodeExpr>(ILE->getInit(0))) &&
ILE->getType()->getArrayElementTypeNoTypeQual()->isCharType())
@@ -87,7 +584,7 @@ public:
const llvm::Type *ElemTy = AType->getElementType();
unsigned NumElements = AType->getNumElements();
- // Initialising an array requires us to automatically
+ // Initialising an array requires us to automatically
// initialise any elements that have not been initialised explicitly
unsigned NumInitableElts = std::min(NumInitElements, NumElements);
@@ -113,184 +610,20 @@ public:
std::vector<const llvm::Type*> Types;
for (unsigned i = 0; i < Elts.size(); ++i)
Types.push_back(Elts[i]->getType());
- const llvm::StructType *SType = llvm::StructType::get(Types, true);
+ const llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
+ Types, true);
return llvm::ConstantStruct::get(SType, Elts);
}
- return llvm::ConstantArray::get(AType, Elts);
- }
-
- void InsertBitfieldIntoStruct(std::vector<llvm::Constant*>& Elts,
- FieldDecl* Field, Expr* E) {
- // Calculate the value to insert
- llvm::Constant *C = CGM.EmitConstantExpr(E, Field->getType(), CGF);
- if (!C)
- return;
-
- llvm::ConstantInt *CI = dyn_cast<llvm::ConstantInt>(C);
- if (!CI) {
- CGM.ErrorUnsupported(E, "bitfield initialization");
- return;
- }
- llvm::APInt V = CI->getValue();
-
- // Calculate information about the relevant field
- const llvm::Type* Ty = CI->getType();
- const llvm::TargetData &TD = CGM.getTypes().getTargetData();
- unsigned size = TD.getTypeAllocSizeInBits(Ty);
- unsigned fieldOffset = CGM.getTypes().getLLVMFieldNo(Field) * size;
- CodeGenTypes::BitFieldInfo bitFieldInfo =
- CGM.getTypes().getBitFieldInfo(Field);
- fieldOffset += bitFieldInfo.Begin;
-
- // Find where to start the insertion
- // FIXME: This is O(n^2) in the number of bit-fields!
- // FIXME: This won't work if the struct isn't completely packed!
- unsigned offset = 0, i = 0;
- while (offset < (fieldOffset & -8))
- offset += TD.getTypeAllocSizeInBits(Elts[i++]->getType());
-
- // Advance over 0 sized elements (must terminate in bounds since
- // the bitfield must have a size).
- while (TD.getTypeAllocSizeInBits(Elts[i]->getType()) == 0)
- ++i;
-
- // Promote the size of V if necessary
- // FIXME: This should never occur, but currently it can because initializer
- // constants are cast to bool, and because clang is not enforcing bitfield
- // width limits.
- if (bitFieldInfo.Size > V.getBitWidth())
- V.zext(bitFieldInfo.Size);
-
- // Insert the bits into the struct
- // FIXME: This algorthm is only correct on X86!
- // FIXME: THis algorthm assumes bit-fields only have byte-size elements!
- unsigned bitsToInsert = bitFieldInfo.Size;
- unsigned curBits = std::min(8 - (fieldOffset & 7), bitsToInsert);
- unsigned byte = V.getLoBits(curBits).getZExtValue() << (fieldOffset & 7);
- do {
- llvm::Constant* byteC = llvm::ConstantInt::get(llvm::Type::Int8Ty, byte);
- Elts[i] = llvm::ConstantExpr::getOr(Elts[i], byteC);
- ++i;
- V = V.lshr(curBits);
- bitsToInsert -= curBits;
-
- if (!bitsToInsert)
- break;
-
- curBits = bitsToInsert > 8 ? 8 : bitsToInsert;
- byte = V.getLoBits(curBits).getZExtValue();
- } while (true);
+ return llvm::ConstantArray::get(AType, Elts);
}
llvm::Constant *EmitStructInitialization(InitListExpr *ILE) {
- const llvm::StructType *SType =
- cast<llvm::StructType>(ConvertType(ILE->getType()));
- RecordDecl *RD = ILE->getType()->getAsRecordType()->getDecl();
- std::vector<llvm::Constant*> Elts;
-
- // Initialize the whole structure to zero.
- // FIXME: This doesn't handle member pointers correctly!
- for (unsigned i = 0; i < SType->getNumElements(); ++i) {
- const llvm::Type *FieldTy = SType->getElementType(i);
- Elts.push_back(llvm::Constant::getNullValue(FieldTy));
- }
-
- // Copy initializer elements. Skip padding fields.
- unsigned EltNo = 0; // Element no in ILE
- bool RewriteType = false;
- for (RecordDecl::field_iterator Field = RD->field_begin(),
- FieldEnd = RD->field_end();
- EltNo < ILE->getNumInits() && Field != FieldEnd; ++Field) {
- if (Field->isBitField()) {
- if (!Field->getIdentifier())
- continue;
- InsertBitfieldIntoStruct(Elts, *Field, ILE->getInit(EltNo));
- } else {
- unsigned FieldNo = CGM.getTypes().getLLVMFieldNo(*Field);
- llvm::Constant *C = CGM.EmitConstantExpr(ILE->getInit(EltNo),
- Field->getType(), CGF);
- if (!C) return 0;
- RewriteType |= (C->getType() != Elts[FieldNo]->getType());
- Elts[FieldNo] = C;
- }
- EltNo++;
- }
-
- if (RewriteType) {
- // FIXME: Make this work for non-packed structs
- assert(SType->isPacked() && "Cannot recreate unpacked structs");
- std::vector<const llvm::Type*> Types;
- for (unsigned i = 0; i < Elts.size(); ++i)
- Types.push_back(Elts[i]->getType());
- SType = llvm::StructType::get(Types, true);
- }
-
- return llvm::ConstantStruct::get(SType, Elts);
- }
-
- llvm::Constant *EmitUnion(llvm::Constant *C, const llvm::Type *Ty) {
- if (!C)
- return 0;
-
- // Build a struct with the union sub-element as the first member,
- // and padded to the appropriate size
- std::vector<llvm::Constant*> Elts;
- std::vector<const llvm::Type*> Types;
- Elts.push_back(C);
- Types.push_back(C->getType());
- unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
- unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(Ty);
- while (CurSize < TotalSize) {
- Elts.push_back(llvm::Constant::getNullValue(llvm::Type::Int8Ty));
- Types.push_back(llvm::Type::Int8Ty);
- CurSize++;
- }
-
- // This always generates a packed struct
- // FIXME: Try to generate an unpacked struct when we can
- llvm::StructType* STy = llvm::StructType::get(Types, true);
- return llvm::ConstantStruct::get(STy, Elts);
+ return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
}
llvm::Constant *EmitUnionInitialization(InitListExpr *ILE) {
- const llvm::Type *Ty = ConvertType(ILE->getType());
-
- FieldDecl* curField = ILE->getInitializedFieldInUnion();
- if (!curField) {
- // There's no field to initialize, so value-initialize the union.
-#ifndef NDEBUG
- // Make sure that it's really an empty and not a failure of
- // semantic analysis.
- RecordDecl *RD = ILE->getType()->getAsRecordType()->getDecl();
- for (RecordDecl::field_iterator Field = RD->field_begin(),
- FieldEnd = RD->field_end();
- Field != FieldEnd; ++Field)
- assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
-#endif
- return llvm::Constant::getNullValue(Ty);
- }
-
- if (curField->isBitField()) {
- // Create a dummy struct for bit-field insertion
- unsigned NumElts = CGM.getTargetData().getTypeAllocSize(Ty);
- llvm::Constant* NV = llvm::Constant::getNullValue(llvm::Type::Int8Ty);
- std::vector<llvm::Constant*> Elts(NumElts, NV);
-
- InsertBitfieldIntoStruct(Elts, curField, ILE->getInit(0));
- const llvm::ArrayType *RetTy =
- llvm::ArrayType::get(NV->getType(), NumElts);
- return llvm::ConstantArray::get(RetTy, Elts);
- }
-
- llvm::Constant *InitElem;
- if (ILE->getNumInits() > 0) {
- Expr *Init = ILE->getInit(0);
- InitElem = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
- } else {
- InitElem = CGM.EmitNullConstant(curField->getType());
- }
- return EmitUnion(InitElem, Ty);
+ return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
}
llvm::Constant *EmitVectorInitialization(InitListExpr *ILE) {
@@ -316,13 +649,13 @@ public:
for (; i < NumElements; ++i)
Elts.push_back(llvm::Constant::getNullValue(ElemTy));
- return llvm::ConstantVector::get(VType, Elts);
+ return llvm::ConstantVector::get(VType, Elts);
}
-
+
llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
return CGM.EmitNullConstant(E->getType());
}
-
+
llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
if (ILE->getType()->isScalarType()) {
// We have a scalar in braces. Just use the first element.
@@ -332,7 +665,7 @@ public:
}
return CGM.EmitNullConstant(ILE->getType());
}
-
+
if (ILE->getType()->isArrayType())
return EmitArrayInitialization(ILE);
@@ -353,11 +686,12 @@ public:
llvm::Constant *VisitStringLiteral(StringLiteral *E) {
assert(!E->getType()->isPointerType() && "Strings are always arrays");
-
+
// This must be a string initializing an array in a static initializer.
// Don't emit it as the address of the string, emit the string data itself
// as an inline array.
- return llvm::ConstantArray::get(CGM.GetStringForStringLiteral(E), false);
+ return llvm::ConstantArray::get(VMContext,
+ CGM.GetStringForStringLiteral(E), false);
}
llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
@@ -367,13 +701,13 @@ public:
std::string Str;
CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
const ConstantArrayType *CAT = cast<ConstantArrayType>(E->getType());
-
+
// Resize the string to the right size, adding zeros at the end, or
// truncating as needed.
Str.resize(CAT->getSize().getZExtValue(), '\0');
- return llvm::ConstantArray::get(Str, false);
+ return llvm::ConstantArray::get(VMContext, Str, false);
}
-
+
llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
return Visit(E->getSubExpr());
}
@@ -394,20 +728,21 @@ public:
llvm::Constant* C = Visit(CLE->getInitializer());
// FIXME: "Leaked" on failure.
if (C)
- C = new llvm::GlobalVariable(C->getType(),
- E->getType().isConstQualified(),
+ C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
+ E->getType().isConstant(CGM.getContext()),
llvm::GlobalValue::InternalLinkage,
- C, ".compoundliteral", &CGM.getModule());
+ C, ".compoundliteral", 0, false,
+ E->getType().getAddressSpace());
return C;
}
- case Expr::DeclRefExprClass:
+ case Expr::DeclRefExprClass:
case Expr::QualifiedDeclRefExprClass: {
NamedDecl *Decl = cast<DeclRefExpr>(E)->getDecl();
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
- return CGM.GetAddrOfFunction(GlobalDecl(FD));
+ return CGM.GetAddrOfFunction(FD);
if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
// We can never refer to a variable with local storage.
- if (!VD->hasLocalStorage()) {
+ if (!VD->hasLocalStorage()) {
if (VD->isFileVarDecl() || VD->hasExternalStorage())
return CGM.GetAddrOfGlobalVar(VD);
else if (VD->isBlockVarDecl()) {
@@ -430,21 +765,23 @@ public:
case Expr::PredefinedExprClass: {
// __func__/__FUNCTION__ -> "". __PRETTY_FUNCTION__ -> "top level".
std::string Str;
- if (cast<PredefinedExpr>(E)->getIdentType() ==
+ if (cast<PredefinedExpr>(E)->getIdentType() ==
PredefinedExpr::PrettyFunction)
Str = "top level";
-
+
return CGM.GetAddrOfConstantCString(Str, ".tmp");
}
case Expr::AddrLabelExprClass: {
assert(CGF && "Invalid address of label expression outside function.");
- unsigned id = CGF->GetIDForAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
- llvm::Constant *C = llvm::ConstantInt::get(llvm::Type::Int32Ty, id);
+ unsigned id =
+ CGF->GetIDForAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
+ llvm::Constant *C =
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), id);
return llvm::ConstantExpr::getIntToPtr(C, ConvertType(E->getType()));
}
case Expr::CallExprClass: {
CallExpr* CE = cast<CallExpr>(E);
- if (CE->isBuiltinCall(CGM.getContext()) !=
+ if (CE->isBuiltinCall(CGM.getContext()) !=
Builtin::BI__builtin___CFStringMakeConstantString)
break;
const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
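
Both VisitBinSub and the AddrLabelExpr case handled in this hunk exist for GCC's labels-as-values extension: &&label yields a void*, goto *expr jumps to it, and the difference of two &&labels is a plain byte offset, which is why no element-size division is needed. A small sketch of the extension itself; this is GNU C++, not ISO, so it needs g++ or clang++ and may draw extension warnings:

#include <cstdio>

static int dispatch(int op) {
  // GNU extension: label addresses stored in a static table (the pattern the
  // constant emitter has to handle), then a computed goto through the table.
  static void *table[] = { &&add_one, &&double_it };
  int value = 10;
  goto *table[op];
add_one:
  return value + 1;
double_it:
  return value * 2;
}

int main() {
  std::printf("%d %d\n", dispatch(0), dispatch(1));  // prints "11 20"
  return 0;
}
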
@@ -466,23 +803,23 @@ public:
return 0;
}
};
-
+
} // end anonymous namespace.
llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
QualType DestType,
CodeGenFunction *CGF) {
Expr::EvalResult Result;
-
+
bool Success = false;
-
+
if (DestType->isReferenceType())
Success = E->EvaluateAsLValue(Result, Context);
- else
+ else
Success = E->Evaluate(Result, Context);
-
+
if (Success) {
- assert(!Result.HasSideEffects &&
+ assert(!Result.HasSideEffects &&
"Constant expr should not have any side effects!");
switch (Result.Val.getKind()) {
case APValue::Uninitialized:
@@ -490,18 +827,17 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
return 0;
case APValue::LValue: {
const llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
- llvm::Constant *Offset =
- llvm::ConstantInt::get(llvm::Type::Int64Ty,
+ llvm::Constant *Offset =
+ llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
Result.Val.getLValueOffset());
-
+
llvm::Constant *C;
if (const Expr *LVBase = Result.Val.getLValueBase()) {
C = ConstExprEmitter(*this, CGF).EmitLValue(const_cast<Expr*>(LVBase));
// Apply offset if necessary.
if (!Offset->isNullValue()) {
- const llvm::Type *Type =
- llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ const llvm::Type *Type = llvm::Type::getInt8PtrTy(VMContext);
llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Type);
Casted = llvm::ConstantExpr::getGetElementPtr(Casted, &Offset, 1);
C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
@@ -529,9 +865,10 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
}
}
case APValue::Int: {
- llvm::Constant *C = llvm::ConstantInt::get(Result.Val.getInt());
-
- if (C->getType() == llvm::Type::Int1Ty) {
+ llvm::Constant *C = llvm::ConstantInt::get(VMContext,
+ Result.Val.getInt());
+
+ if (C->getType() == llvm::Type::getInt1Ty(VMContext)) {
const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
C = llvm::ConstantExpr::getZExt(C, BoolTy);
}
@@ -539,32 +876,38 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
}
case APValue::ComplexInt: {
llvm::Constant *Complex[2];
-
- Complex[0] = llvm::ConstantInt::get(Result.Val.getComplexIntReal());
- Complex[1] = llvm::ConstantInt::get(Result.Val.getComplexIntImag());
-
- return llvm::ConstantStruct::get(Complex, 2);
+
+ Complex[0] = llvm::ConstantInt::get(VMContext,
+ Result.Val.getComplexIntReal());
+ Complex[1] = llvm::ConstantInt::get(VMContext,
+ Result.Val.getComplexIntImag());
+
+ // FIXME: the target may want to specify that this is packed.
+ return llvm::ConstantStruct::get(VMContext, Complex, 2, false);
}
case APValue::Float:
- return llvm::ConstantFP::get(Result.Val.getFloat());
+ return llvm::ConstantFP::get(VMContext, Result.Val.getFloat());
case APValue::ComplexFloat: {
llvm::Constant *Complex[2];
-
- Complex[0] = llvm::ConstantFP::get(Result.Val.getComplexFloatReal());
- Complex[1] = llvm::ConstantFP::get(Result.Val.getComplexFloatImag());
-
- return llvm::ConstantStruct::get(Complex, 2);
+
+ Complex[0] = llvm::ConstantFP::get(VMContext,
+ Result.Val.getComplexFloatReal());
+ Complex[1] = llvm::ConstantFP::get(VMContext,
+ Result.Val.getComplexFloatImag());
+
+ // FIXME: the target may want to specify that this is packed.
+ return llvm::ConstantStruct::get(VMContext, Complex, 2, false);
}
case APValue::Vector: {
llvm::SmallVector<llvm::Constant *, 4> Inits;
unsigned NumElts = Result.Val.getVectorLength();
-
+
for (unsigned i = 0; i != NumElts; ++i) {
APValue &Elt = Result.Val.getVectorElt(i);
if (Elt.isInt())
- Inits.push_back(llvm::ConstantInt::get(Elt.getInt()));
+ Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
else
- Inits.push_back(llvm::ConstantFP::get(Elt.getFloat()));
+ Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
}
return llvm::ConstantVector::get(&Inits[0], Inits.size());
}
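
The ComplexInt and ComplexFloat cases above lower a complex constant to a plain two-element {real, imag} struct. The same real-then-imaginary pair is the layout C++11 guarantees for std::complex<T> (array-compatible with T[2]), which this standard C++ snippet makes visible:

#include <complex>
#include <cstdio>

int main() {
  const std::complex<double> z(3.0, -4.0);
  // std::complex<T> may be reinterpreted as T[2], real part first --
  // the same pair the constant emitter builds.
  const double *parts = reinterpret_cast<const double *>(&z);
  std::printf("real = %g, imag = %g\n", parts[0], parts[1]);  // 3, -4
  return 0;
}
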
@@ -572,15 +915,58 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
}
llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
- if (C && C->getType() == llvm::Type::Int1Ty) {
+ if (C && C->getType() == llvm::Type::getInt1Ty(VMContext)) {
const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
C = llvm::ConstantExpr::getZExt(C, BoolTy);
}
return C;
}
+static inline bool isDataMemberPointerType(QualType T) {
+ if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
+ return !MPT->getPointeeType()->isFunctionType();
+
+ return false;
+}
+
llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
- // Always return an LLVM null constant for now; this will change when we
- // get support for IRGen of member pointers.
- return llvm::Constant::getNullValue(getTypes().ConvertType(T));
+ // No need to check for member pointers when not compiling C++.
+ if (!getContext().getLangOptions().CPlusPlus)
+ return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));
+
+ if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) {
+
+ QualType ElementTy = CAT->getElementType();
+
+ // FIXME: Handle arrays of structs that contain member pointers.
+ if (isDataMemberPointerType(Context.getBaseElementType(ElementTy))) {
+ llvm::Constant *Element = EmitNullConstant(ElementTy);
+ uint64_t NumElements = CAT->getSize().getZExtValue();
+ std::vector<llvm::Constant *> Array(NumElements);
+ for (uint64_t i = 0; i != NumElements; ++i)
+ Array[i] = Element;
+
+ const llvm::ArrayType *ATy =
+ cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));
+ return llvm::ConstantArray::get(ATy, Array);
+ }
+ }
+
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+ // FIXME: It would be better if there was a way to explicitly compute the
+ // record layout instead of converting to a type.
+ Types.ConvertTagDeclType(RD);
+
+ const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
+ if (Layout.containsMemberPointer()) {
+ assert(0 && "FIXME: No support for structs with member pointers yet!");
+ }
+ }
+
+ // FIXME: Handle structs that contain member pointers.
+ if (isDataMemberPointerType(T))
+ return llvm::Constant::getAllOnesValue(getTypes().ConvertTypeForMem(T));
+
+ return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));
}
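
The getAllOnesValue() at the end of the new EmitNullConstant is not arbitrary: a pointer to a struct's first data member is represented by offset 0, so the null value needs a different encoding, and Itanium-style ABIs use -1 (all ones). A short standard C++ sketch that dumps both representations; the raw values are implementation-specific and shown only to motivate the choice:

#include <cstddef>
#include <cstdio>
#include <cstring>

struct S {
  int first;   // lives at offset 0
  int second;
};

static std::ptrdiff_t raw(int S::*p) {
  std::ptrdiff_t bits = 0;
  std::memcpy(&bits, &p, sizeof(p) < sizeof(bits) ? sizeof(p) : sizeof(bits));
  return bits;
}

int main() {
  int S::*null_member = nullptr;  // expected: -1 (all ones)
  int S::*first = &S::first;      // expected: 0 (the member's offset)
  std::printf("null member pointer = %td, &S::first = %td\n",
              raw(null_member), raw(first));
  return 0;
}
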
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 2af0639f5ce3..cc81256032af 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
+#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
@@ -49,20 +50,23 @@ class VISIBILITY_HIDDEN ScalarExprEmitter
CodeGenFunction &CGF;
CGBuilderTy &Builder;
bool IgnoreResultAssign;
-
+ llvm::LLVMContext &VMContext;
public:
ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
- : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira) {
+ : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
+ VMContext(cgf.getLLVMContext()) {
}
-
+
//===--------------------------------------------------------------------===//
// Utilities
//===--------------------------------------------------------------------===//
bool TestAndClearIgnoreResultAssign() {
- bool I = IgnoreResultAssign; IgnoreResultAssign = false;
- return I; }
+ bool I = IgnoreResultAssign;
+ IgnoreResultAssign = false;
+ return I;
+ }
const llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
@@ -70,25 +74,25 @@ public:
Value *EmitLoadOfLValue(LValue LV, QualType T) {
return CGF.EmitLoadOfLValue(LV, T).getScalarVal();
}
-
+
/// EmitLoadOfLValue - Given an expression with complex type that represents a
/// value l-value, this method emits the address of the l-value, then loads
/// and returns the result.
Value *EmitLoadOfLValue(const Expr *E) {
return EmitLoadOfLValue(EmitLValue(E), E->getType());
}
-
+
/// EmitConversionToBool - Convert the specified expression value to a
/// boolean (i1) truth value. This is equivalent to "Val != 0".
Value *EmitConversionToBool(Value *Src, QualType DstTy);
-
+
/// EmitScalarConversion - Emit a conversion from the specified type to the
/// specified destination type, both of which are LLVM scalar types.
Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy);
/// EmitComplexToScalarConversion - Emit a conversion from the specified
- /// complex type to the specified destination type, where the destination
- /// type is an LLVM scalar type.
+ /// complex type to the specified destination type, where the destination type
+ /// is an LLVM scalar type.
Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
QualType SrcTy, QualType DstTy);
@@ -106,10 +110,10 @@ public:
// Leaves.
Value *VisitIntegerLiteral(const IntegerLiteral *E) {
- return llvm::ConstantInt::get(E->getValue());
+ return llvm::ConstantInt::get(VMContext, E->getValue());
}
Value *VisitFloatingLiteral(const FloatingLiteral *E) {
- return llvm::ConstantFP::get(E->getValue());
+ return llvm::ConstantFP::get(VMContext, E->getValue());
}
Value *VisitCharacterLiteral(const CharacterLiteral *E) {
return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
@@ -130,32 +134,33 @@ public:
}
Value *VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E);
Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
- llvm::Value *V =
- llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ llvm::Value *V =
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()),
CGF.GetIDForAddrOfLabel(E->getLabel()));
-
+
return Builder.CreateIntToPtr(V, ConvertType(E->getType()));
}
-
+
// l-values.
Value *VisitDeclRefExpr(DeclRefExpr *E) {
if (const EnumConstantDecl *EC = dyn_cast<EnumConstantDecl>(E->getDecl()))
- return llvm::ConstantInt::get(EC->getInitVal());
+ return llvm::ConstantInt::get(VMContext, EC->getInitVal());
return EmitLoadOfLValue(E);
}
- Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
- return CGF.EmitObjCSelectorExpr(E);
+ Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
+ return CGF.EmitObjCSelectorExpr(E);
}
- Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
- return CGF.EmitObjCProtocolExpr(E);
+ Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
+ return CGF.EmitObjCProtocolExpr(E);
}
- Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
return EmitLoadOfLValue(E);
}
Value *VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
return EmitLoadOfLValue(E);
}
- Value *VisitObjCKVCRefExpr(ObjCKVCRefExpr *E) {
+ Value *VisitObjCImplicitSetterGetterRefExpr(
+ ObjCImplicitSetterGetterRefExpr *E) {
return EmitLoadOfLValue(E);
}
Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
@@ -173,7 +178,7 @@ public:
Value *VisitObjCEncodeExpr(const ObjCEncodeExpr *E) {
return EmitLValue(E).getAddress();
}
-
+
Value *VisitPredefinedExpr(Expr *E) { return EmitLValue(E).getAddress(); }
Value *VisitInitListExpr(InitListExpr *E) {
@@ -181,66 +186,67 @@ public:
(void)Ignore;
assert (Ignore == false && "init list ignored");
unsigned NumInitElements = E->getNumInits();
-
+
if (E->hadArrayRangeDesignator()) {
CGF.ErrorUnsupported(E, "GNU array range designator extension");
}
- const llvm::VectorType *VType =
+ const llvm::VectorType *VType =
dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
-
+
// We have a scalar in braces. Just use the first element.
- if (!VType)
+ if (!VType)
return Visit(E->getInit(0));
-
+
unsigned NumVectorElements = VType->getNumElements();
const llvm::Type *ElementType = VType->getElementType();
// Emit individual vector element stores.
llvm::Value *V = llvm::UndefValue::get(VType);
-
+
// Emit initializers
unsigned i;
for (i = 0; i < NumInitElements; ++i) {
Value *NewV = Visit(E->getInit(i));
- Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i);
+ Value *Idx =
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()), i);
V = Builder.CreateInsertElement(V, NewV, Idx);
}
-
+
// Emit remaining default initializers
for (/* Do not initialize i*/; i < NumVectorElements; ++i) {
- Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i);
+ Value *Idx =
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()), i);
llvm::Value *NewV = llvm::Constant::getNullValue(ElementType);
V = Builder.CreateInsertElement(V, NewV, Idx);
}
-
+
return V;
}
-
+
Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
- Value *VisitImplicitCastExpr(const ImplicitCastExpr *E);
Value *VisitCastExpr(const CastExpr *E) {
// Make sure to evaluate VLA bounds now so that we have them for later.
if (E->getType()->isVariablyModifiedType())
CGF.EmitVLASize(E->getType());
- return EmitCastExpr(E->getSubExpr(), E->getType());
+ return EmitCastExpr(E);
}
- Value *EmitCastExpr(const Expr *E, QualType T);
+ Value *EmitCastExpr(const CastExpr *E);
Value *VisitCallExpr(const CallExpr *E) {
if (E->getCallReturnType()->isReferenceType())
return EmitLoadOfLValue(E);
-
+
return CGF.EmitCallExpr(E).getScalarVal();
}
Value *VisitStmtExpr(const StmtExpr *E);
Value *VisitBlockDeclRefExpr(const BlockDeclRefExpr *E);
-
+
// Unary Operators.
Value *VisitPrePostIncDec(const UnaryOperator *E, bool isInc, bool isPre);
Value *VisitUnaryPostDec(const UnaryOperator *E) {
@@ -273,22 +279,40 @@ public:
return Visit(E->getSubExpr());
}
Value *VisitUnaryOffsetOf(const UnaryOperator *E);
-
+
// C++
Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
return Visit(DAE->getExpr());
}
Value *VisitCXXThisExpr(CXXThisExpr *TE) {
return CGF.LoadCXXThis();
- }
-
+ }
+
Value *VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
return CGF.EmitCXXExprWithTemporaries(E).getScalarVal();
}
Value *VisitCXXNewExpr(const CXXNewExpr *E) {
return CGF.EmitCXXNewExpr(E);
}
-
+ Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
+ CGF.EmitCXXDeleteExpr(E);
+ return 0;
+ }
+
+ Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
+ // C++ [expr.pseudo]p1:
+ // The result shall only be used as the operand for the function call
+ // operator (), and the result of such a call has type void. The only
+ // effect is the evaluation of the postfix-expression before the dot or
+ // arrow.
+ CGF.EmitScalarExpr(E->getBase());
+ return 0;
+ }
+
+ Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+ }
+
// Binary Operators.
Value *EmitMul(const BinOpInfo &Ops) {
if (CGF.getContext().getLangOptions().OverflowChecking
@@ -355,7 +379,7 @@ public:
VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ);
VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE);
#undef VISITCOMP
-
+
Value *VisitBinAssign (const BinaryOperator *E);
Value *VisitBinLAnd (const BinaryOperator *E);
@@ -381,21 +405,30 @@ public:
/// boolean (i1) truth value. This is equivalent to "Val != 0".
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
assert(SrcType->isCanonical() && "EmitScalarConversion strips typedefs");
-
+
if (SrcType->isRealFloatingType()) {
// Compare against 0.0 for fp scalars.
llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType());
return Builder.CreateFCmpUNE(Src, Zero, "tobool");
}
-
+
+ if (SrcType->isMemberPointerType()) {
+ // FIXME: This is ABI specific.
+
+ // Compare against -1.
+ llvm::Value *NegativeOne = llvm::Constant::getAllOnesValue(Src->getType());
+ return Builder.CreateICmpNE(Src, NegativeOne, "tobool");
+ }
+
assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
"Unknown scalar type to convert");
-
+
// Because of the type rules of C, we often end up computing a logical value,
// then zero extending it to int, then wanting it as a logical value again.
// Optimize this common case.
if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(Src)) {
- if (ZI->getOperand(0)->getType() == llvm::Type::Int1Ty) {
+ if (ZI->getOperand(0)->getType() ==
+ llvm::Type::getInt1Ty(CGF.getLLVMContext())) {
Value *Result = ZI->getOperand(0);
// If there aren't any more uses, zap the instruction to save space.
// Note that there can be more uses, for example if this
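
The member-pointer branch added to EmitConversionToBool compares against all ones rather than zero because a pointer to a struct's first data member is itself represented as offset 0 and still has to test true. The truth test it implements, in standard C++:

#include <cstdio>

struct S { int first; };

int main() {
  int S::*p = &S::first;  // representation is typically offset 0, yet non-null
  int S::*q = nullptr;    // representation is typically all ones
  std::printf("p -> %s, q -> %s\n",
              p ? "true" : "false", q ? "true" : "false");  // "p -> true, q -> false"
  return 0;
}
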
@@ -405,7 +438,7 @@ Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
return Result;
}
}
-
+
// Compare against an integer or pointer null.
llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType());
return Builder.CreateICmpNE(Src, Zero, "tobool");
@@ -418,61 +451,66 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
SrcType = CGF.getContext().getCanonicalType(SrcType);
DstType = CGF.getContext().getCanonicalType(DstType);
if (SrcType == DstType) return Src;
-
+
if (DstType->isVoidType()) return 0;
+ llvm::LLVMContext &VMContext = CGF.getLLVMContext();
+
// Handle conversions to bool first, they are special: comparisons against 0.
if (DstType->isBooleanType())
return EmitConversionToBool(Src, SrcType);
-
+
const llvm::Type *DstTy = ConvertType(DstType);
// Ignore conversions like int -> uint.
if (Src->getType() == DstTy)
return Src;
- // Handle pointer conversions next: pointers can only be converted
- // to/from other pointers and integers. Check for pointer types in
- // terms of LLVM, as some native types (like Obj-C id) may map to a
- // pointer type.
+ // Handle pointer conversions next: pointers can only be converted to/from
+ // other pointers and integers. Check for pointer types in terms of LLVM, as
+ // some native types (like Obj-C id) may map to a pointer type.
if (isa<llvm::PointerType>(DstTy)) {
// The source value may be an integer, or a pointer.
if (isa<llvm::PointerType>(Src->getType()))
return Builder.CreateBitCast(Src, DstTy, "conv");
+
assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
// First, convert to the correct width so that we control the kind of
// extension.
- const llvm::Type *MiddleTy = llvm::IntegerType::get(CGF.LLVMPointerWidth);
+ const llvm::Type *MiddleTy =
+ llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth);
bool InputSigned = SrcType->isSignedIntegerType();
llvm::Value* IntResult =
Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
// Then, cast to pointer.
return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
}
-
+
if (isa<llvm::PointerType>(Src->getType())) {
// Must be an ptr to int cast.
assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
return Builder.CreatePtrToInt(Src, DstTy, "conv");
}
-
+
// A scalar can be splatted to an extended vector of the same element type
- if (DstType->isExtVectorType() && !isa<VectorType>(SrcType)) {
+ if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
// Cast the scalar to element type
- QualType EltTy = DstType->getAsExtVectorType()->getElementType();
+ QualType EltTy = DstType->getAs<ExtVectorType>()->getElementType();
llvm::Value *Elt = EmitScalarConversion(Src, SrcType, EltTy);
// Insert the element in element zero of an undef vector
llvm::Value *UnV = llvm::UndefValue::get(DstTy);
- llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0);
+ llvm::Value *Idx =
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");
// Splat the element across to all elements
llvm::SmallVector<llvm::Constant*, 16> Args;
unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
for (unsigned i = 0; i < NumElements; i++)
- Args.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0));
-
+ Args.push_back(llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), 0));
+
llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
return Yay;
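
The splat sequence above puts the scalar into lane 0 with an insertelement and then broadcasts it with a shufflevector whose mask is all zeros. The same computation written out in plain C++, as a stand-in for the emitted IR rather than any clang API:

#include <array>
#include <cstddef>
#include <cstdio>

template <typename T, std::size_t N>
static std::array<T, N> splat(T scalar) {
  std::array<T, N> lanes{};     // stand-in for the undef vector
  lanes[0] = scalar;            // insertelement into lane 0
  for (std::size_t i = 1; i < N; ++i)
    lanes[i] = lanes[0];        // shuffle with an all-zero mask
  return lanes;
}

int main() {
  for (float lane : splat<float, 4>(2.5f))
    std::printf("%g ", lane);   // 2.5 2.5 2.5 2.5
  std::printf("\n");
  return 0;
}
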
@@ -482,7 +520,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
if (isa<llvm::VectorType>(Src->getType()) ||
isa<llvm::VectorType>(DstTy))
return Builder.CreateBitCast(Src, DstTy, "conv");
-
+
// Finally, we have the arithmetic types: real int/float.
if (isa<llvm::IntegerType>(Src->getType())) {
bool InputSigned = SrcType->isSignedIntegerType();
@@ -493,7 +531,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
else
return Builder.CreateUIToFP(Src, DstTy, "conv");
}
-
+
assert(Src->getType()->isFloatingPoint() && "Unknown real conversion");
if (isa<llvm::IntegerType>(DstTy)) {
if (DstType->isSignedIntegerType())
@@ -509,15 +547,15 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
return Builder.CreateFPExt(Src, DstTy, "conv");
}
-/// EmitComplexToScalarConversion - Emit a conversion from the specified
-/// complex type to the specified destination type, where the destination
-/// type is an LLVM scalar type.
+/// EmitComplexToScalarConversion - Emit a conversion from the specified complex
+/// type to the specified destination type, where the destination type is an
+/// LLVM scalar type.
Value *ScalarExprEmitter::
EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
QualType SrcTy, QualType DstTy) {
// Get the source element type.
- SrcTy = SrcTy->getAsComplexType()->getElementType();
-
+ SrcTy = SrcTy->getAs<ComplexType>()->getElementType();
+
// Handle conversions to bool first, they are special: comparisons against 0.
if (DstTy->isBooleanType()) {
// Complex != 0 -> (Real != 0) | (Imag != 0)
@@ -525,11 +563,11 @@ EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy);
return Builder.CreateOr(Src.first, Src.second, "tobool");
}
-
+
// C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
// the imaginary part of the complex value is discarded and the value of the
// real part is converted according to the conversion rules for the
- // corresponding real type.
+ // corresponding real type.
return EmitScalarConversion(Src.first, SrcTy, DstTy);
}
@@ -565,72 +603,122 @@ Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
// so we can't get it as an lvalue.
if (!E->getBase()->getType()->isVectorType())
return EmitLoadOfLValue(E);
-
+
// Handle the vector case. The base must be a vector, the index must be an
// integer value.
Value *Base = Visit(E->getBase());
Value *Idx = Visit(E->getIdx());
bool IdxSigned = E->getIdx()->getType()->isSignedIntegerType();
- Idx = Builder.CreateIntCast(Idx, llvm::Type::Int32Ty, IdxSigned,
+ Idx = Builder.CreateIntCast(Idx,
+ llvm::Type::getInt32Ty(CGF.getLLVMContext()),
+ IdxSigned,
"vecidxcast");
return Builder.CreateExtractElement(Base, Idx, "vecext");
}
-/// VisitImplicitCastExpr - Implicit casts are the same as normal casts, but
-/// also handle things like function to pointer-to-function decay, and array to
-/// pointer decay.
-Value *ScalarExprEmitter::VisitImplicitCastExpr(const ImplicitCastExpr *E) {
- const Expr *Op = E->getSubExpr();
+// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
+// have to handle a more broad range of conversions than explicit casts, as they
+// handle things like function to ptr-to-function decay etc.
+Value *ScalarExprEmitter::EmitCastExpr(const CastExpr *CE) {
+ const Expr *E = CE->getSubExpr();
+ QualType DestTy = CE->getType();
+ CastExpr::CastKind Kind = CE->getCastKind();
- // If this is due to array->pointer conversion, emit the array expression as
- // an l-value.
- if (Op->getType()->isArrayType()) {
- Value *V = EmitLValue(Op).getAddress(); // Bitfields can't be arrays.
+ if (!DestTy->isVoidType())
+ TestAndClearIgnoreResultAssign();
+
+ switch (Kind) {
+ default:
+ // FIXME: Assert here.
+ // assert(0 && "Unhandled cast kind!");
+ break;
+ case CastExpr::CK_Unknown:
+ // FIXME: We should really assert here - Unknown casts should never get
+ // as far as to codegen.
+ break;
+ case CastExpr::CK_BitCast: {
+ Value *Src = Visit(const_cast<Expr*>(E));
+ return Builder.CreateBitCast(Src, ConvertType(DestTy));
+ }
+ case CastExpr::CK_ArrayToPointerDecay: {
+ assert(E->getType()->isArrayType() &&
+ "Array to pointer decay must have array source type!");
+
+ Value *V = EmitLValue(E).getAddress(); // Bitfields can't be arrays.
// Note that VLA pointers are always decayed, so we don't need to do
// anything here.
- if (!Op->getType()->isVariableArrayType()) {
+ if (!E->getType()->isVariableArrayType()) {
assert(isa<llvm::PointerType>(V->getType()) && "Expected pointer");
assert(isa<llvm::ArrayType>(cast<llvm::PointerType>(V->getType())
->getElementType()) &&
"Expected pointer to array");
V = Builder.CreateStructGEP(V, 0, "arraydecay");
}
-
+
// The resultant pointer type can be implicitly casted to other pointer
// types as well (e.g. void*) and can be implicitly converted to integer.
- const llvm::Type *DestTy = ConvertType(E->getType());
- if (V->getType() != DestTy) {
- if (isa<llvm::PointerType>(DestTy))
- V = Builder.CreateBitCast(V, DestTy, "ptrconv");
+ const llvm::Type *DestLTy = ConvertType(DestTy);
+ if (V->getType() != DestLTy) {
+ if (isa<llvm::PointerType>(DestLTy))
+ V = Builder.CreateBitCast(V, DestLTy, "ptrconv");
else {
- assert(isa<llvm::IntegerType>(DestTy) && "Unknown array decay");
- V = Builder.CreatePtrToInt(V, DestTy, "ptrconv");
+ assert(isa<llvm::IntegerType>(DestLTy) && "Unknown array decay");
+ V = Builder.CreatePtrToInt(V, DestLTy, "ptrconv");
}
}
return V;
}
+ case CastExpr::CK_NullToMemberPointer:
+ return CGF.CGM.EmitNullConstant(DestTy);
+
+ case CastExpr::CK_DerivedToBase: {
+ const RecordType *DerivedClassTy =
+ E->getType()->getAs<PointerType>()->getPointeeType()->getAs<RecordType>();
+ CXXRecordDecl *DerivedClassDecl =
+ cast<CXXRecordDecl>(DerivedClassTy->getDecl());
+
+ const RecordType *BaseClassTy =
+ DestTy->getAs<PointerType>()->getPointeeType()->getAs<RecordType>();
+ CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseClassTy->getDecl());
+
+ Value *Src = Visit(const_cast<Expr*>(E));
- return EmitCastExpr(Op, E->getType());
-}
+ bool NullCheckValue = true;
+
+ if (isa<CXXThisExpr>(E)) {
+ // We always assume that 'this' is never null.
+ NullCheckValue = false;
+ } else if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
+ // And that lvalue casts are never null.
+ if (ICE->isLvalueCast())
+ NullCheckValue = false;
+ }
+ return CGF.GetAddressCXXOfBaseClass(Src, DerivedClassDecl, BaseClassDecl,
+ NullCheckValue);
+ }
+ case CastExpr::CK_IntegralToPointer: {
+ Value *Src = Visit(const_cast<Expr*>(E));
+ return Builder.CreateIntToPtr(Src, ConvertType(DestTy));
+ }
-// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
-// have to handle a more broad range of conversions than explicit casts, as they
-// handle things like function to ptr-to-function decay etc.
-Value *ScalarExprEmitter::EmitCastExpr(const Expr *E, QualType DestTy) {
- if (!DestTy->isVoidType())
- TestAndClearIgnoreResultAssign();
+ case CastExpr::CK_PointerToIntegral: {
+ Value *Src = Visit(const_cast<Expr*>(E));
+ return Builder.CreatePtrToInt(Src, ConvertType(DestTy));
+ }
+
+ }
// Handle cases where the source is an non-complex type.
-
+
if (!CGF.hasAggregateLLVMType(E->getType())) {
Value *Src = Visit(const_cast<Expr*>(E));
// Use EmitScalarConversion to perform the conversion.
return EmitScalarConversion(Src, E->getType(), DestTy);
}
-
+
if (E->getType()->isAnyComplexType()) {
// Handle cases where the source is a complex type.
bool IgnoreImag = true;
@@ -661,7 +749,10 @@ Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
}
Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
- return Builder.CreateLoad(CGF.GetAddrOfBlockDecl(E), false, "tmp");
+ llvm::Value *V = CGF.GetAddrOfBlockDecl(E);
+ if (E->getType().isObjCGCWeak())
+ return CGF.CGM.getObjCRuntime().EmitObjCWeakRead(CGF, V);
+ return Builder.CreateLoad(V, false, "tmp");
}
//===----------------------------------------------------------------------===//
@@ -673,55 +764,80 @@ Value *ScalarExprEmitter::VisitPrePostIncDec(const UnaryOperator *E,
LValue LV = EmitLValue(E->getSubExpr());
QualType ValTy = E->getSubExpr()->getType();
Value *InVal = CGF.EmitLoadOfLValue(LV, ValTy).getScalarVal();
-
+
+ llvm::LLVMContext &VMContext = CGF.getLLVMContext();
+
int AmountVal = isInc ? 1 : -1;
if (ValTy->isPointerType() &&
- ValTy->getAsPointerType()->isVariableArrayType()) {
+ ValTy->getAs<PointerType>()->isVariableArrayType()) {
// The amount of the addition/subtraction needs to account for the VLA size
CGF.ErrorUnsupported(E, "VLA pointer inc/dec");
}
Value *NextVal;
- if (const llvm::PointerType *PT =
+ if (const llvm::PointerType *PT =
dyn_cast<llvm::PointerType>(InVal->getType())) {
- llvm::Constant *Inc =llvm::ConstantInt::get(llvm::Type::Int32Ty, AmountVal);
+ llvm::Constant *Inc =
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), AmountVal);
if (!isa<llvm::FunctionType>(PT->getElementType())) {
- NextVal = Builder.CreateGEP(InVal, Inc, "ptrincdec");
+ QualType PTEE = ValTy->getPointeeType();
+ if (const ObjCInterfaceType *OIT =
+ dyn_cast<ObjCInterfaceType>(PTEE)) {
+ // Handle interface types, which are not represented with a concrete type.
+ int size = CGF.getContext().getTypeSize(OIT) / 8;
+ if (!isInc)
+ size = -size;
+ Inc = llvm::ConstantInt::get(Inc->getType(), size);
+ const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
+ InVal = Builder.CreateBitCast(InVal, i8Ty);
+ NextVal = Builder.CreateGEP(InVal, Inc, "add.ptr");
+ llvm::Value *lhs = LV.getAddress();
+ lhs = Builder.CreateBitCast(lhs, llvm::PointerType::getUnqual(i8Ty));
+ LV = LValue::MakeAddr(lhs, CGF.MakeQualifiers(ValTy));
+ } else
+ NextVal = Builder.CreateInBoundsGEP(InVal, Inc, "ptrincdec");
} else {
- const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
NextVal = Builder.CreateBitCast(InVal, i8Ty, "tmp");
NextVal = Builder.CreateGEP(NextVal, Inc, "ptrincdec");
NextVal = Builder.CreateBitCast(NextVal, InVal->getType());
}
- } else if (InVal->getType() == llvm::Type::Int1Ty && isInc) {
+ } else if (InVal->getType() == llvm::Type::getInt1Ty(VMContext) && isInc) {
// Bool++ is an interesting case, due to promotion rules, we get:
// Bool++ -> Bool = Bool+1 -> Bool = (int)Bool+1 ->
// Bool = ((int)Bool+1) != 0
// An interesting aspect of this is that increment is always true.
// Decrement does not have this property.
- NextVal = llvm::ConstantInt::getTrue();
+ NextVal = llvm::ConstantInt::getTrue(VMContext);
} else if (isa<llvm::IntegerType>(InVal->getType())) {
NextVal = llvm::ConstantInt::get(InVal->getType(), AmountVal);
- NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
+
+ // Signed integer overflow is undefined behavior.
+ if (ValTy->isSignedIntegerType())
+ NextVal = Builder.CreateNSWAdd(InVal, NextVal, isInc ? "inc" : "dec");
+ else
+ NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
} else {
// Add the inc/dec to the real part.
- if (InVal->getType() == llvm::Type::FloatTy)
- NextVal =
- llvm::ConstantFP::get(llvm::APFloat(static_cast<float>(AmountVal)));
- else if (InVal->getType() == llvm::Type::DoubleTy)
- NextVal =
- llvm::ConstantFP::get(llvm::APFloat(static_cast<double>(AmountVal)));
+ if (InVal->getType()->isFloatTy())
+ NextVal =
+ llvm::ConstantFP::get(VMContext,
+ llvm::APFloat(static_cast<float>(AmountVal)));
+ else if (InVal->getType()->isDoubleTy())
+ NextVal =
+ llvm::ConstantFP::get(VMContext,
+ llvm::APFloat(static_cast<double>(AmountVal)));
else {
llvm::APFloat F(static_cast<float>(AmountVal));
bool ignored;
F.convert(CGF.Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
&ignored);
- NextVal = llvm::ConstantFP::get(F);
+ NextVal = llvm::ConstantFP::get(VMContext, F);
}
NextVal = Builder.CreateFAdd(InVal, NextVal, isInc ? "inc" : "dec");
}
-
+
// Store the updated result through the lvalue.
if (LV.isBitfield())
CGF.EmitStoreThroughBitfieldLValue(RValue::get(NextVal), LV, ValTy,
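
The Bool++ branch encodes the promotion chain spelled out in its comment: the bool is widened to int, incremented, and compared against zero, so the result is always true (decrement does not share this property). Written out by hand below, since the builtin increment on bool was later deprecated and then removed in C++17:

#include <cstdio>

static bool increment_as_bool(bool b) {
  // Bool++  ->  Bool = ((int)Bool + 1) != 0, which is never zero.
  return (static_cast<int>(b) + 1) != 0;
}

int main() {
  std::printf("false -> %d, true -> %d\n",
              increment_as_bool(false), increment_as_bool(true));  // 1, 1
  return 0;
}
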
@@ -752,12 +868,12 @@ Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
// Compare operand to zero.
Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
-
+
// Invert value.
// TODO: Could dynamically modify easy computations here. For example, if
// the operand is an icmp ne, turn into icmp eq.
BoolVal = Builder.CreateNot(BoolVal, "lnot");
-
+
// ZExt result to the expr type.
return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
}
@@ -768,7 +884,7 @@ Value *
ScalarExprEmitter::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E) {
QualType TypeToSize = E->getTypeOfArgument();
if (E->isSizeOf()) {
- if (const VariableArrayType *VAT =
+ if (const VariableArrayType *VAT =
CGF.getContext().getAsVariableArrayType(TypeToSize)) {
if (E->isArgumentType()) {
// sizeof(type) - make sure to emit the VLA size.
@@ -778,16 +894,16 @@ ScalarExprEmitter::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E) {
// VLA, it is evaluated.
CGF.EmitAnyExpr(E->getArgumentExpr());
}
-
+
return CGF.GetVLASize(VAT);
}
}
- // If this isn't sizeof(vla), the result must be constant; use the
- // constant folding logic so we don't have to duplicate it here.
+ // If this isn't sizeof(vla), the result must be constant; use the constant
+ // folding logic so we don't have to duplicate it here.
Expr::EvalResult Result;
E->Evaluate(Result, CGF.getContext());
- return llvm::ConstantInt::get(Result.Val.getInt());
+ return llvm::ConstantInt::get(VMContext, Result.Val.getInt());
}
Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
@@ -800,7 +916,7 @@ Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
Expr *Op = E->getSubExpr();
if (Op->getType()->isAnyComplexType())
return CGF.EmitComplexExpr(Op, true, false, true, false).second;
-
+
// __imag on a scalar returns zero. Emit the subexpr to ensure side
// effects are evaluated, but not the actual value.
if (E->isLvalue(CGF.getContext()) == Expr::LV_Valid)
@@ -810,8 +926,7 @@ Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
-Value *ScalarExprEmitter::VisitUnaryOffsetOf(const UnaryOperator *E)
-{
+Value *ScalarExprEmitter::VisitUnaryOffsetOf(const UnaryOperator *E) {
Value* ResultAsPtr = EmitLValue(E->getSubExpr()).getAddress();
const llvm::Type* ResultType = ConvertType(E->getType());
return Builder.CreatePtrToInt(ResultAsPtr, ResultType, "offsetof");
@@ -839,10 +954,10 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
BinOpInfo OpInfo;
if (E->getComputationResultType()->isAnyComplexType()) {
- // This needs to go through the complex expression emitter, but
- // it's a tad complicated to do that... I'm leaving it out for now.
- // (Note that we do actually need the imaginary part of the RHS for
- // multiplication and division.)
+ // This needs to go through the complex expression emitter, but it's a tad
+ // complicated to do that... I'm leaving it out for now. (Note that we do
+ // actually need the imaginary part of the RHS for multiplication and
+ // division.)
CGF.ErrorUnsupported(E, "complex compound assignment");
return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
}
@@ -857,17 +972,17 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
OpInfo.LHS = EmitLoadOfLValue(LHSLV, LHSTy);
OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
E->getComputationLHSType());
-
+
// Expand the binary operator.
Value *Result = (this->*Func)(OpInfo);
-
+
// Convert the result back to the LHS type.
Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy);
- // Store the result value into the LHS lvalue. Bit-fields are
- // handled specially because the result is altered by the store,
- // i.e., [C99 6.5.16p1] 'An assignment expression has the value of
- // the left operand after the assignment...'.
+ // Store the result value into the LHS lvalue. Bit-fields are handled
+ // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
+ // 'An assignment expression has the value of the left operand after the
+ // assignment...'.
if (LHSLV.isBitfield()) {
if (!LHSLV.isVolatileQualified()) {
CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy,
@@ -949,31 +1064,31 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
Builder.SetInsertPoint(overflowBB);
// Handler is:
- // long long *__overflow_handler)(long long a, long long b, char op,
+ // long long *__overflow_handler)(long long a, long long b, char op,
// char width)
std::vector<const llvm::Type*> handerArgTypes;
- handerArgTypes.push_back(llvm::Type::Int64Ty);
- handerArgTypes.push_back(llvm::Type::Int64Ty);
- handerArgTypes.push_back(llvm::Type::Int8Ty);
- handerArgTypes.push_back(llvm::Type::Int8Ty);
- llvm::FunctionType *handlerTy = llvm::FunctionType::get(llvm::Type::Int64Ty,
- handerArgTypes, false);
+ handerArgTypes.push_back(llvm::Type::getInt64Ty(VMContext));
+ handerArgTypes.push_back(llvm::Type::getInt64Ty(VMContext));
+ handerArgTypes.push_back(llvm::Type::getInt8Ty(VMContext));
+ handerArgTypes.push_back(llvm::Type::getInt8Ty(VMContext));
+ llvm::FunctionType *handlerTy = llvm::FunctionType::get(
+ llvm::Type::getInt64Ty(VMContext), handerArgTypes, false);
llvm::Value *handlerFunction =
CGF.CGM.getModule().getOrInsertGlobal("__overflow_handler",
llvm::PointerType::getUnqual(handlerTy));
handlerFunction = Builder.CreateLoad(handlerFunction);
llvm::Value *handlerResult = Builder.CreateCall4(handlerFunction,
- Builder.CreateSExt(Ops.LHS, llvm::Type::Int64Ty),
- Builder.CreateSExt(Ops.RHS, llvm::Type::Int64Ty),
- llvm::ConstantInt::get(llvm::Type::Int8Ty, OpID),
- llvm::ConstantInt::get(llvm::Type::Int8Ty,
+ Builder.CreateSExt(Ops.LHS, llvm::Type::getInt64Ty(VMContext)),
+ Builder.CreateSExt(Ops.RHS, llvm::Type::getInt64Ty(VMContext)),
+ llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), OpID),
+ llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext),
cast<llvm::IntegerType>(opTy)->getBitWidth()));
handlerResult = Builder.CreateTrunc(handlerResult, opTy);
Builder.CreateBr(continueBB);
-
+
// Set up the continuation
Builder.SetInsertPoint(continueBB);
// Get the correct result
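
For context on the call emitted here: the code loads a global function pointer named __overflow_handler with the (long long, long long, char op, char width) signature given in the comment and calls through it on the overflow path. The routine below is a purely hypothetical sketch of such a runtime handler; only the name and signature come from this hunk, while the trap-on-overflow policy and the use of the later __builtin_*_overflow builtins are assumptions made to keep the sketch short:

#include <cstdio>
#include <cstdlib>

static long long handle_overflow(long long a, long long b, char op, char width) {
  long long result = 0;
  bool overflowed = false;
  switch (op) {
  case '+': overflowed = __builtin_add_overflow(a, b, &result); break;
  case '-': overflowed = __builtin_sub_overflow(a, b, &result); break;
  case '*': overflowed = __builtin_mul_overflow(a, b, &result); break;
  default:  std::abort();
  }
  if (width < 64) {
    // The operands were sign-extended to 64 bits before the call, so re-check
    // the result against the range of the original width.
    const long long hi = (1LL << (width - 1)) - 1;
    overflowed = overflowed || result > hi || result < -hi - 1;
  }
  if (overflowed) {
    std::fprintf(stderr, "overflow: %lld %c %lld (width %d)\n",
                 a, op, b, static_cast<int>(width));
    std::abort();
  }
  return result;
}

// The generated code loads a *pointer* named __overflow_handler, so the
// runtime publishes the routine through a global of that name.
extern "C" long long (*__overflow_handler)(long long, long long, char, char) =
    handle_overflow;

int main() {
  std::printf("%lld\n", __overflow_handler(1 << 20, 3, '*', 32));  // 3145728
  return 0;
}
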
@@ -986,31 +1101,39 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
}
Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
- if (!Ops.Ty->isPointerType()) {
+ if (!Ops.Ty->isAnyPointerType()) {
if (CGF.getContext().getLangOptions().OverflowChecking &&
Ops.Ty->isSignedIntegerType())
return EmitOverflowCheckedBinOp(Ops);
-
+
if (Ops.LHS->getType()->isFPOrFPVector())
return Builder.CreateFAdd(Ops.LHS, Ops.RHS, "add");
-
+
+ // Signed integer overflow is undefined behavior.
+ if (Ops.Ty->isSignedIntegerType())
+ return Builder.CreateNSWAdd(Ops.LHS, Ops.RHS, "add");
+
return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add");
}
- if (Ops.Ty->getAsPointerType()->isVariableArrayType()) {
+ if (Ops.Ty->isPointerType() &&
+ Ops.Ty->getAs<PointerType>()->isVariableArrayType()) {
// The amount of the addition needs to account for the VLA size
CGF.ErrorUnsupported(Ops.E, "VLA pointer addition");
}
Value *Ptr, *Idx;
Expr *IdxExp;
- const PointerType *PT;
- if ((PT = Ops.E->getLHS()->getType()->getAsPointerType())) {
+ const PointerType *PT = Ops.E->getLHS()->getType()->getAs<PointerType>();
+ const ObjCObjectPointerType *OPT =
+ Ops.E->getLHS()->getType()->getAs<ObjCObjectPointerType>();
+ if (PT || OPT) {
Ptr = Ops.LHS;
Idx = Ops.RHS;
IdxExp = Ops.E->getRHS();
- } else { // int + pointer
- PT = Ops.E->getRHS()->getType()->getAsPointerType();
- assert(PT && "Invalid add expr");
+ } else { // int + pointer
+ PT = Ops.E->getRHS()->getType()->getAs<PointerType>();
+ OPT = Ops.E->getRHS()->getType()->getAs<ObjCObjectPointerType>();
+ assert((PT || OPT) && "Invalid add expr");
Ptr = Ops.RHS;
Idx = Ops.LHS;
IdxExp = Ops.E->getLHS();
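
The CreateNSWAdd above records that signed integer overflow is undefined behaviour in C and C++, which later lets the optimizer assume it cannot happen. A standard C++ sketch of the classic consequence; whether the comparison actually gets folded depends on the optimizer and flags, so this only illustrates what the no-signed-wrap flag permits:

#include <cstdio>

static bool plus_one_is_larger(int x) {
  // With no-signed-wrap semantics an optimizer may fold this to `true`,
  // since x + 1 can only fail to be larger if the addition overflowed.
  return x + 1 > x;
}

int main() {
  std::printf("%d\n", plus_one_is_larger(42));  // 1
  // Calling this with INT_MAX would be undefined behaviour; don't.
  return 0;
}
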
@@ -1020,38 +1143,37 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
if (Width < CGF.LLVMPointerWidth) {
// Zero or sign extend the pointer value based on whether the index is
// signed or not.
- const llvm::Type *IdxType = llvm::IntegerType::get(CGF.LLVMPointerWidth);
+ const llvm::Type *IdxType =
+ llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth);
if (IdxExp->getType()->isSignedIntegerType())
Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
else
Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
}
-
- const QualType ElementType = PT->getPointeeType();
- // Handle interface types, which are not represented with a concrete
- // type.
+ const QualType ElementType = PT ? PT->getPointeeType() : OPT->getPointeeType();
+ // Handle interface types, which are not represented with a concrete type.
if (const ObjCInterfaceType *OIT = dyn_cast<ObjCInterfaceType>(ElementType)) {
- llvm::Value *InterfaceSize =
+ llvm::Value *InterfaceSize =
llvm::ConstantInt::get(Idx->getType(),
CGF.getContext().getTypeSize(OIT) / 8);
Idx = Builder.CreateMul(Idx, InterfaceSize);
- const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
return Builder.CreateBitCast(Res, Ptr->getType());
- }
+ }
- // Explicitly handle GNU void* and function pointer arithmetic
- // extensions. The GNU void* casts amount to no-ops since our void*
- // type is i8*, but this is future proof.
+ // Explicitly handle GNU void* and function pointer arithmetic extensions. The
+ // GNU void* casts amount to no-ops since our void* type is i8*, but this is
+ // future proof.
if (ElementType->isVoidType() || ElementType->isFunctionType()) {
- const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
return Builder.CreateBitCast(Res, Ptr->getType());
- }
-
- return Builder.CreateGEP(Ptr, Idx, "add.ptr");
+ }
+
+ return Builder.CreateInBoundsGEP(Ptr, Idx, "add.ptr");
}
Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
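
The inbounds GEP that closes EmitAdd scales the index by the pointee size, so ptr + idx moves idx * sizeof(element) bytes; the GNU void* and function-pointer extension handled just above uses an element size of 1, which is why those paths bitcast through i8* first. The scaling rule in standard C++ terms:

#include <cstddef>
#include <cstdio>

int main() {
  double buffer[8] = {};
  double *p = buffer;
  const std::ptrdiff_t idx = 3;

  double *q = p + idx;  // advances by idx * sizeof(double) bytes
  std::printf("byte distance = %td (expected %zu)\n",
              reinterpret_cast<char *>(q) - reinterpret_cast<char *>(p),
              idx * sizeof(double));
  return 0;
}
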
@@ -1065,7 +1187,8 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub");
}
- if (Ops.E->getLHS()->getType()->getAsPointerType()->isVariableArrayType()) {
+ if (Ops.E->getLHS()->getType()->isPointerType() &&
+ Ops.E->getLHS()->getType()->getAs<PointerType>()->isVariableArrayType()) {
// The amount of the addition needs to account for the VLA size for
// ptr-int
// The amount of the division needs to account for the VLA size for
@@ -1074,7 +1197,7 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
}
const QualType LHSType = Ops.E->getLHS()->getType();
- const QualType LHSElementType = LHSType->getAsPointerType()->getPointeeType();
+ const QualType LHSElementType = LHSType->getPointeeType();
if (!isa<llvm::PointerType>(Ops.RHS->getType())) {
// pointer - int
Value *Idx = Ops.RHS;
@@ -1082,7 +1205,8 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
if (Width < CGF.LLVMPointerWidth) {
// Zero or sign extend the pointer value based on whether the index is
// signed or not.
- const llvm::Type *IdxType = llvm::IntegerType::get(CGF.LLVMPointerWidth);
+ const llvm::Type *IdxType =
+ llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth);
if (Ops.E->getRHS()->getType()->isSignedIntegerType())
Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
else
@@ -1090,36 +1214,35 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
}
Idx = Builder.CreateNeg(Idx, "sub.ptr.neg");
- // Handle interface types, which are not represented with a concrete
- // type.
- if (const ObjCInterfaceType *OIT =
+ // Handle interface types, which are not represented with a concrete type.
+ if (const ObjCInterfaceType *OIT =
dyn_cast<ObjCInterfaceType>(LHSElementType)) {
- llvm::Value *InterfaceSize =
+ llvm::Value *InterfaceSize =
llvm::ConstantInt::get(Idx->getType(),
CGF.getContext().getTypeSize(OIT) / 8);
Idx = Builder.CreateMul(Idx, InterfaceSize);
- const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
Value *Res = Builder.CreateGEP(LHSCasted, Idx, "add.ptr");
return Builder.CreateBitCast(Res, Ops.LHS->getType());
- }
+ }
// Explicitly handle GNU void* and function pointer arithmetic
- // extensions. The GNU void* casts amount to no-ops since our
- // void* type is i8*, but this is future proof.
+ // extensions. The GNU void* casts amount to no-ops since our void* type is
+ // i8*, but this is future proof.
if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
- const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
Value *Res = Builder.CreateGEP(LHSCasted, Idx, "sub.ptr");
return Builder.CreateBitCast(Res, Ops.LHS->getType());
- }
-
- return Builder.CreateGEP(Ops.LHS, Idx, "sub.ptr");
+ }
+
+ return Builder.CreateInBoundsGEP(Ops.LHS, Idx, "sub.ptr");
} else {
// pointer - pointer
Value *LHS = Ops.LHS;
Value *RHS = Ops.RHS;
-
+
uint64_t ElementSize;
// Handle GCC extension for pointer arithmetic on void* and function pointer
@@ -1129,28 +1252,21 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
} else {
ElementSize = CGF.getContext().getTypeSize(LHSElementType) / 8;
}
-
+
const llvm::Type *ResultType = ConvertType(Ops.Ty);
LHS = Builder.CreatePtrToInt(LHS, ResultType, "sub.ptr.lhs.cast");
RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
Value *BytesBetween = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
-
+
// Optimize out the shift for element size of 1.
if (ElementSize == 1)
return BytesBetween;
-
- // HACK: LLVM doesn't have an divide instruction that 'knows' there is no
- // remainder. As such, we handle common power-of-two cases here to generate
- // better code. See PR2247.
- if (llvm::isPowerOf2_64(ElementSize)) {
- Value *ShAmt =
- llvm::ConstantInt::get(ResultType, llvm::Log2_64(ElementSize));
- return Builder.CreateAShr(BytesBetween, ShAmt, "sub.ptr.shr");
- }
-
- // Otherwise, do a full sdiv.
+
+ // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
+ // pointer difference in C is only defined in the case where both operands
+ // are pointing to elements of an array.
Value *BytesPerElt = llvm::ConstantInt::get(ResultType, ElementSize);
- return Builder.CreateSDiv(BytesBetween, BytesPerElt, "sub.ptr.div");
+ return Builder.CreateExactSDiv(BytesBetween, BytesPerElt, "sub.ptr.div");
}
}
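The new comment about pointer difference can be made concrete: both operands must point into the same array, so the byte distance is an exact multiple of the element size, which is what justifies replacing the old power-of-two shift hack with an exact sdiv. A small illustrative function:

// p and q point into the same array, so the byte distance computed by
// "sub.ptr.sub" is an exact multiple of sizeof(double); the division
// ("sub.ptr.div") can therefore be an exact sdiv.
long elems_between(double *p, double *q) { return p - q; }
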
@@ -1160,7 +1276,7 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
Value *RHS = Ops.RHS;
if (Ops.LHS->getType() != RHS->getType())
RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
-
+
return Builder.CreateShl(Ops.LHS, RHS, "shl");
}
@@ -1170,7 +1286,7 @@ Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
Value *RHS = Ops.RHS;
if (Ops.LHS->getType() != RHS->getType())
RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
-
+
if (Ops.Ty->isUnsignedIntegerType())
return Builder.CreateLShr(Ops.LHS, RHS, "shr");
return Builder.CreateAShr(Ops.LHS, RHS, "shr");
@@ -1181,11 +1297,11 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
TestAndClearIgnoreResultAssign();
Value *Result;
QualType LHSTy = E->getLHS()->getType();
- if (!LHSTy->isAnyComplexType() && !LHSTy->isVectorType()) {
+ if (!LHSTy->isAnyComplexType()) {
Value *LHS = Visit(E->getLHS());
Value *RHS = Visit(E->getRHS());
-
- if (LHS->getType()->isFloatingPoint()) {
+
+ if (LHS->getType()->isFPOrFPVector()) {
Result = Builder.CreateFCmp((llvm::CmpInst::Predicate)FCmpOpc,
LHS, RHS, "cmp");
} else if (LHSTy->isSignedIntegerType()) {
@@ -1196,29 +1312,19 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
LHS, RHS, "cmp");
}
- } else if (LHSTy->isVectorType()) {
- Value *LHS = Visit(E->getLHS());
- Value *RHS = Visit(E->getRHS());
-
- if (LHS->getType()->isFPOrFPVector()) {
- Result = Builder.CreateVFCmp((llvm::CmpInst::Predicate)FCmpOpc,
- LHS, RHS, "cmp");
- } else if (LHSTy->isUnsignedIntegerType()) {
- Result = Builder.CreateVICmp((llvm::CmpInst::Predicate)UICmpOpc,
- LHS, RHS, "cmp");
- } else {
- // Signed integers and pointers.
- Result = Builder.CreateVICmp((llvm::CmpInst::Predicate)SICmpOpc,
- LHS, RHS, "cmp");
- }
- return Result;
+
+ // If this is a vector comparison, sign extend the result to the appropriate
+ // vector integer type and return it (don't convert to bool).
+ if (LHSTy->isVectorType())
+ return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
+
} else {
// Complex Comparison: can only be an equality comparison.
CodeGenFunction::ComplexPairTy LHS = CGF.EmitComplexExpr(E->getLHS());
CodeGenFunction::ComplexPairTy RHS = CGF.EmitComplexExpr(E->getRHS());
-
- QualType CETy = LHSTy->getAsComplexType()->getElementType();
-
+
+ QualType CETy = LHSTy->getAs<ComplexType>()->getElementType();
+
Value *ResultR, *ResultI;
if (CETy->isRealFloatingType()) {
ResultR = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
@@ -1233,7 +1339,7 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
ResultI = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
LHS.second, RHS.second, "cmp.i");
}
-
+
if (E->getOpcode() == BinaryOperator::EQ) {
Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
} else {
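The unified comparison path above now handles vectors by sign-extending the i1 comparison results back to the vector's integer element type, so each lane becomes -1 for true and 0 for false instead of being converted to bool. A hedged illustration using the GCC/clang vector extension (the typedef is illustrative):

typedef int v4si __attribute__((vector_size(16)));

// The element-wise icmp yields a vector of i1; the "sext" above widens it so
// each lane of the result is -1 where a[i] < b[i] and 0 otherwise.
v4si lanewise_less(v4si a, v4si b) { return a < b; }
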
@@ -1253,7 +1359,7 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
// improve codegen just a little.
Value *RHS = Visit(E->getRHS());
LValue LHS = EmitLValue(E->getLHS());
-
+
// Store the value into the LHS. Bit-fields are handled specially
// because the result is altered by the store, i.e., [C99 6.5.16p1]
// 'An assignment expression has the value of the left operand after
@@ -1281,12 +1387,12 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
// ZExt result to int.
return Builder.CreateZExt(RHSCond, CGF.LLVMIntTy, "land.ext");
}
-
+
// 0 && RHS: If it is safe, just elide the RHS, and return 0.
if (!CGF.ContainsLabel(E->getRHS()))
return llvm::Constant::getNullValue(CGF.LLVMIntTy);
}
-
+
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
@@ -1296,17 +1402,18 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
// Any edges into the ContBlock are now from an (indeterminate number of)
// edges from this first condition. All of these values will be false. Start
// setting up the PHI node in the Cont Block for this.
- llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::Int1Ty, "", ContBlock);
+ llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext),
+ "", ContBlock);
PN->reserveOperandSpace(2); // Normal case, two inputs.
for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
PI != PE; ++PI)
- PN->addIncoming(llvm::ConstantInt::getFalse(), *PI);
-
+ PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
+
CGF.PushConditionalTempDestruction();
CGF.EmitBlock(RHSBlock);
Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
CGF.PopConditionalTempDestruction();
-
+
// Reacquire the RHS block, as there may be subblocks inserted.
RHSBlock = Builder.GetInsertBlock();
@@ -1314,7 +1421,7 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
// into the phi node for the edge with the value of RHSCond.
CGF.EmitBlock(ContBlock);
PN->addIncoming(RHSCond, RHSBlock);
-
+
// ZExt result to int.
return Builder.CreateZExt(PN, CGF.LLVMIntTy, "land.ext");
}
@@ -1328,43 +1435,44 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
// ZExt result to int.
return Builder.CreateZExt(RHSCond, CGF.LLVMIntTy, "lor.ext");
}
-
+
// 1 || RHS: If it is safe, just elide the RHS, and return 1.
if (!CGF.ContainsLabel(E->getRHS()))
return llvm::ConstantInt::get(CGF.LLVMIntTy, 1);
}
-
+
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
-
+
// Branch on the LHS first. If it is true, go to the success (cont) block.
CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock);
// Any edges into the ContBlock are now from an (indeterminate number of)
// edges from this first condition. All of these values will be true. Start
// setting up the PHI node in the Cont Block for this.
- llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::Int1Ty, "", ContBlock);
+ llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext),
+ "", ContBlock);
PN->reserveOperandSpace(2); // Normal case, two inputs.
for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
PI != PE; ++PI)
- PN->addIncoming(llvm::ConstantInt::getTrue(), *PI);
+ PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
CGF.PushConditionalTempDestruction();
// Emit the RHS condition as a bool value.
CGF.EmitBlock(RHSBlock);
Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
-
+
CGF.PopConditionalTempDestruction();
-
+
// Reacquire the RHS block, as there may be subblocks inserted.
RHSBlock = Builder.GetInsertBlock();
-
+
// Emit an unconditional branch from this block to ContBlock. Insert an entry
// into the phi node for the edge with the value of RHSCond.
CGF.EmitBlock(ContBlock);
PN->addIncoming(RHSCond, RHSBlock);
-
+
// ZExt result to int.
return Builder.CreateZExt(PN, CGF.LLVMIntTy, "lor.ext");
}
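VisitBinLAnd and VisitBinLOr above share one shape: try to fold or elide the RHS, otherwise branch on the LHS, evaluate the RHS in its own block, merge through an i1 PHI whose constant incoming values come from the LHS edges, and zero-extend the result to int. An illustrative source function that forces the non-constant path (the callee name is made up):

extern bool has_side_effects(int);

// The RHS cannot be elided, so codegen branches on 'a' into "land.rhs" /
// "land.end", adds a 'false' incoming value to the i1 PHI for each LHS edge,
// then zero-extends the PHI to int ("land.ext").
int logical_and(bool a, int b) { return a && has_side_effects(b); }
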
@@ -1386,19 +1494,19 @@ Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E) {
if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
return isCheapEnoughToEvaluateUnconditionally(PE->getSubExpr());
-
+
// TODO: Allow anything we can constant fold to an integer or fp constant.
if (isa<IntegerLiteral>(E) || isa<CharacterLiteral>(E) ||
isa<FloatingLiteral>(E))
return true;
-
+
// Non-volatile automatic variables too, to get "cond ? X : Y" where
// X and Y are local variables.
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
if (VD->hasLocalStorage() && !VD->getType().isVolatileQualified())
return true;
-
+
return false;
}
@@ -1412,7 +1520,7 @@ VisitConditionalOperator(const ConditionalOperator *E) {
Expr *Live = E->getLHS(), *Dead = E->getRHS();
if (Cond == -1)
std::swap(Live, Dead);
-
+
// If the dead side doesn't have labels we need, and if the Live side isn't
// the gnu missing ?: extension (which we could handle, but don't bother
// to), just emit the Live part.
@@ -1420,8 +1528,8 @@ VisitConditionalOperator(const ConditionalOperator *E) {
Live) // Live part isn't missing.
return Visit(Live);
}
-
-
+
+
// If this is a really simple expression (like x ? 4 : 5), emit this as a
// select instead of as control flow. We can only do this if it is cheap and
// safe to evaluate the LHS and RHS unconditionally.
@@ -1432,15 +1540,15 @@ VisitConditionalOperator(const ConditionalOperator *E) {
llvm::Value *RHS = Visit(E->getRHS());
return Builder.CreateSelect(CondV, LHS, RHS, "cond");
}
-
-
+
+
llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
Value *CondVal = 0;
- // If we don't have the GNU missing condition extension, emit a branch on
- // bool the normal way.
+ // If we don't have the GNU missing condition extension, emit a branch on bool
+ // the normal way.
if (E->getLHS()) {
// Otherwise, just use EmitBranchOnBoolExpr to get small and simple code for
// the branch on bool.
@@ -1450,7 +1558,7 @@ VisitConditionalOperator(const ConditionalOperator *E) {
// convert it to bool the hard way. We do this explicitly because we need
// the unconverted value for the missing middle value of the ?:.
CondVal = CGF.EmitScalarExpr(E->getCond());
-
+
// In some cases, EmitScalarConversion will delete the "CondVal" expression
// if there are no extra uses (an optimization). Inhibit this by making an
// extra dead use, because we're going to add a use of CondVal later. We
@@ -1458,7 +1566,7 @@ VisitConditionalOperator(const ConditionalOperator *E) {
// away. This leaves dead code, but the ?: extension isn't common.
new llvm::BitCastInst(CondVal, CondVal->getType(), "dummy?:holder",
Builder.GetInsertBlock());
-
+
Value *CondBoolVal =
CGF.EmitScalarConversion(CondVal, E->getCond()->getType(),
CGF.getContext().BoolTy);
@@ -1467,33 +1575,33 @@ VisitConditionalOperator(const ConditionalOperator *E) {
CGF.PushConditionalTempDestruction();
CGF.EmitBlock(LHSBlock);
-
+
// Handle the GNU extension for missing LHS.
Value *LHS;
if (E->getLHS())
LHS = Visit(E->getLHS());
else // Perform promotions, to handle cases like "short ?: int"
LHS = EmitScalarConversion(CondVal, E->getCond()->getType(), E->getType());
-
+
CGF.PopConditionalTempDestruction();
LHSBlock = Builder.GetInsertBlock();
CGF.EmitBranch(ContBlock);
-
+
CGF.PushConditionalTempDestruction();
CGF.EmitBlock(RHSBlock);
-
+
Value *RHS = Visit(E->getRHS());
CGF.PopConditionalTempDestruction();
RHSBlock = Builder.GetInsertBlock();
CGF.EmitBranch(ContBlock);
-
+
CGF.EmitBlock(ContBlock);
-
+
if (!LHS || !RHS) {
assert(E->getType()->isVoidType() && "Non-void value should have a value");
return 0;
}
-
+
// Create a PHI node for the real part.
llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), "cond");
PN->reserveOperandSpace(2);
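The fast path guarded by isCheapEnoughToEvaluateUnconditionally above turns a ternary whose arms are literals or non-volatile locals into a single select ("cond"); anything else takes the cond.true/cond.false/cond.end blocks and the PHI being built here. Two illustrative cases:

// Both arms are cheap and safe to evaluate unconditionally: a 'select' is
// emitted with no branching.
int pick_local(bool c, int x, int y) { return c ? x : y; }

// A call is not considered cheap, so this form goes through the branching
// path and the PHI in "cond.end".
extern int expensive(int);
int pick_call(bool c, int x) { return c ? expensive(x) : 0; }
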
@@ -1511,7 +1619,7 @@ Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
// If EmitVAArg fails, we fall back to the LLVM instruction.
- if (!ArgPtr)
+ if (!ArgPtr)
return Builder.CreateVAArg(ArgValue, ConvertType(VE->getType()));
// FIXME Volatility.
@@ -1526,12 +1634,12 @@ Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *BE) {
// Entry Point into this File
//===----------------------------------------------------------------------===//
-/// EmitScalarExpr - Emit the computation of the specified expression of
-/// scalar type, ignoring the result.
+/// EmitScalarExpr - Emit the computation of the specified expression of scalar
+/// type, ignoring the result.
Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
assert(E && !hasAggregateLLVMType(E->getType()) &&
"Invalid scalar expression to emit");
-
+
return ScalarExprEmitter(*this, IgnoreResultAssign)
.Visit(const_cast<Expr*>(E));
}
@@ -1545,9 +1653,9 @@ Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy);
}
-/// EmitComplexToScalarConversion - Emit a conversion from the specified
-/// complex type to the specified destination type, where the destination
-/// type is an LLVM scalar type.
+/// EmitComplexToScalarConversion - Emit a conversion from the specified complex
+/// type to the specified destination type, where the destination type is an
+/// LLVM scalar type.
Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
QualType SrcTy,
QualType DstTy) {
@@ -1560,38 +1668,40 @@ Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
Value *CodeGenFunction::EmitShuffleVector(Value* V1, Value *V2, ...) {
assert(V1->getType() == V2->getType() &&
"Vector operands must be of the same type");
- unsigned NumElements =
+ unsigned NumElements =
cast<llvm::VectorType>(V1->getType())->getNumElements();
-
+
va_list va;
va_start(va, V2);
-
+
llvm::SmallVector<llvm::Constant*, 16> Args;
for (unsigned i = 0; i < NumElements; i++) {
int n = va_arg(va, int);
- assert(n >= 0 && n < (int)NumElements * 2 &&
+ assert(n >= 0 && n < (int)NumElements * 2 &&
"Vector shuffle index out of bounds!");
- Args.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, n));
+ Args.push_back(llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), n));
}
-
+
const char *Name = va_arg(va, const char *);
va_end(va);
-
+
llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
-
+
return Builder.CreateShuffleVector(V1, V2, Mask, Name);
}
-llvm::Value *CodeGenFunction::EmitVector(llvm::Value * const *Vals,
+llvm::Value *CodeGenFunction::EmitVector(llvm::Value * const *Vals,
unsigned NumVals, bool isSplat) {
llvm::Value *Vec
= llvm::UndefValue::get(llvm::VectorType::get(Vals[0]->getType(), NumVals));
-
+
for (unsigned i = 0, e = NumVals; i != e; ++i) {
llvm::Value *Val = isSplat ? Vals[0] : Vals[i];
- llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i);
+ llvm::Value *Idx = llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), i);
Vec = Builder.CreateInsertElement(Vec, Val, Idx, "tmp");
}
-
- return Vec;
+
+ return Vec;
}
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index 33cb5bca3869..cadba328bf12 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -24,7 +24,7 @@ using namespace clang;
using namespace CodeGen;
/// Emits an instance of NSConstantString representing the object.
-llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
+llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
{
llvm::Constant *C = CGM.getObjCRuntime().GenerateConstantString(E);
// FIXME: This bitcast should just be made an invariant on the Runtime.
@@ -50,7 +50,7 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E) {
// Only the lookup mechanism and first two arguments of the method
// implementation vary between runtimes. We can get the receiver and
// arguments in generic code.
-
+
CGObjCRuntime &Runtime = CGM.getObjCRuntime();
const Expr *ReceiverExpr = E->getReceiver();
bool isSuperMessage = false;
@@ -70,7 +70,7 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E) {
} else {
Receiver = Runtime.GetClass(Builder, OID);
}
-
+
isClassMessage = true;
} else if (isa<ObjCSuperExpr>(E->getReceiver())) {
isSuperMessage = true;
@@ -81,7 +81,7 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E) {
CallArgList Args;
EmitCallArgs(Args, E->getMethodDecl(), E->arg_begin(), E->arg_end());
-
+
if (isSuperMessage) {
// super is only valid in an Objective-C method
const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
@@ -92,9 +92,11 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E) {
isCategoryImpl,
Receiver,
isClassMessage,
- Args);
+ Args,
+ E->getMethodDecl());
}
- return Runtime.GenerateMessageSend(*this, E->getType(), E->getSelector(),
+
+ return Runtime.GenerateMessageSend(*this, E->getType(), E->getSelector(),
Receiver, isClassMessage, Args,
E->getMethodDecl());
}
@@ -110,7 +112,7 @@ void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
const CGFunctionInfo &FI = CGM.getTypes().getFunctionInfo(OMD);
CGM.SetInternalFunctionAttributes(OMD, Fn, FI);
- Args.push_back(std::make_pair(OMD->getSelfDecl(),
+ Args.push_back(std::make_pair(OMD->getSelfDecl(),
OMD->getSelfDecl()->getType()));
Args.push_back(std::make_pair(OMD->getCmdDecl(),
OMD->getCmdDecl()->getType()));
@@ -123,10 +125,10 @@ void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
}
/// Generate an Objective-C method. An Objective-C method is a C function with
-/// its pointer, name, and types registered in the class struture.
+/// its pointer, name, and types registered in the class structure.
void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
// Check if we should generate debug info for this method.
- if (CGM.getDebugInfo() && !OMD->hasAttr<NodebugAttr>())
+ if (CGM.getDebugInfo() && !OMD->hasAttr<NoDebugAttr>())
DebugInfo = CGM.getDebugInfo();
StartObjCMethod(OMD, OMD->getClassInterface());
EmitStmt(OMD->getBody());
@@ -159,9 +161,9 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic) &&
(PD->getSetterKind() == ObjCPropertyDecl::Copy ||
PD->getSetterKind() == ObjCPropertyDecl::Retain)) {
- llvm::Value *GetPropertyFn =
+ llvm::Value *GetPropertyFn =
CGM.getObjCRuntime().GetPropertyGetFunction();
-
+
if (!GetPropertyFn) {
CGM.ErrorUnsupported(PID, "Obj-C getter requiring atomic copy");
FinishFunction();
@@ -175,7 +177,7 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
ValueDecl *Cmd = OMD->getCmdDecl();
llvm::Value *CmdVal = Builder.CreateLoad(LocalDeclMap[Cmd], "cmd");
QualType IdTy = getContext().getObjCIdType();
- llvm::Value *SelfAsId =
+ llvm::Value *SelfAsId =
Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
llvm::Value *Offset = EmitIvarOffset(IMP->getClassInterface(), Ivar);
llvm::Value *True =
@@ -187,24 +189,23 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
Args.push_back(std::make_pair(RValue::get(True), getContext().BoolTy));
// FIXME: We shouldn't need to get the function info here, the
// runtime already should have computed it to build the function.
- RValue RV = EmitCall(Types.getFunctionInfo(PD->getType(), Args),
+ RValue RV = EmitCall(Types.getFunctionInfo(PD->getType(), Args),
GetPropertyFn, Args);
// We need to fix the type here. Ivars with copy & retain are
// always objects so we don't need to worry about complex or
// aggregates.
- RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
+ RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
Types.ConvertType(PD->getType())));
EmitReturnOfRValue(RV, PD->getType());
} else {
LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), Ivar, 0);
if (hasAggregateLLVMType(Ivar->getType())) {
EmitAggregateCopy(ReturnValue, LV.getAddress(), Ivar->getType());
- }
- else {
+ } else {
CodeGenTypes &Types = CGM.getTypes();
RValue RV = EmitLoadOfLValue(LV, Ivar->getType());
RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
- Types.ConvertType(PD->getType())));
+ Types.ConvertType(PD->getType())));
EmitReturnOfRValue(RV, PD->getType());
}
}
@@ -227,7 +228,7 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
StartObjCMethod(OMD, IMP->getClassInterface());
bool IsCopy = PD->getSetterKind() == ObjCPropertyDecl::Copy;
- bool IsAtomic =
+ bool IsAtomic =
!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic);
// Determine if we should use an objc_setProperty call for
@@ -237,16 +238,16 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
if (IsCopy ||
(CGM.getLangOptions().getGCMode() != LangOptions::GCOnly &&
PD->getSetterKind() == ObjCPropertyDecl::Retain)) {
- llvm::Value *SetPropertyFn =
+ llvm::Value *SetPropertyFn =
CGM.getObjCRuntime().GetPropertySetFunction();
-
+
if (!SetPropertyFn) {
CGM.ErrorUnsupported(PID, "Obj-C setter requiring atomic copy");
FinishFunction();
return;
}
-
- // Emit objc_setProperty((id) self, _cmd, offset, arg,
+
+ // Emit objc_setProperty((id) self, _cmd, offset, arg,
// <is-atomic>, <is-copy>).
// FIXME: Can't this be simpler? This might even be worse than the
// corresponding gcc code.
@@ -254,11 +255,11 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
ValueDecl *Cmd = OMD->getCmdDecl();
llvm::Value *CmdVal = Builder.CreateLoad(LocalDeclMap[Cmd], "cmd");
QualType IdTy = getContext().getObjCIdType();
- llvm::Value *SelfAsId =
+ llvm::Value *SelfAsId =
Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
llvm::Value *Offset = EmitIvarOffset(IMP->getClassInterface(), Ivar);
llvm::Value *Arg = LocalDeclMap[*OMD->param_begin()];
- llvm::Value *ArgAsId =
+ llvm::Value *ArgAsId =
Builder.CreateBitCast(Builder.CreateLoad(Arg, "arg"),
Types.ConvertType(IdTy));
llvm::Value *True =
@@ -270,13 +271,13 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
Args.push_back(std::make_pair(RValue::get(CmdVal), Cmd->getType()));
Args.push_back(std::make_pair(RValue::get(Offset), getContext().LongTy));
Args.push_back(std::make_pair(RValue::get(ArgAsId), IdTy));
- Args.push_back(std::make_pair(RValue::get(IsAtomic ? True : False),
+ Args.push_back(std::make_pair(RValue::get(IsAtomic ? True : False),
getContext().BoolTy));
- Args.push_back(std::make_pair(RValue::get(IsCopy ? True : False),
+ Args.push_back(std::make_pair(RValue::get(IsCopy ? True : False),
getContext().BoolTy));
// FIXME: We shouldn't need to get the function info here, the runtime
// already should have computed it to build the function.
- EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args),
+ EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args),
SetPropertyFn, Args);
} else {
SourceLocation Loc = PD->getLocation();
@@ -305,18 +306,18 @@ llvm::Value *CodeGenFunction::LoadObjCSelf() {
QualType CodeGenFunction::TypeOfSelfObject() {
const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
ImplicitParamDecl *selfDecl = OMD->getSelfDecl();
- const PointerType *PTy =
- cast<PointerType>(getContext().getCanonicalType(selfDecl->getType()));
+ const ObjCObjectPointerType *PTy = cast<ObjCObjectPointerType>(
+ getContext().getCanonicalType(selfDecl->getType()));
return PTy->getPointeeType();
}
-RValue CodeGenFunction::EmitObjCSuperPropertyGet(const Expr *Exp,
+RValue CodeGenFunction::EmitObjCSuperPropertyGet(const Expr *Exp,
const Selector &S) {
llvm::Value *Receiver = LoadObjCSelf();
const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
bool isClassMessage = OMD->isClassMethod();
bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
- return CGM.getObjCRuntime().GenerateMessageSendSuper(*this,
+ return CGM.getObjCRuntime().GenerateMessageSendSuper(*this,
Exp->getType(),
S,
OMD->getClassInterface(),
@@ -324,36 +325,36 @@ RValue CodeGenFunction::EmitObjCSuperPropertyGet(const Expr *Exp,
Receiver,
isClassMessage,
CallArgList());
-
+
}
RValue CodeGenFunction::EmitObjCPropertyGet(const Expr *Exp) {
+ Exp = Exp->IgnoreParens();
// FIXME: Split it into two separate routines.
if (const ObjCPropertyRefExpr *E = dyn_cast<ObjCPropertyRefExpr>(Exp)) {
Selector S = E->getProperty()->getGetterName();
if (isa<ObjCSuperExpr>(E->getBase()))
return EmitObjCSuperPropertyGet(E, S);
return CGM.getObjCRuntime().
- GenerateMessageSend(*this, Exp->getType(), S,
- EmitScalarExpr(E->getBase()),
+ GenerateMessageSend(*this, Exp->getType(), S,
+ EmitScalarExpr(E->getBase()),
false, CallArgList());
- }
- else {
- const ObjCKVCRefExpr *KE = cast<ObjCKVCRefExpr>(Exp);
+ } else {
+ const ObjCImplicitSetterGetterRefExpr *KE =
+ cast<ObjCImplicitSetterGetterRefExpr>(Exp);
Selector S = KE->getGetterMethod()->getSelector();
llvm::Value *Receiver;
- if (KE->getClassProp()) {
- const ObjCInterfaceDecl *OID = KE->getClassProp();
+ if (KE->getInterfaceDecl()) {
+ const ObjCInterfaceDecl *OID = KE->getInterfaceDecl();
Receiver = CGM.getObjCRuntime().GetClass(Builder, OID);
- }
- else if (isa<ObjCSuperExpr>(KE->getBase()))
+ } else if (isa<ObjCSuperExpr>(KE->getBase()))
return EmitObjCSuperPropertyGet(KE, S);
- else
+ else
Receiver = EmitScalarExpr(KE->getBase());
return CGM.getObjCRuntime().
- GenerateMessageSend(*this, Exp->getType(), S,
- Receiver,
- KE->getClassProp() != 0, CallArgList());
+ GenerateMessageSend(*this, Exp->getType(), S,
+ Receiver,
+ KE->getInterfaceDecl() != 0, CallArgList());
}
}
@@ -366,7 +367,7 @@ void CodeGenFunction::EmitObjCSuperPropertySet(const Expr *Exp,
bool isClassMessage = OMD->isClassMethod();
bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
Args.push_back(std::make_pair(Src, Exp->getType()));
- CGM.getObjCRuntime().GenerateMessageSendSuper(*this,
+ CGM.getObjCRuntime().GenerateMessageSendSuper(*this,
Exp->getType(),
S,
OMD->getClassInterface(),
@@ -385,42 +386,39 @@ void CodeGenFunction::EmitObjCPropertySet(const Expr *Exp,
if (isa<ObjCSuperExpr>(E->getBase())) {
EmitObjCSuperPropertySet(E, S, Src);
return;
- }
+ }
CallArgList Args;
Args.push_back(std::make_pair(Src, E->getType()));
- CGM.getObjCRuntime().GenerateMessageSend(*this, getContext().VoidTy, S,
- EmitScalarExpr(E->getBase()),
+ CGM.getObjCRuntime().GenerateMessageSend(*this, getContext().VoidTy, S,
+ EmitScalarExpr(E->getBase()),
false, Args);
- }
- else if (const ObjCKVCRefExpr *E = dyn_cast<ObjCKVCRefExpr>(Exp)) {
+ } else if (const ObjCImplicitSetterGetterRefExpr *E =
+ dyn_cast<ObjCImplicitSetterGetterRefExpr>(Exp)) {
Selector S = E->getSetterMethod()->getSelector();
CallArgList Args;
llvm::Value *Receiver;
- if (E->getClassProp()) {
- const ObjCInterfaceDecl *OID = E->getClassProp();
+ if (E->getInterfaceDecl()) {
+ const ObjCInterfaceDecl *OID = E->getInterfaceDecl();
Receiver = CGM.getObjCRuntime().GetClass(Builder, OID);
- }
- else if (isa<ObjCSuperExpr>(E->getBase())) {
+ } else if (isa<ObjCSuperExpr>(E->getBase())) {
EmitObjCSuperPropertySet(E, S, Src);
return;
- }
- else
+ } else
Receiver = EmitScalarExpr(E->getBase());
Args.push_back(std::make_pair(Src, E->getType()));
- CGM.getObjCRuntime().GenerateMessageSend(*this, getContext().VoidTy, S,
- Receiver,
- E->getClassProp() != 0, Args);
- }
- else
+ CGM.getObjCRuntime().GenerateMessageSend(*this, getContext().VoidTy, S,
+ Receiver,
+ E->getInterfaceDecl() != 0, Args);
+ } else
assert (0 && "bad expression node in EmitObjCPropertySet");
}
void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
- llvm::Constant *EnumerationMutationFn =
+ llvm::Constant *EnumerationMutationFn =
CGM.getObjCRuntime().EnumerationMutationFunction();
llvm::Value *DeclAddress;
QualType ElementTy;
-
+
if (!EnumerationMutationFn) {
CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime");
return;
@@ -431,62 +429,62 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
assert(HaveInsertPoint() && "DeclStmt destroyed insert point!");
const Decl* D = SD->getSingleDecl();
ElementTy = cast<ValueDecl>(D)->getType();
- DeclAddress = LocalDeclMap[D];
+ DeclAddress = LocalDeclMap[D];
} else {
ElementTy = cast<Expr>(S.getElement())->getType();
DeclAddress = 0;
}
-
+
// Fast enumeration state.
QualType StateTy = getContext().getObjCFastEnumerationStateType();
- llvm::AllocaInst *StatePtr = CreateTempAlloca(ConvertType(StateTy),
+ llvm::AllocaInst *StatePtr = CreateTempAlloca(ConvertType(StateTy),
"state.ptr");
- StatePtr->setAlignment(getContext().getTypeAlign(StateTy) >> 3);
+ StatePtr->setAlignment(getContext().getTypeAlign(StateTy) >> 3);
EmitMemSetToZero(StatePtr, StateTy);
-
+
// Number of elements in the items array.
static const unsigned NumItems = 16;
-
+
// Get selector
llvm::SmallVector<IdentifierInfo*, 3> II;
II.push_back(&CGM.getContext().Idents.get("countByEnumeratingWithState"));
II.push_back(&CGM.getContext().Idents.get("objects"));
II.push_back(&CGM.getContext().Idents.get("count"));
- Selector FastEnumSel = CGM.getContext().Selectors.getSelector(II.size(),
+ Selector FastEnumSel = CGM.getContext().Selectors.getSelector(II.size(),
&II[0]);
QualType ItemsTy =
getContext().getConstantArrayType(getContext().getObjCIdType(),
- llvm::APInt(32, NumItems),
+ llvm::APInt(32, NumItems),
ArrayType::Normal, 0);
llvm::Value *ItemsPtr = CreateTempAlloca(ConvertType(ItemsTy), "items.ptr");
-
+
llvm::Value *Collection = EmitScalarExpr(S.getCollection());
-
+
CallArgList Args;
- Args.push_back(std::make_pair(RValue::get(StatePtr),
+ Args.push_back(std::make_pair(RValue::get(StatePtr),
getContext().getPointerType(StateTy)));
-
- Args.push_back(std::make_pair(RValue::get(ItemsPtr),
+
+ Args.push_back(std::make_pair(RValue::get(ItemsPtr),
getContext().getPointerType(ItemsTy)));
-
+
const llvm::Type *UnsignedLongLTy = ConvertType(getContext().UnsignedLongTy);
llvm::Constant *Count = llvm::ConstantInt::get(UnsignedLongLTy, NumItems);
- Args.push_back(std::make_pair(RValue::get(Count),
+ Args.push_back(std::make_pair(RValue::get(Count),
getContext().UnsignedLongTy));
-
- RValue CountRV =
- CGM.getObjCRuntime().GenerateMessageSend(*this,
+
+ RValue CountRV =
+ CGM.getObjCRuntime().GenerateMessageSend(*this,
getContext().UnsignedLongTy,
FastEnumSel,
Collection, false, Args);
llvm::Value *LimitPtr = CreateTempAlloca(UnsignedLongLTy, "limit.ptr");
Builder.CreateStore(CountRV.getScalarVal(), LimitPtr);
-
+
llvm::BasicBlock *NoElements = createBasicBlock("noelements");
llvm::BasicBlock *SetStartMutations = createBasicBlock("setstartmutations");
-
+
llvm::Value *Limit = Builder.CreateLoad(LimitPtr);
llvm::Value *Zero = llvm::Constant::getNullValue(UnsignedLongLTy);
@@ -494,60 +492,60 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
Builder.CreateCondBr(IsZero, NoElements, SetStartMutations);
EmitBlock(SetStartMutations);
-
- llvm::Value *StartMutationsPtr =
+
+ llvm::Value *StartMutationsPtr =
CreateTempAlloca(UnsignedLongLTy);
-
- llvm::Value *StateMutationsPtrPtr =
+
+ llvm::Value *StateMutationsPtrPtr =
Builder.CreateStructGEP(StatePtr, 2, "mutationsptr.ptr");
- llvm::Value *StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr,
+ llvm::Value *StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr,
"mutationsptr");
-
- llvm::Value *StateMutations = Builder.CreateLoad(StateMutationsPtr,
+
+ llvm::Value *StateMutations = Builder.CreateLoad(StateMutationsPtr,
"mutations");
-
+
Builder.CreateStore(StateMutations, StartMutationsPtr);
-
+
llvm::BasicBlock *LoopStart = createBasicBlock("loopstart");
EmitBlock(LoopStart);
llvm::Value *CounterPtr = CreateTempAlloca(UnsignedLongLTy, "counter.ptr");
Builder.CreateStore(Zero, CounterPtr);
-
- llvm::BasicBlock *LoopBody = createBasicBlock("loopbody");
+
+ llvm::BasicBlock *LoopBody = createBasicBlock("loopbody");
EmitBlock(LoopBody);
StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
StateMutations = Builder.CreateLoad(StateMutationsPtr, "statemutations");
- llvm::Value *StartMutations = Builder.CreateLoad(StartMutationsPtr,
+ llvm::Value *StartMutations = Builder.CreateLoad(StartMutationsPtr,
"mutations");
- llvm::Value *MutationsEqual = Builder.CreateICmpEQ(StateMutations,
+ llvm::Value *MutationsEqual = Builder.CreateICmpEQ(StateMutations,
StartMutations,
"tobool");
-
-
+
+
llvm::BasicBlock *WasMutated = createBasicBlock("wasmutated");
llvm::BasicBlock *WasNotMutated = createBasicBlock("wasnotmutated");
-
+
Builder.CreateCondBr(MutationsEqual, WasNotMutated, WasMutated);
-
+
EmitBlock(WasMutated);
llvm::Value *V =
- Builder.CreateBitCast(Collection,
+ Builder.CreateBitCast(Collection,
ConvertType(getContext().getObjCIdType()),
"tmp");
CallArgList Args2;
- Args2.push_back(std::make_pair(RValue::get(V),
+ Args2.push_back(std::make_pair(RValue::get(V),
getContext().getObjCIdType()));
// FIXME: We shouldn't need to get the function info here, the runtime already
// should have computed it to build the function.
- EmitCall(CGM.getTypes().getFunctionInfo(getContext().VoidTy, Args2),
+ EmitCall(CGM.getTypes().getFunctionInfo(getContext().VoidTy, Args2),
EnumerationMutationFn, Args2);
-
+
EmitBlock(WasNotMutated);
-
- llvm::Value *StateItemsPtr =
+
+ llvm::Value *StateItemsPtr =
Builder.CreateStructGEP(StatePtr, 1, "stateitems.ptr");
llvm::Value *Counter = Builder.CreateLoad(CounterPtr, "counter");
@@ -555,39 +553,39 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
llvm::Value *EnumStateItems = Builder.CreateLoad(StateItemsPtr,
"stateitems");
- llvm::Value *CurrentItemPtr =
+ llvm::Value *CurrentItemPtr =
Builder.CreateGEP(EnumStateItems, Counter, "currentitem.ptr");
-
+
llvm::Value *CurrentItem = Builder.CreateLoad(CurrentItemPtr, "currentitem");
-
+
// Cast the item to the right type.
CurrentItem = Builder.CreateBitCast(CurrentItem,
ConvertType(ElementTy), "tmp");
-
+
if (!DeclAddress) {
LValue LV = EmitLValue(cast<Expr>(S.getElement()));
-
+
// Set the value to null.
Builder.CreateStore(CurrentItem, LV.getAddress());
} else
Builder.CreateStore(CurrentItem, DeclAddress);
-
+
// Increment the counter.
- Counter = Builder.CreateAdd(Counter,
+ Counter = Builder.CreateAdd(Counter,
llvm::ConstantInt::get(UnsignedLongLTy, 1));
Builder.CreateStore(Counter, CounterPtr);
-
+
llvm::BasicBlock *LoopEnd = createBasicBlock("loopend");
llvm::BasicBlock *AfterBody = createBasicBlock("afterbody");
-
+
BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody));
EmitStmt(S.getBody());
-
+
BreakContinueStack.pop_back();
-
+
EmitBlock(AfterBody);
-
+
llvm::BasicBlock *FetchMore = createBasicBlock("fetchmore");
Counter = Builder.CreateLoad(CounterPtr);
@@ -597,18 +595,18 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// Fetch more elements.
EmitBlock(FetchMore);
-
- CountRV =
- CGM.getObjCRuntime().GenerateMessageSend(*this,
+
+ CountRV =
+ CGM.getObjCRuntime().GenerateMessageSend(*this,
getContext().UnsignedLongTy,
- FastEnumSel,
+ FastEnumSel,
Collection, false, Args);
Builder.CreateStore(CountRV.getScalarVal(), LimitPtr);
Limit = Builder.CreateLoad(LimitPtr);
-
+
IsZero = Builder.CreateICmpEQ(Limit, Zero, "iszero");
Builder.CreateCondBr(IsZero, NoElements, LoopStart);
-
+
// No more elements.
EmitBlock(NoElements);
@@ -616,7 +614,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// If the element was not a declaration, set it to be null.
LValue LV = EmitLValue(cast<Expr>(S.getElement()));
-
+
// Set the value to null.
Builder.CreateStore(llvm::Constant::getNullValue(ConvertType(ElementTy)),
LV.getAddress());
@@ -625,19 +623,16 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
EmitBlock(LoopEnd);
}
-void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S)
-{
+void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) {
CGM.getObjCRuntime().EmitTryOrSynchronizedStmt(*this, S);
}
-void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S)
-{
+void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) {
CGM.getObjCRuntime().EmitThrowStmt(*this, S);
}
void CodeGenFunction::EmitObjCAtSynchronizedStmt(
- const ObjCAtSynchronizedStmt &S)
-{
+ const ObjCAtSynchronizedStmt &S) {
CGM.getObjCRuntime().EmitTryOrSynchronizedStmt(*this, S);
}
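For orientation, the fast-enumeration loop above reads field 1 of the state structure for the items pointer ("stateitems.ptr") and field 2 for the mutations pointer ("mutationsptr.ptr"). That matches the usual NSFastEnumerationState layout; a hedged C++ sketch of it, with void* standing in for id (the field names come from the runtime headers, not from this patch):

// Sketch of the record described by getObjCFastEnumerationStateType();
// indices 1 and 2 are the ones the generated loop dereferences.
struct FastEnumerationStateSketch {
  unsigned long state;           // 0
  void **itemsPtr;               // 1: "stateitems.ptr"
  unsigned long *mutationsPtr;   // 2: "mutationsptr.ptr"
  unsigned long extra[5];
};
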
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index 6554da9cf98c..f348bfffcb86 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -43,6 +43,7 @@ using llvm::dyn_cast;
static const int RuntimeVersion = 8;
static const int NonFragileRuntimeVersion = 9;
static const int ProtocolVersion = 2;
+static const int NonFragileProtocolVersion = 3;
namespace {
class CGObjCGNU : public CodeGen::CGObjCRuntime {
@@ -50,9 +51,11 @@ private:
CodeGen::CodeGenModule &CGM;
llvm::Module &TheModule;
const llvm::PointerType *SelectorTy;
+ const llvm::IntegerType *Int8Ty;
const llvm::PointerType *PtrToInt8Ty;
const llvm::FunctionType *IMPTy;
const llvm::PointerType *IdTy;
+ QualType ASTIdTy;
const llvm::IntegerType *IntTy;
const llvm::PointerType *PtrTy;
const llvm::IntegerType *LongTy;
@@ -70,6 +73,7 @@ private:
// Some zeros used for GEPs in lots of places.
llvm::Constant *Zeros[2];
llvm::Constant *NULLPtr;
+ llvm::LLVMContext &VMContext;
private:
llvm::Constant *GenerateIvarList(
const llvm::SmallVectorImpl<llvm::Constant *> &IvarNames,
@@ -77,12 +81,19 @@ private:
const llvm::SmallVectorImpl<llvm::Constant *> &IvarOffsets);
llvm::Constant *GenerateMethodList(const std::string &ClassName,
const std::string &CategoryName,
- const llvm::SmallVectorImpl<Selector> &MethodSels,
- const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes,
+ const llvm::SmallVectorImpl<Selector> &MethodSels,
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes,
bool isClassMethodList);
llvm::Constant *GenerateEmptyProtocol(const std::string &ProtocolName);
+ llvm::Constant *GeneratePropertyList(const ObjCImplementationDecl *OID,
+ llvm::SmallVectorImpl<Selector> &InstanceMethodSels,
+ llvm::SmallVectorImpl<llvm::Constant*> &InstanceMethodTypes);
llvm::Constant *GenerateProtocolList(
const llvm::SmallVectorImpl<std::string> &Protocols);
+ // To ensure that all protocols are seen by the runtime, we add a category on
+ // a class defined in the runtime, declaring no methods, but adopting the
+ // protocols.
+ void GenerateProtocolHolderCategory(void);
llvm::Constant *GenerateClassStructure(
llvm::Constant *MetaClass,
llvm::Constant *SuperClass,
@@ -92,12 +103,16 @@ private:
llvm::Constant *InstanceSize,
llvm::Constant *IVars,
llvm::Constant *Methods,
- llvm::Constant *Protocols);
+ llvm::Constant *Protocols,
+ llvm::Constant *IvarOffsets,
+ llvm::Constant *Properties);
llvm::Constant *GenerateProtocolMethodList(
const llvm::SmallVectorImpl<llvm::Constant *> &MethodNames,
const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes);
llvm::Constant *MakeConstantString(const std::string &Str, const std::string
&Name="");
+ llvm::Constant *ExportUniqueString(const std::string &Str, const std::string
+ prefix);
llvm::Constant *MakeGlobal(const llvm::StructType *Ty,
std::vector<llvm::Constant*> &V, const std::string &Name="");
llvm::Constant *MakeGlobal(const llvm::ArrayType *Ty,
@@ -108,7 +123,7 @@ private:
public:
CGObjCGNU(CodeGen::CodeGenModule &cgm);
virtual llvm::Constant *GenerateConstantString(const ObjCStringLiteral *);
- virtual CodeGen::RValue
+ virtual CodeGen::RValue
GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
QualType ResultType,
Selector Sel,
@@ -116,7 +131,7 @@ public:
bool IsClassMessage,
const CallArgList &CallArgs,
const ObjCMethodDecl *Method);
- virtual CodeGen::RValue
+ virtual CodeGen::RValue
GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
QualType ResultType,
Selector Sel,
@@ -124,14 +139,15 @@ public:
bool isCategoryImpl,
llvm::Value *Receiver,
bool IsClassMessage,
- const CallArgList &CallArgs);
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method);
virtual llvm::Value *GetClass(CGBuilderTy &Builder,
const ObjCInterfaceDecl *OID);
virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel);
virtual llvm::Value *GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
*Method);
-
- virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
+
+ virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
const ObjCContainerDecl *CD);
virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
@@ -139,11 +155,10 @@ public:
const ObjCProtocolDecl *PD);
virtual void GenerateProtocol(const ObjCProtocolDecl *PD);
virtual llvm::Function *ModuleInitFunction();
- virtual void MergeMetadataGlobals(std::vector<llvm::Constant*> &UsedArray);
virtual llvm::Function *GetPropertyGetFunction();
virtual llvm::Function *GetPropertySetFunction();
- virtual llvm::Function *EnumerationMutationFunction();
-
+ virtual llvm::Constant *EnumerationMutationFunction();
+
virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
const Stmt &S);
virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
@@ -155,9 +170,14 @@ public:
virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dest);
virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest);
+ llvm::Value *src, llvm::Value *dest,
+ llvm::Value *ivarOffset);
virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dest);
+ virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *DestPtr,
+ llvm::Value *SrcPtr,
+ QualType Ty);
virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
QualType ObjectTy,
llvm::Value *BaseValue,
@@ -173,18 +193,19 @@ public:
/// Emits a reference to a dummy variable which is emitted with each class.
/// This ensures that a linker error will be generated when trying to link
/// together modules where a referenced class is not defined.
-void CGObjCGNU::EmitClassRef(const std::string &className){
+void CGObjCGNU::EmitClassRef(const std::string &className) {
std::string symbolRef = "__objc_class_ref_" + className;
// Don't emit two copies of the same symbol
- if (TheModule.getGlobalVariable(symbolRef)) return;
+ if (TheModule.getGlobalVariable(symbolRef))
+ return;
std::string symbolName = "__objc_class_name_" + className;
llvm::GlobalVariable *ClassSymbol = TheModule.getGlobalVariable(symbolName);
if (!ClassSymbol) {
- ClassSymbol = new llvm::GlobalVariable(LongTy, false,
- llvm::GlobalValue::ExternalLinkage, 0, symbolName, &TheModule);
+ ClassSymbol = new llvm::GlobalVariable(TheModule, LongTy, false,
+ llvm::GlobalValue::ExternalLinkage, 0, symbolName);
}
- new llvm::GlobalVariable(ClassSymbol->getType(), true,
- llvm::GlobalValue::CommonLinkage, ClassSymbol, symbolRef, &TheModule);
+ new llvm::GlobalVariable(TheModule, ClassSymbol->getType(), true,
+ llvm::GlobalValue::WeakAnyLinkage, ClassSymbol, symbolRef);
}
static std::string SymbolNameForClass(const std::string &ClassName) {
@@ -200,36 +221,37 @@ static std::string SymbolNameForMethod(const std::string &ClassName, const
CGObjCGNU::CGObjCGNU(CodeGen::CodeGenModule &cgm)
: CGM(cgm), TheModule(CGM.getModule()), ClassPtrAlias(0),
- MetaClassPtrAlias(0) {
+ MetaClassPtrAlias(0), VMContext(cgm.getLLVMContext()) {
IntTy = cast<llvm::IntegerType>(
CGM.getTypes().ConvertType(CGM.getContext().IntTy));
LongTy = cast<llvm::IntegerType>(
CGM.getTypes().ConvertType(CGM.getContext().LongTy));
-
+
+ Int8Ty = llvm::Type::getInt8Ty(VMContext);
+ // C string type. Used in lots of places.
+ PtrToInt8Ty = llvm::PointerType::getUnqual(Int8Ty);
+
Zeros[0] = llvm::ConstantInt::get(LongTy, 0);
Zeros[1] = Zeros[0];
- NULLPtr = llvm::ConstantPointerNull::get(
- llvm::PointerType::getUnqual(llvm::Type::Int8Ty));
- // C string type. Used in lots of places.
- PtrToInt8Ty =
- llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ NULLPtr = llvm::ConstantPointerNull::get(PtrToInt8Ty);
// Get the selector Type.
SelectorTy = cast<llvm::PointerType>(
CGM.getTypes().ConvertType(CGM.getContext().getObjCSelType()));
PtrToIntTy = llvm::PointerType::getUnqual(IntTy);
PtrTy = PtrToInt8Ty;
-
+
// Object type
- IdTy = cast<llvm::PointerType>(
- CGM.getTypes().ConvertType(CGM.getContext().getObjCIdType()));
-
+ ASTIdTy = CGM.getContext().getObjCIdType();
+ IdTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(ASTIdTy));
+
// IMP type
std::vector<const llvm::Type*> IMPArgs;
IMPArgs.push_back(IdTy);
IMPArgs.push_back(SelectorTy);
IMPTy = llvm::FunctionType::get(IdTy, IMPArgs, true);
}
+
// This has to perform the lookup every time, since posing and related
// techniques can modify the name -> class mapping.
llvm::Value *CGObjCGNU::GetClass(CGBuilderTy &Builder,
@@ -251,10 +273,10 @@ llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, Selector Sel) {
llvm::GlobalAlias *&US = UntypedSelectors[Sel.getAsString()];
if (US == 0)
US = new llvm::GlobalAlias(llvm::PointerType::getUnqual(SelectorTy),
- llvm::GlobalValue::InternalLinkage,
- ".objc_untyped_selector_alias",
+ llvm::GlobalValue::PrivateLinkage,
+ ".objc_untyped_selector_alias"+Sel.getAsString(),
NULL, &TheModule);
-
+
return Builder.CreateLoad(US);
}
@@ -269,15 +291,14 @@ llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
SelTypes);
// If it's already cached, return it.
- if (TypedSelectors[Selector])
- {
- return Builder.CreateLoad(TypedSelectors[Selector]);
+ if (TypedSelectors[Selector]) {
+ return Builder.CreateLoad(TypedSelectors[Selector]);
}
// If it isn't, cache it.
llvm::GlobalAlias *Sel = new llvm::GlobalAlias(
llvm::PointerType::getUnqual(SelectorTy),
- llvm::GlobalValue::InternalLinkage, SelName,
+ llvm::GlobalValue::PrivateLinkage, ".objc_selector_alias" + SelName,
NULL, &TheModule);
TypedSelectors[Selector] = Sel;
@@ -286,23 +307,33 @@ llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
llvm::Constant *CGObjCGNU::MakeConstantString(const std::string &Str,
const std::string &Name) {
- llvm::Constant * ConstStr = llvm::ConstantArray::get(Str);
- ConstStr = new llvm::GlobalVariable(ConstStr->getType(), true,
- llvm::GlobalValue::InternalLinkage,
- ConstStr, Name, &TheModule);
+ llvm::Constant *ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
return llvm::ConstantExpr::getGetElementPtr(ConstStr, Zeros, 2);
}
+llvm::Constant *CGObjCGNU::ExportUniqueString(const std::string &Str,
+ const std::string prefix) {
+ std::string name = prefix + Str;
+ llvm::Constant *ConstStr = TheModule.getGlobalVariable(name);
+ if (!ConstStr) {
+ llvm::Constant *value = llvm::ConstantArray::get(VMContext, Str, true);
+ ConstStr = new llvm::GlobalVariable(TheModule, value->getType(), true,
+ llvm::GlobalValue::LinkOnceODRLinkage, value, prefix + Str);
+ }
+ return llvm::ConstantExpr::getGetElementPtr(ConstStr, Zeros, 2);
+}
+
llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::StructType *Ty,
std::vector<llvm::Constant*> &V, const std::string &Name) {
llvm::Constant *C = llvm::ConstantStruct::get(Ty, V);
- return new llvm::GlobalVariable(Ty, false,
- llvm::GlobalValue::InternalLinkage, C, Name, &TheModule);
+ return new llvm::GlobalVariable(TheModule, Ty, false,
+ llvm::GlobalValue::InternalLinkage, C, Name);
}
+
llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::ArrayType *Ty,
std::vector<llvm::Constant*> &V, const std::string &Name) {
llvm::Constant *C = llvm::ConstantArray::get(Ty, V);
- return new llvm::GlobalVariable(Ty, false,
- llvm::GlobalValue::InternalLinkage, C, Name, &TheModule);
+ return new llvm::GlobalVariable(TheModule, Ty, false,
+ llvm::GlobalValue::InternalLinkage, C, Name);
}
/// Generate an NSConstantString object.
@@ -310,14 +341,14 @@ llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::ArrayType *Ty,
//an OpenStep implementation, this should let them select their own class for
//constant strings.
llvm::Constant *CGObjCGNU::GenerateConstantString(const ObjCStringLiteral *SL) {
- std::string Str(SL->getString()->getStrData(),
+ std::string Str(SL->getString()->getStrData(),
SL->getString()->getByteLength());
std::vector<llvm::Constant*> Ivars;
Ivars.push_back(NULLPtr);
Ivars.push_back(MakeConstantString(Str));
Ivars.push_back(llvm::ConstantInt::get(IntTy, Str.size()));
llvm::Constant *ObjCStr = MakeGlobal(
- llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, IntTy, NULL),
+ llvm::StructType::get(VMContext, PtrToInt8Ty, PtrToInt8Ty, IntTy, NULL),
Ivars, ".objc_str");
ConstantStrings.push_back(
llvm::ConstantExpr::getBitCast(ObjCStr, PtrToInt8Ty));
@@ -335,21 +366,23 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
bool isCategoryImpl,
llvm::Value *Receiver,
bool IsClassMessage,
- const CallArgList &CallArgs) {
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
llvm::Value *cmd = GetSelector(CGF.Builder, Sel);
CallArgList ActualArgs;
ActualArgs.push_back(
- std::make_pair(RValue::get(CGF.Builder.CreateBitCast(Receiver, IdTy)),
- CGF.getContext().getObjCIdType()));
+ std::make_pair(RValue::get(CGF.Builder.CreateBitCast(Receiver, IdTy)),
+ ASTIdTy));
ActualArgs.push_back(std::make_pair(RValue::get(cmd),
CGF.getContext().getObjCSelType()));
ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
CodeGenTypes &Types = CGM.getTypes();
const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs);
- const llvm::FunctionType *impType = Types.GetFunctionType(FnInfo, false);
+ const llvm::FunctionType *impType =
+ Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
llvm::Value *ReceiverClass = 0;
if (isCategoryImpl) {
@@ -368,8 +401,8 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
} else {
// Set up global aliases for the metaclass or class pointer if they do not
// already exist. These will are forward-references which will be set to
- // pointers to the class and metaclass structure created for the runtime load
- // function. To send a message to super, we look up the value of the
+ // pointers to the class and metaclass structure created for the runtime
+ // load function. To send a message to super, we look up the value of the
// super_class pointer from either the class or metaclass structure.
if (IsClassMessage) {
if (!MetaClassPtrAlias) {
@@ -388,15 +421,16 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
}
}
// Cast the pointer to a simplified version of the class structure
- ReceiverClass = CGF.Builder.CreateBitCast(ReceiverClass,
- llvm::PointerType::getUnqual(llvm::StructType::get(IdTy, IdTy, NULL)));
+ ReceiverClass = CGF.Builder.CreateBitCast(ReceiverClass,
+ llvm::PointerType::getUnqual(
+ llvm::StructType::get(VMContext, IdTy, IdTy, NULL)));
// Get the superclass pointer
ReceiverClass = CGF.Builder.CreateStructGEP(ReceiverClass, 1);
// Load the superclass pointer
ReceiverClass = CGF.Builder.CreateLoad(ReceiverClass);
// Construct the structure used to look up the IMP
- llvm::StructType *ObjCSuperTy = llvm::StructType::get(Receiver->getType(),
- IdTy, NULL);
+ llvm::StructType *ObjCSuperTy = llvm::StructType::get(VMContext,
+ Receiver->getType(), IdTy, NULL);
llvm::Value *ObjCSuper = CGF.Builder.CreateAlloca(ObjCSuperTy);
CGF.Builder.CreateStore(Receiver, CGF.Builder.CreateStructGEP(ObjCSuper, 0));
@@ -407,7 +441,7 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
std::vector<const llvm::Type*> Params;
Params.push_back(llvm::PointerType::getUnqual(ObjCSuperTy));
Params.push_back(SelectorTy);
- llvm::Constant *lookupFunction =
+ llvm::Constant *lookupFunction =
CGM.CreateRuntimeFunction(llvm::FunctionType::get(
llvm::PointerType::getUnqual(impType), Params, true),
"objc_msg_lookup_super");
@@ -419,7 +453,7 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
return CGF.EmitCall(FnInfo, imp, ActualArgs);
}
-/// Generate code for a message send expression.
+/// Generate code for a message send expression.
CodeGen::RValue
CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
QualType ResultType,
@@ -428,32 +462,38 @@ CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
bool IsClassMessage,
const CallArgList &CallArgs,
const ObjCMethodDecl *Method) {
+ CGBuilderTy &Builder = CGF.Builder;
+ IdTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(ASTIdTy));
llvm::Value *cmd;
if (Method)
- cmd = GetSelector(CGF.Builder, Method);
+ cmd = GetSelector(Builder, Method);
else
- cmd = GetSelector(CGF.Builder, Sel);
+ cmd = GetSelector(Builder, Sel);
CallArgList ActualArgs;
+ Receiver = Builder.CreateBitCast(Receiver, IdTy);
ActualArgs.push_back(
- std::make_pair(RValue::get(CGF.Builder.CreateBitCast(Receiver, IdTy)),
- CGF.getContext().getObjCIdType()));
+ std::make_pair(RValue::get(Receiver), ASTIdTy));
ActualArgs.push_back(std::make_pair(RValue::get(cmd),
CGF.getContext().getObjCSelType()));
ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
CodeGenTypes &Types = CGM.getTypes();
const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs);
- const llvm::FunctionType *impType = Types.GetFunctionType(FnInfo, false);
+ const llvm::FunctionType *impType =
+ Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
llvm::Value *imp;
- std::vector<const llvm::Type*> Params;
- Params.push_back(Receiver->getType());
- Params.push_back(SelectorTy);
// For sender-aware dispatch, we pass the sender as the third argument to a
// lookup function. When sending messages from C code, the sender is nil.
- // objc_msg_lookup_sender(id receiver, SEL selector, id sender);
- if (CGM.getContext().getLangOptions().ObjCSenderDispatch) {
+ // objc_msg_lookup_sender(id *receiver, SEL selector, id sender);
+ if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
+
+ std::vector<const llvm::Type*> Params;
+ llvm::Value *ReceiverPtr = CGF.CreateTempAlloca(Receiver->getType());
+ Builder.CreateStore(Receiver, ReceiverPtr);
+ Params.push_back(ReceiverPtr->getType());
+ Params.push_back(SelectorTy);
llvm::Value *self;
if (isa<ObjCMethodDecl>(CGF.CurFuncDecl)) {
@@ -461,34 +501,55 @@ CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
} else {
self = llvm::ConstantPointerNull::get(IdTy);
}
+
Params.push_back(self->getType());
- llvm::Constant *lookupFunction =
+
+ // The lookup function returns a slot, which can be safely cached.
+ llvm::Type *SlotTy = llvm::StructType::get(VMContext, PtrTy, PtrTy, PtrTy,
+ IntTy, llvm::PointerType::getUnqual(impType), NULL);
+ llvm::Constant *lookupFunction =
CGM.CreateRuntimeFunction(llvm::FunctionType::get(
- llvm::PointerType::getUnqual(impType), Params, true),
+ llvm::PointerType::getUnqual(SlotTy), Params, true),
"objc_msg_lookup_sender");
- imp = CGF.Builder.CreateCall3(lookupFunction, Receiver, cmd, self);
+ // The lookup function is guaranteed not to capture the receiver pointer.
+ if (llvm::Function *LookupFn = dyn_cast<llvm::Function>(lookupFunction)) {
+ LookupFn->setDoesNotCapture(1);
+ }
+
+ llvm::Value *slot =
+ Builder.CreateCall3(lookupFunction, ReceiverPtr, cmd, self);
+ imp = Builder.CreateLoad(Builder.CreateStructGEP(slot, 4));
+ // The lookup function may have changed the receiver, so make sure we use
+ // the new one.
+ ActualArgs[0] =
+ std::make_pair(RValue::get(Builder.CreateLoad(ReceiverPtr)), ASTIdTy);
} else {
- llvm::Constant *lookupFunction =
+ std::vector<const llvm::Type*> Params;
+ Params.push_back(Receiver->getType());
+ Params.push_back(SelectorTy);
+ llvm::Constant *lookupFunction =
CGM.CreateRuntimeFunction(llvm::FunctionType::get(
llvm::PointerType::getUnqual(impType), Params, true),
"objc_msg_lookup");
- imp = CGF.Builder.CreateCall2(lookupFunction, Receiver, cmd);
+ imp = Builder.CreateCall2(lookupFunction, Receiver, cmd);
}
return CGF.EmitCall(FnInfo, imp, ActualArgs);
}
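
For the non-fragile path, the comments above describe the whole calling convention: the receiver is spilled to a temporary so objc_msg_lookup_sender may rewrite it, the lookup returns a slot whose fifth field is the method pointer, and the possibly updated receiver is reloaded before the call. A hedged C++ sketch of that convention follows; the slot field names are guesses, and only the field count and the position of the IMP (index 4) come from the SlotTy built in the hunk.

// Sketch of sender-aware, slot-returning dispatch. Only objc_msg_lookup_sender
// and the overall slot shape are taken from the diff; the rest is illustrative.
typedef void *id;
typedef void *SEL;
typedef id (*IMP)(id, SEL, ...);

struct objc_slot_sketch {
  void *ptr0, *ptr1, *ptr2;  // three opaque pointer fields (PtrTy x3)
  int version;               // IntTy field
  IMP method;                // index 4: the member the generated code loads
};

extern "C" objc_slot_sketch *
objc_msg_lookup_sender(id *receiver, SEL selector, id sender);

static id send_message(id receiver, SEL selector, id sender) {
  // The receiver is passed by address so the lookup may replace it (for
  // proxies and similar); the generated code reloads it afterwards.
  objc_slot_sketch *slot = objc_msg_lookup_sender(&receiver, selector, sender);
  return slot->method(receiver, selector);
}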
-/// Generates a MethodList. Used in construction of a objc_class and
+/// Generates a MethodList. Used in construction of objc_class and
/// objc_category structures.
llvm::Constant *CGObjCGNU::GenerateMethodList(const std::string &ClassName,
- const std::string &CategoryName,
- const llvm::SmallVectorImpl<Selector> &MethodSels,
- const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes,
+ const std::string &CategoryName,
+ const llvm::SmallVectorImpl<Selector> &MethodSels,
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes,
bool isClassMethodList) {
- // Get the method structure type.
- llvm::StructType *ObjCMethodTy = llvm::StructType::get(
+ if (MethodSels.empty())
+ return NULLPtr;
+ // Get the method structure type.
+ llvm::StructType *ObjCMethodTy = llvm::StructType::get(VMContext,
PtrToInt8Ty, // Really a selector, but the runtime creates it us.
PtrToInt8Ty, // Method types
llvm::PointerType::getUnqual(IMPTy), //Method pointer
@@ -501,11 +562,9 @@ llvm::Constant *CGObjCGNU::GenerateMethodList(const std::string &ClassName,
TheModule.getFunction(SymbolNameForMethod(ClassName, CategoryName,
MethodSels[i].getAsString(),
isClassMethodList))) {
- llvm::Constant *C =
- CGM.GetAddrOfConstantCString(MethodSels[i].getAsString());
- Elements.push_back(llvm::ConstantExpr::getGetElementPtr(C, Zeros, 2));
- Elements.push_back(
- llvm::ConstantExpr::getGetElementPtr(MethodTypes[i], Zeros, 2));
+ llvm::Constant *C = MakeConstantString(MethodSels[i].getAsString());
+ Elements.push_back(C);
+ Elements.push_back(MethodTypes[i]);
Method = llvm::ConstantExpr::getBitCast(Method,
llvm::PointerType::getUnqual(IMPTy));
Elements.push_back(Method);
@@ -521,10 +580,11 @@ llvm::Constant *CGObjCGNU::GenerateMethodList(const std::string &ClassName,
// Structure containing list pointer, array and array count
llvm::SmallVector<const llvm::Type*, 16> ObjCMethodListFields;
- llvm::PATypeHolder OpaqueNextTy = llvm::OpaqueType::get();
+ llvm::PATypeHolder OpaqueNextTy = llvm::OpaqueType::get(VMContext);
llvm::Type *NextPtrTy = llvm::PointerType::getUnqual(OpaqueNextTy);
- llvm::StructType *ObjCMethodListTy = llvm::StructType::get(NextPtrTy,
- IntTy,
+ llvm::StructType *ObjCMethodListTy = llvm::StructType::get(VMContext,
+ NextPtrTy,
+ IntTy,
ObjCMethodArrayTy,
NULL);
// Refine next pointer type to concrete type
@@ -535,10 +595,10 @@ llvm::Constant *CGObjCGNU::GenerateMethodList(const std::string &ClassName,
Methods.clear();
Methods.push_back(llvm::ConstantPointerNull::get(
llvm::PointerType::getUnqual(ObjCMethodListTy)));
- Methods.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty,
+ Methods.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
MethodTypes.size()));
Methods.push_back(MethodArray);
-
+
// Create an instance of the structure
return MakeGlobal(ObjCMethodListTy, Methods, ".objc_method_list");
}
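
GenerateMethodList packs each method into a {selector name, type encoding, IMP} triple and wraps the array in a header carrying a next pointer and a count, refining the opaque next-pointer type afterwards. Written out as C structs purely for orientation (descriptive names only; the runtime's own headers are authoritative), the emitted .objc_method_list global has roughly this shape:

// Shape of the emitted method list; a sketch, not the runtime header.
typedef void *id;
typedef void *SEL;
typedef id (*IMP)(id, SEL, ...);

struct objc_method_sketch {
  const char *selector_name;  // plain C string; the runtime makes the SEL on load
  const char *types;          // Objective-C type encoding of the method
  IMP imp;                    // pointer to the compiled method body
};

struct objc_method_list_sketch {
  objc_method_list_sketch *next;  // emitted as NULL; lists can be chained
  int count;                      // number of entries in methods[]
  objc_method_sketch methods[1];  // actually emitted with `count` elements
};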
@@ -548,8 +608,8 @@ llvm::Constant *CGObjCGNU::GenerateIvarList(
const llvm::SmallVectorImpl<llvm::Constant *> &IvarNames,
const llvm::SmallVectorImpl<llvm::Constant *> &IvarTypes,
const llvm::SmallVectorImpl<llvm::Constant *> &IvarOffsets) {
- // Get the method structure type.
- llvm::StructType *ObjCIvarTy = llvm::StructType::get(
+ // Get the method structure type.
+ llvm::StructType *ObjCIvarTy = llvm::StructType::get(VMContext,
PtrToInt8Ty,
PtrToInt8Ty,
IntTy,
@@ -558,10 +618,8 @@ llvm::Constant *CGObjCGNU::GenerateIvarList(
std::vector<llvm::Constant*> Elements;
for (unsigned int i = 0, e = IvarNames.size() ; i < e ; i++) {
Elements.clear();
- Elements.push_back( llvm::ConstantExpr::getGetElementPtr(IvarNames[i],
- Zeros, 2));
- Elements.push_back( llvm::ConstantExpr::getGetElementPtr(IvarTypes[i],
- Zeros, 2));
+ Elements.push_back(IvarNames[i]);
+ Elements.push_back(IvarTypes[i]);
Elements.push_back(IvarOffsets[i]);
Ivars.push_back(llvm::ConstantStruct::get(ObjCIvarTy, Elements));
}
@@ -570,12 +628,12 @@ llvm::Constant *CGObjCGNU::GenerateIvarList(
llvm::ArrayType *ObjCIvarArrayTy = llvm::ArrayType::get(ObjCIvarTy,
IvarNames.size());
-
+
Elements.clear();
Elements.push_back(llvm::ConstantInt::get(IntTy, (int)IvarNames.size()));
Elements.push_back(llvm::ConstantArray::get(ObjCIvarArrayTy, Ivars));
// Structure containing array and array count
- llvm::StructType *ObjCIvarListTy = llvm::StructType::get(IntTy,
+ llvm::StructType *ObjCIvarListTy = llvm::StructType::get(VMContext, IntTy,
ObjCIvarArrayTy,
NULL);
@@ -593,11 +651,17 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
llvm::Constant *InstanceSize,
llvm::Constant *IVars,
llvm::Constant *Methods,
- llvm::Constant *Protocols) {
+ llvm::Constant *Protocols,
+ llvm::Constant *IvarOffsets,
+ llvm::Constant *Properties) {
// Set up the class structure
// Note: Several of these are char*s when they should be ids. This is
// because the runtime performs this translation on load.
- llvm::StructType *ClassTy = llvm::StructType::get(
+ //
+  // Fields marked New ABI are part of the GNUstep runtime. We emit them
+  // anyway; the classes will still work with the GNU runtime, and the extra
+  // fields will simply be ignored.
+ llvm::StructType *ClassTy = llvm::StructType::get(VMContext,
PtrToInt8Ty, // class_pointer
PtrToInt8Ty, // super_class
PtrToInt8Ty, // name
@@ -606,16 +670,18 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
LongTy, // instance_size
IVars->getType(), // ivars
Methods->getType(), // methods
- // These are all filled in by the runtime, so we pretend
+ // These are all filled in by the runtime, so we pretend
PtrTy, // dtable
PtrTy, // subclass_list
PtrTy, // sibling_class
PtrTy, // protocols
PtrTy, // gc_object_type
+ // New ABI:
+ LongTy, // abi_version
+ IvarOffsets->getType(), // ivar_offsets
+ Properties->getType(), // properties
NULL);
llvm::Constant *Zero = llvm::ConstantInt::get(LongTy, 0);
- llvm::Constant *NullP =
- llvm::ConstantPointerNull::get(PtrTy);
// Fill in the structure
std::vector<llvm::Constant*> Elements;
Elements.push_back(llvm::ConstantExpr::getBitCast(MetaClass, PtrToInt8Ty));
@@ -626,11 +692,14 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
Elements.push_back(InstanceSize);
Elements.push_back(IVars);
Elements.push_back(Methods);
- Elements.push_back(NullP);
- Elements.push_back(NullP);
- Elements.push_back(NullP);
+ Elements.push_back(NULLPtr);
+ Elements.push_back(NULLPtr);
+ Elements.push_back(NULLPtr);
Elements.push_back(llvm::ConstantExpr::getBitCast(Protocols, PtrTy));
- Elements.push_back(NullP);
+ Elements.push_back(NULLPtr);
+ Elements.push_back(Zero);
+ Elements.push_back(IvarOffsets);
+ Elements.push_back(Properties);
// Create an instance of the structure
return MakeGlobal(ClassTy, Elements, SymbolNameForClass(Name));
}
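
The field comments in this hunk spell out the class record completely, including the trailing fields that only the GNUstep ("New ABI") runtime reads. For reference, here is the same layout as a C struct; the names are taken from those comments and the types are flattened to plain pointers and longs, so treat it as a sketch rather than a runtime header.

// Layout of the constant built by GenerateClassStructure. The first three
// fields are emitted as char*s and fixed up by the runtime on load.
struct objc_class_sketch {
  const char *class_pointer;  // isa: the metaclass (or a magic value), as char*
  const char *super_class;    // superclass, also recorded as a char* for now
  const char *name;
  long version;
  long info;                  // flags; GenerateClass below passes 0x11 / 0x12
  long instance_size;
  void *ivars;                // ivar list
  void *methods;              // method list
  // Filled in by the runtime:
  void *dtable;
  void *subclass_list;
  void *sibling_class;
  void *protocols;
  void *gc_object_type;
  // New ABI fields, ignored by the plain GNU runtime:
  long abi_version;
  void *ivar_offsets;         // array of pointers to per-ivar offset variables
  void *properties;           // property metadata list
};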
@@ -638,8 +707,8 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
llvm::Constant *CGObjCGNU::GenerateProtocolMethodList(
const llvm::SmallVectorImpl<llvm::Constant *> &MethodNames,
const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes) {
- // Get the method structure type.
- llvm::StructType *ObjCMethodDescTy = llvm::StructType::get(
+ // Get the method structure type.
+ llvm::StructType *ObjCMethodDescTy = llvm::StructType::get(VMContext,
PtrToInt8Ty, // Really a selector, but the runtime does the casting for us.
PtrToInt8Ty,
NULL);
@@ -647,40 +716,40 @@ llvm::Constant *CGObjCGNU::GenerateProtocolMethodList(
std::vector<llvm::Constant*> Elements;
for (unsigned int i = 0, e = MethodTypes.size() ; i < e ; i++) {
Elements.clear();
- Elements.push_back( llvm::ConstantExpr::getGetElementPtr(MethodNames[i],
- Zeros, 2));
- Elements.push_back(
- llvm::ConstantExpr::getGetElementPtr(MethodTypes[i], Zeros, 2));
+ Elements.push_back(MethodNames[i]);
+ Elements.push_back(MethodTypes[i]);
Methods.push_back(llvm::ConstantStruct::get(ObjCMethodDescTy, Elements));
}
llvm::ArrayType *ObjCMethodArrayTy = llvm::ArrayType::get(ObjCMethodDescTy,
MethodNames.size());
- llvm::Constant *Array = llvm::ConstantArray::get(ObjCMethodArrayTy, Methods);
- llvm::StructType *ObjCMethodDescListTy = llvm::StructType::get(
+ llvm::Constant *Array = llvm::ConstantArray::get(ObjCMethodArrayTy,
+ Methods);
+ llvm::StructType *ObjCMethodDescListTy = llvm::StructType::get(VMContext,
IntTy, ObjCMethodArrayTy, NULL);
Methods.clear();
Methods.push_back(llvm::ConstantInt::get(IntTy, MethodNames.size()));
Methods.push_back(Array);
return MakeGlobal(ObjCMethodDescListTy, Methods, ".objc_method_list");
}
+
// Create the protocol list structure used in classes, categories and so on
llvm::Constant *CGObjCGNU::GenerateProtocolList(
const llvm::SmallVectorImpl<std::string> &Protocols) {
llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
Protocols.size());
- llvm::StructType *ProtocolListTy = llvm::StructType::get(
+ llvm::StructType *ProtocolListTy = llvm::StructType::get(VMContext,
PtrTy, //Should be a recurisve pointer, but it's always NULL here.
LongTy,//FIXME: Should be size_t
ProtocolArrayTy,
NULL);
- std::vector<llvm::Constant*> Elements;
+ std::vector<llvm::Constant*> Elements;
for (const std::string *iter = Protocols.begin(), *endIter = Protocols.end();
iter != endIter ; iter++) {
llvm::Constant *protocol = ExistingProtocols[*iter];
if (!protocol)
protocol = GenerateEmptyProtocol(*iter);
- llvm::Constant *Ptr =
- llvm::ConstantExpr::getBitCast(protocol, PtrToInt8Ty);
+ llvm::Constant *Ptr = llvm::ConstantExpr::getBitCast(protocol,
+ PtrToInt8Ty);
Elements.push_back(Ptr);
}
llvm::Constant * ProtocolArray = llvm::ConstantArray::get(ProtocolArrayTy,
@@ -692,10 +761,10 @@ llvm::Constant *CGObjCGNU::GenerateProtocolList(
return MakeGlobal(ProtocolListTy, Elements, ".objc_protocol_list");
}
-llvm::Value *CGObjCGNU::GenerateProtocolRef(CGBuilderTy &Builder,
+llvm::Value *CGObjCGNU::GenerateProtocolRef(CGBuilderTy &Builder,
const ObjCProtocolDecl *PD) {
llvm::Value *protocol = ExistingProtocols[PD->getNameAsString()];
- const llvm::Type *T =
+ const llvm::Type *T =
CGM.getTypes().ConvertType(CGM.getContext().getObjCProtoType());
return Builder.CreateBitCast(protocol, llvm::PointerType::getUnqual(T));
}
@@ -706,27 +775,31 @@ llvm::Constant *CGObjCGNU::GenerateEmptyProtocol(
llvm::SmallVector<llvm::Constant*, 0> EmptyConstantVector;
llvm::Constant *ProtocolList = GenerateProtocolList(EmptyStringVector);
- llvm::Constant *InstanceMethodList =
- GenerateProtocolMethodList(EmptyConstantVector, EmptyConstantVector);
- llvm::Constant *ClassMethodList =
+ llvm::Constant *MethodList =
GenerateProtocolMethodList(EmptyConstantVector, EmptyConstantVector);
// Protocols are objects containing lists of the methods implemented and
// protocols adopted.
- llvm::StructType *ProtocolTy = llvm::StructType::get(IdTy,
+ llvm::StructType *ProtocolTy = llvm::StructType::get(VMContext, IdTy,
PtrToInt8Ty,
ProtocolList->getType(),
- InstanceMethodList->getType(),
- ClassMethodList->getType(),
+ MethodList->getType(),
+ MethodList->getType(),
+ MethodList->getType(),
+ MethodList->getType(),
NULL);
- std::vector<llvm::Constant*> Elements;
+ std::vector<llvm::Constant*> Elements;
// The isa pointer must be set to a magic number so the runtime knows it's
// the correct layout.
+ int Version = CGM.getContext().getLangOptions().ObjCNonFragileABI ?
+ NonFragileProtocolVersion : ProtocolVersion;
Elements.push_back(llvm::ConstantExpr::getIntToPtr(
- llvm::ConstantInt::get(llvm::Type::Int32Ty, ProtocolVersion), IdTy));
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Version), IdTy));
Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name"));
Elements.push_back(ProtocolList);
- Elements.push_back(InstanceMethodList);
- Elements.push_back(ClassMethodList);
+ Elements.push_back(MethodList);
+ Elements.push_back(MethodList);
+ Elements.push_back(MethodList);
+ Elements.push_back(MethodList);
return MakeGlobal(ProtocolTy, Elements, ".objc_protocol");
}
@@ -739,25 +812,41 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
Protocols.push_back((*PI)->getNameAsString());
llvm::SmallVector<llvm::Constant*, 16> InstanceMethodNames;
llvm::SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+ llvm::SmallVector<llvm::Constant*, 16> OptionalInstanceMethodNames;
+ llvm::SmallVector<llvm::Constant*, 16> OptionalInstanceMethodTypes;
for (ObjCProtocolDecl::instmeth_iterator iter = PD->instmeth_begin(),
E = PD->instmeth_end(); iter != E; iter++) {
std::string TypeStr;
Context.getObjCEncodingForMethodDecl(*iter, TypeStr);
- InstanceMethodNames.push_back(
- CGM.GetAddrOfConstantCString((*iter)->getSelector().getAsString()));
- InstanceMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr));
+ if ((*iter)->getImplementationControl() == ObjCMethodDecl::Optional) {
+ InstanceMethodNames.push_back(
+ MakeConstantString((*iter)->getSelector().getAsString()));
+ InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+ } else {
+ OptionalInstanceMethodNames.push_back(
+ MakeConstantString((*iter)->getSelector().getAsString()));
+ OptionalInstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
}
// Collect information about class methods:
llvm::SmallVector<llvm::Constant*, 16> ClassMethodNames;
llvm::SmallVector<llvm::Constant*, 16> ClassMethodTypes;
- for (ObjCProtocolDecl::classmeth_iterator
+ llvm::SmallVector<llvm::Constant*, 16> OptionalClassMethodNames;
+ llvm::SmallVector<llvm::Constant*, 16> OptionalClassMethodTypes;
+ for (ObjCProtocolDecl::classmeth_iterator
iter = PD->classmeth_begin(), endIter = PD->classmeth_end();
iter != endIter ; iter++) {
std::string TypeStr;
Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
- ClassMethodNames.push_back(
- CGM.GetAddrOfConstantCString((*iter)->getSelector().getAsString()));
- ClassMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr));
+ if ((*iter)->getImplementationControl() == ObjCMethodDecl::Optional) {
+ ClassMethodNames.push_back(
+ MakeConstantString((*iter)->getSelector().getAsString()));
+ ClassMethodTypes.push_back(MakeConstantString(TypeStr));
+ } else {
+ OptionalClassMethodNames.push_back(
+ MakeConstantString((*iter)->getSelector().getAsString()));
+ OptionalClassMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
}
llvm::Constant *ProtocolList = GenerateProtocolList(Protocols);
@@ -765,27 +854,165 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
GenerateProtocolMethodList(InstanceMethodNames, InstanceMethodTypes);
llvm::Constant *ClassMethodList =
GenerateProtocolMethodList(ClassMethodNames, ClassMethodTypes);
+ llvm::Constant *OptionalInstanceMethodList =
+ GenerateProtocolMethodList(OptionalInstanceMethodNames,
+ OptionalInstanceMethodTypes);
+ llvm::Constant *OptionalClassMethodList =
+ GenerateProtocolMethodList(OptionalClassMethodNames,
+ OptionalClassMethodTypes);
+
+ // Property metadata: name, attributes, isSynthesized, setter name, setter
+ // types, getter name, getter types.
+ // The isSynthesized value is always set to 0 in a protocol. It exists to
+ // simplify the runtime library by allowing it to use the same data
+ // structures for protocol metadata everywhere.
+ llvm::StructType *PropertyMetadataTy = llvm::StructType::get(VMContext,
+ PtrToInt8Ty, Int8Ty, Int8Ty, PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty,
+ PtrToInt8Ty, NULL);
+ std::vector<llvm::Constant*> Properties;
+ std::vector<llvm::Constant*> OptionalProperties;
+
+  // Add all of the property methods that need adding to the method list and
+  // to the property metadata list.
+ for (ObjCContainerDecl::prop_iterator
+ iter = PD->prop_begin(), endIter = PD->prop_end();
+ iter != endIter ; iter++) {
+ std::vector<llvm::Constant*> Fields;
+ ObjCPropertyDecl *property = (*iter);
+
+ Fields.push_back(MakeConstantString(property->getNameAsString()));
+ Fields.push_back(llvm::ConstantInt::get(Int8Ty,
+ property->getPropertyAttributes()));
+ Fields.push_back(llvm::ConstantInt::get(Int8Ty, 0));
+ if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(getter,TypeStr);
+ llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+ InstanceMethodTypes.push_back(TypeEncoding);
+ Fields.push_back(MakeConstantString(getter->getSelector().getAsString()));
+ Fields.push_back(TypeEncoding);
+ } else {
+ Fields.push_back(NULLPtr);
+ Fields.push_back(NULLPtr);
+ }
+ if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(setter,TypeStr);
+ llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+ InstanceMethodTypes.push_back(TypeEncoding);
+ Fields.push_back(MakeConstantString(setter->getSelector().getAsString()));
+ Fields.push_back(TypeEncoding);
+ } else {
+ Fields.push_back(NULLPtr);
+ Fields.push_back(NULLPtr);
+ }
+ if (property->getPropertyImplementation() == ObjCPropertyDecl::Optional) {
+ OptionalProperties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields));
+ } else {
+ Properties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields));
+ }
+ }
+ llvm::Constant *PropertyArray = llvm::ConstantArray::get(
+ llvm::ArrayType::get(PropertyMetadataTy, Properties.size()), Properties);
+ llvm::Constant* PropertyListInitFields[] =
+ {llvm::ConstantInt::get(IntTy, Properties.size()), NULLPtr, PropertyArray};
+
+ llvm::Constant *PropertyListInit =
+ llvm::ConstantStruct::get(VMContext, PropertyListInitFields, 3, false);
+ llvm::Constant *PropertyList = new llvm::GlobalVariable(TheModule,
+ PropertyListInit->getType(), false, llvm::GlobalValue::InternalLinkage,
+ PropertyListInit, ".objc_property_list");
+
+ llvm::Constant *OptionalPropertyArray =
+ llvm::ConstantArray::get(llvm::ArrayType::get(PropertyMetadataTy,
+ OptionalProperties.size()) , OptionalProperties);
+ llvm::Constant* OptionalPropertyListInitFields[] = {
+ llvm::ConstantInt::get(IntTy, OptionalProperties.size()), NULLPtr,
+ OptionalPropertyArray };
+
+ llvm::Constant *OptionalPropertyListInit =
+ llvm::ConstantStruct::get(VMContext, OptionalPropertyListInitFields, 3, false);
+ llvm::Constant *OptionalPropertyList = new llvm::GlobalVariable(TheModule,
+ OptionalPropertyListInit->getType(), false,
+ llvm::GlobalValue::InternalLinkage, OptionalPropertyListInit,
+ ".objc_property_list");
+
// Protocols are objects containing lists of the methods implemented and
// protocols adopted.
- llvm::StructType *ProtocolTy = llvm::StructType::get(IdTy,
+ llvm::StructType *ProtocolTy = llvm::StructType::get(VMContext, IdTy,
PtrToInt8Ty,
ProtocolList->getType(),
InstanceMethodList->getType(),
ClassMethodList->getType(),
+ OptionalInstanceMethodList->getType(),
+ OptionalClassMethodList->getType(),
+ PropertyList->getType(),
+ OptionalPropertyList->getType(),
NULL);
- std::vector<llvm::Constant*> Elements;
+ std::vector<llvm::Constant*> Elements;
// The isa pointer must be set to a magic number so the runtime knows it's
// the correct layout.
+ int Version = CGM.getContext().getLangOptions().ObjCNonFragileABI ?
+ NonFragileProtocolVersion : ProtocolVersion;
Elements.push_back(llvm::ConstantExpr::getIntToPtr(
- llvm::ConstantInt::get(llvm::Type::Int32Ty, ProtocolVersion), IdTy));
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Version), IdTy));
Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name"));
Elements.push_back(ProtocolList);
Elements.push_back(InstanceMethodList);
Elements.push_back(ClassMethodList);
- ExistingProtocols[ProtocolName] =
+ Elements.push_back(OptionalInstanceMethodList);
+ Elements.push_back(OptionalClassMethodList);
+ Elements.push_back(PropertyList);
+ Elements.push_back(OptionalPropertyList);
+ ExistingProtocols[ProtocolName] =
llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolTy, Elements,
".objc_protocol"), IdTy);
}
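
A protocol object now carries four method-description lists (required and optional, for instances and for the class) plus two property lists, and its isa slot is reused to hold a layout-version magic number so the runtime can distinguish the new records from old ones. Sketched as a C struct, with field names invented here and only the field order and count taken from the ProtocolTy above:

// Sketch of the protocol record emitted by GenerateProtocol.
struct objc_protocol_sketch {
  void *isa;                        // really the protocol version, IntToPtr'd
  const char *name;
  void *protocol_list;              // protocols this protocol adopts
  void *instance_methods;           // required instance methods
  void *class_methods;              // required class methods
  void *optional_instance_methods;
  void *optional_class_methods;
  void *properties;                 // required properties
  void *optional_properties;
};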
+void CGObjCGNU::GenerateProtocolHolderCategory(void) {
+ // Collect information about instance methods
+ llvm::SmallVector<Selector, 1> MethodSels;
+ llvm::SmallVector<llvm::Constant*, 1> MethodTypes;
+
+ std::vector<llvm::Constant*> Elements;
+ const std::string ClassName = "__ObjC_Protocol_Holder_Ugly_Hack";
+ const std::string CategoryName = "AnotherHack";
+ Elements.push_back(MakeConstantString(CategoryName));
+ Elements.push_back(MakeConstantString(ClassName));
+ // Instance method list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+ ClassName, CategoryName, MethodSels, MethodTypes, false), PtrTy));
+ // Class method list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+ ClassName, CategoryName, MethodSels, MethodTypes, true), PtrTy));
+ // Protocol list
+ llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrTy,
+ ExistingProtocols.size());
+ llvm::StructType *ProtocolListTy = llvm::StructType::get(VMContext,
+      PtrTy, //Should be a recursive pointer, but it's always NULL here.
+ LongTy,//FIXME: Should be size_t
+ ProtocolArrayTy,
+ NULL);
+ std::vector<llvm::Constant*> ProtocolElements;
+ for (llvm::StringMapIterator<llvm::Constant*> iter =
+ ExistingProtocols.begin(), endIter = ExistingProtocols.end();
+ iter != endIter ; iter++) {
+ llvm::Constant *Ptr = llvm::ConstantExpr::getBitCast(iter->getValue(),
+ PtrTy);
+ ProtocolElements.push_back(Ptr);
+ }
+ llvm::Constant * ProtocolArray = llvm::ConstantArray::get(ProtocolArrayTy,
+ ProtocolElements);
+ ProtocolElements.clear();
+ ProtocolElements.push_back(NULLPtr);
+ ProtocolElements.push_back(llvm::ConstantInt::get(LongTy,
+ ExistingProtocols.size()));
+ ProtocolElements.push_back(ProtocolArray);
+ Elements.push_back(llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolListTy,
+ ProtocolElements, ".objc_protocol_list"), PtrTy));
+ Categories.push_back(llvm::ConstantExpr::getBitCast(
+ MakeGlobal(llvm::StructType::get(VMContext, PtrToInt8Ty, PtrToInt8Ty,
+ PtrTy, PtrTy, PtrTy, NULL), Elements), PtrTy));
+}
void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
std::string ClassName = OCD->getClassInterface()->getNameAsString();
@@ -799,19 +1026,19 @@ void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
InstanceMethodSels.push_back((*iter)->getSelector());
std::string TypeStr;
CGM.getContext().getObjCEncodingForMethodDecl(*iter,TypeStr);
- InstanceMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr));
+ InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
}
// Collect information about class methods
llvm::SmallVector<Selector, 16> ClassMethodSels;
llvm::SmallVector<llvm::Constant*, 16> ClassMethodTypes;
- for (ObjCCategoryImplDecl::classmeth_iterator
+ for (ObjCCategoryImplDecl::classmeth_iterator
iter = OCD->classmeth_begin(), endIter = OCD->classmeth_end();
iter != endIter ; iter++) {
ClassMethodSels.push_back((*iter)->getSelector());
std::string TypeStr;
CGM.getContext().getObjCEncodingForMethodDecl(*iter,TypeStr);
- ClassMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr));
+ ClassMethodTypes.push_back(MakeConstantString(TypeStr));
}
// Collect the names of referenced protocols
@@ -825,7 +1052,7 @@ void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
std::vector<llvm::Constant*> Elements;
Elements.push_back(MakeConstantString(CategoryName));
Elements.push_back(MakeConstantString(ClassName));
- // Instance method list
+ // Instance method list
Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
ClassName, CategoryName, InstanceMethodSels, InstanceMethodTypes,
false), PtrTy));
@@ -837,15 +1064,82 @@ void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
Elements.push_back(llvm::ConstantExpr::getBitCast(
GenerateProtocolList(Protocols), PtrTy));
Categories.push_back(llvm::ConstantExpr::getBitCast(
- MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, PtrTy,
- PtrTy, PtrTy, NULL), Elements), PtrTy));
+ MakeGlobal(llvm::StructType::get(VMContext, PtrToInt8Ty, PtrToInt8Ty,
+ PtrTy, PtrTy, PtrTy, NULL), Elements), PtrTy));
+}
+
+llvm::Constant *CGObjCGNU::GeneratePropertyList(const ObjCImplementationDecl *OID,
+ llvm::SmallVectorImpl<Selector> &InstanceMethodSels,
+ llvm::SmallVectorImpl<llvm::Constant*> &InstanceMethodTypes) {
+ ASTContext &Context = CGM.getContext();
+ //
+ // Property metadata: name, attributes, isSynthesized, setter name, setter
+ // types, getter name, getter types.
+ llvm::StructType *PropertyMetadataTy = llvm::StructType::get(VMContext,
+ PtrToInt8Ty, Int8Ty, Int8Ty, PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty,
+ PtrToInt8Ty, NULL);
+ std::vector<llvm::Constant*> Properties;
+
+
+  // Add all of the property methods that need adding to the method list and
+  // to the property metadata list.
+ for (ObjCImplDecl::propimpl_iterator
+ iter = OID->propimpl_begin(), endIter = OID->propimpl_end();
+ iter != endIter ; iter++) {
+ std::vector<llvm::Constant*> Fields;
+ ObjCPropertyDecl *property = (*iter)->getPropertyDecl();
+
+ Fields.push_back(MakeConstantString(property->getNameAsString()));
+ Fields.push_back(llvm::ConstantInt::get(Int8Ty,
+ property->getPropertyAttributes()));
+ Fields.push_back(llvm::ConstantInt::get(Int8Ty,
+ (*iter)->getPropertyImplementation() ==
+ ObjCPropertyImplDecl::Synthesize));
+ if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) {
+ InstanceMethodSels.push_back(getter->getSelector());
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(getter,TypeStr);
+ llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+ InstanceMethodTypes.push_back(TypeEncoding);
+ Fields.push_back(MakeConstantString(getter->getSelector().getAsString()));
+ Fields.push_back(TypeEncoding);
+ } else {
+ Fields.push_back(NULLPtr);
+ Fields.push_back(NULLPtr);
+ }
+ if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) {
+ InstanceMethodSels.push_back(setter->getSelector());
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(setter,TypeStr);
+ llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+ InstanceMethodTypes.push_back(TypeEncoding);
+ Fields.push_back(MakeConstantString(setter->getSelector().getAsString()));
+ Fields.push_back(TypeEncoding);
+ } else {
+ Fields.push_back(NULLPtr);
+ Fields.push_back(NULLPtr);
+ }
+ Properties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields));
+ }
+ llvm::ArrayType *PropertyArrayTy =
+ llvm::ArrayType::get(PropertyMetadataTy, Properties.size());
+ llvm::Constant *PropertyArray = llvm::ConstantArray::get(PropertyArrayTy,
+ Properties);
+ llvm::Constant* PropertyListInitFields[] =
+ {llvm::ConstantInt::get(IntTy, Properties.size()), NULLPtr, PropertyArray};
+
+ llvm::Constant *PropertyListInit =
+ llvm::ConstantStruct::get(VMContext, PropertyListInitFields, 3, false);
+ return new llvm::GlobalVariable(TheModule, PropertyListInit->getType(), false,
+ llvm::GlobalValue::InternalLinkage, PropertyListInit,
+ ".objc_property_list");
}
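
Both the protocol path above and GeneratePropertyList emit the same seven-field property record wrapped in a {count, next, array} header. As a C sketch, with field names invented here (the hunk only provides the comment "name, attributes, isSynthesized, setter name, setter types, getter name, getter types", and the accessor name/encoding pairs are pushed in the order visible in the loop):

// Sketch of one property metadata entry plus the list that wraps the entries.
struct objc_property_sketch {
  const char *name;
  char attributes;           // property attribute bits, truncated to an i8
  char is_synthesized;       // 0 in protocols; set for @synthesize in classes
  const char *getter_name;   // accessor selector names and type encodings,
  const char *getter_types;  // in the order the loop pushes them
  const char *setter_name;
  const char *setter_types;
};

struct objc_property_list_sketch {
  int count;
  objc_property_list_sketch *next;     // emitted as NULL
  objc_property_sketch properties[1];  // actually `count` entries
};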
void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
ASTContext &Context = CGM.getContext();
// Get the superclass name.
- const ObjCInterfaceDecl * SuperClassDecl =
+ const ObjCInterfaceDecl * SuperClassDecl =
OID->getClassInterface()->getSuperClass();
std::string SuperClassName;
if (SuperClassDecl) {
@@ -860,14 +1154,15 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
// Emit the symbol that is used to generate linker errors if this class is
// referenced in other modules but not declared.
std::string classSymbolName = "__objc_class_name_" + ClassName;
- if (llvm::GlobalVariable *symbol =
+ if (llvm::GlobalVariable *symbol =
TheModule.getGlobalVariable(classSymbolName)) {
symbol->setInitializer(llvm::ConstantInt::get(LongTy, 0));
} else {
- new llvm::GlobalVariable(LongTy, false, llvm::GlobalValue::ExternalLinkage,
- llvm::ConstantInt::get(LongTy, 0), classSymbolName, &TheModule);
+ new llvm::GlobalVariable(TheModule, LongTy, false,
+ llvm::GlobalValue::ExternalLinkage, llvm::ConstantInt::get(LongTy, 0),
+ classSymbolName);
}
-
+
// Get the size of instances.
int instanceSize = Context.getASTObjCImplementationLayout(OID).getSize() / 8;
@@ -875,8 +1170,10 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
llvm::SmallVector<llvm::Constant*, 16> IvarNames;
llvm::SmallVector<llvm::Constant*, 16> IvarTypes;
llvm::SmallVector<llvm::Constant*, 16> IvarOffsets;
-
- int superInstanceSize = !SuperClassDecl ? 0 :
+
+ std::vector<llvm::Constant*> IvarOffsetValues;
+
+ int superInstanceSize = !SuperClassDecl ? 0 :
Context.getASTObjCInterfaceLayout(SuperClassDecl).getSize() / 8;
// For non-fragile ivars, set the instance size to 0 - {the size of just this
// class}. The runtime will then set this to the correct value on load.
@@ -886,54 +1183,49 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
for (ObjCInterfaceDecl::ivar_iterator iter = ClassDecl->ivar_begin(),
endIter = ClassDecl->ivar_end() ; iter != endIter ; iter++) {
// Store the name
- IvarNames.push_back(CGM.GetAddrOfConstantCString((*iter)
- ->getNameAsString()));
+ IvarNames.push_back(MakeConstantString((*iter)->getNameAsString()));
// Get the type encoding for this ivar
std::string TypeStr;
Context.getObjCEncodingForType((*iter)->getType(), TypeStr);
- IvarTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr));
+ IvarTypes.push_back(MakeConstantString(TypeStr));
// Get the offset
- uint64_t Offset;
+ uint64_t Offset = 0;
+ uint64_t BaseOffset = ComputeIvarBaseOffset(CGM, ClassDecl, *iter);
if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
- Offset = ComputeIvarBaseOffset(CGM, ClassDecl, *iter) -
- superInstanceSize;
- ObjCIvarOffsetVariable(ClassDecl, *iter);
- } else {
- Offset = ComputeIvarBaseOffset(CGM, ClassDecl, *iter);
+ Offset = BaseOffset - superInstanceSize;
}
IvarOffsets.push_back(
- llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset));
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Offset));
+ IvarOffsetValues.push_back(new llvm::GlobalVariable(TheModule, IntTy,
+ false, llvm::GlobalValue::ExternalLinkage,
+ llvm::ConstantInt::get(IntTy, BaseOffset),
+ "__objc_ivar_offset_value_" + ClassName +"." +
+ (*iter)->getNameAsString()));
}
+ llvm::Constant *IvarOffsetArrayInit =
+ llvm::ConstantArray::get(llvm::ArrayType::get(PtrToIntTy,
+ IvarOffsetValues.size()), IvarOffsetValues);
+ llvm::GlobalVariable *IvarOffsetArray = new llvm::GlobalVariable(TheModule,
+ IvarOffsetArrayInit->getType(), false,
+ llvm::GlobalValue::InternalLinkage, IvarOffsetArrayInit,
+ ".ivar.offsets");
// Collect information about instance methods
llvm::SmallVector<Selector, 16> InstanceMethodSels;
llvm::SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
- for (ObjCImplementationDecl::instmeth_iterator
+ for (ObjCImplementationDecl::instmeth_iterator
iter = OID->instmeth_begin(), endIter = OID->instmeth_end();
iter != endIter ; iter++) {
InstanceMethodSels.push_back((*iter)->getSelector());
std::string TypeStr;
Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
- InstanceMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr));
- }
- for (ObjCImplDecl::propimpl_iterator
- iter = OID->propimpl_begin(), endIter = OID->propimpl_end();
- iter != endIter ; iter++) {
- ObjCPropertyDecl *property = (*iter)->getPropertyDecl();
- if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) {
- InstanceMethodSels.push_back(getter->getSelector());
- std::string TypeStr;
- Context.getObjCEncodingForMethodDecl(getter,TypeStr);
- InstanceMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr));
- }
- if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) {
- InstanceMethodSels.push_back(setter->getSelector());
- std::string TypeStr;
- Context.getObjCEncodingForMethodDecl(setter,TypeStr);
- InstanceMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr));
- }
+ InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
}
+ llvm::Constant *Properties = GeneratePropertyList(OID, InstanceMethodSels,
+ InstanceMethodTypes);
+
+
// Collect information about class methods
llvm::SmallVector<Selector, 16> ClassMethodSels;
llvm::SmallVector<llvm::Constant*, 16> ClassMethodTypes;
@@ -943,7 +1235,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
ClassMethodSels.push_back((*iter)->getSelector());
std::string TypeStr;
Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
- ClassMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr));
+ ClassMethodTypes.push_back(MakeConstantString(TypeStr));
}
// Collect the names of referenced protocols
llvm::SmallVector<std::string, 16> Protocols;
@@ -970,17 +1262,55 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
ClassMethodSels, ClassMethodTypes, true);
llvm::Constant *IvarList = GenerateIvarList(IvarNames, IvarTypes,
IvarOffsets);
+ // Irrespective of whether we are compiling for a fragile or non-fragile ABI,
+ // we emit a symbol containing the offset for each ivar in the class. This
+ // allows code compiled for the non-Fragile ABI to inherit from code compiled
+ // for the legacy ABI, without causing problems. The converse is also
+ // possible, but causes all ivar accesses to be fragile.
+ int i = 0;
+ // Offset pointer for getting at the correct field in the ivar list when
+ // setting up the alias. These are: The base address for the global, the
+ // ivar array (second field), the ivar in this list (set for each ivar), and
+ // the offset (third field in ivar structure)
+ const llvm::Type *IndexTy = llvm::Type::getInt32Ty(VMContext);
+ llvm::Constant *offsetPointerIndexes[] = {Zeros[0],
+ llvm::ConstantInt::get(IndexTy, 1), 0,
+ llvm::ConstantInt::get(IndexTy, 2) };
+
+ for (ObjCInterfaceDecl::ivar_iterator iter = ClassDecl->ivar_begin(),
+ endIter = ClassDecl->ivar_end() ; iter != endIter ; iter++) {
+ const std::string Name = "__objc_ivar_offset_" + ClassName + '.'
+ +(*iter)->getNameAsString();
+ offsetPointerIndexes[2] = llvm::ConstantInt::get(IndexTy, i++);
+ // Get the correct ivar field
+ llvm::Constant *offsetValue = llvm::ConstantExpr::getGetElementPtr(
+ IvarList, offsetPointerIndexes, 4);
+ // Get the existing alias, if one exists.
+ llvm::GlobalVariable *offset = TheModule.getNamedGlobal(Name);
+ if (offset) {
+ offset->setInitializer(offsetValue);
+ // If this is the real definition, change its linkage type so that
+ // different modules will use this one, rather than their private
+ // copy.
+ offset->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ } else {
+ // Add a new alias if there isn't one already.
+ offset = new llvm::GlobalVariable(TheModule, offsetValue->getType(),
+ false, llvm::GlobalValue::ExternalLinkage, offsetValue, Name);
+ }
+ }
//Generate metaclass for class methods
llvm::Constant *MetaClassStruct = GenerateClassStructure(NULLPtr,
- NULLPtr, 0x2L, /*name*/"", 0, Zeros[0], GenerateIvarList(
- empty, empty, empty), ClassMethodList, NULLPtr);
+ NULLPtr, 0x12L, /*name*/"", 0, Zeros[0], GenerateIvarList(
+ empty, empty, empty), ClassMethodList, NULLPtr, NULLPtr, NULLPtr);
// Generate the class structure
llvm::Constant *ClassStruct =
- GenerateClassStructure(MetaClassStruct, SuperClass, 0x1L,
+ GenerateClassStructure(MetaClassStruct, SuperClass, 0x11L,
ClassName.c_str(), 0,
llvm::ConstantInt::get(LongTy, instanceSize), IvarList,
- MethodList, GenerateProtocolList(Protocols));
+ MethodList, GenerateProtocolList(Protocols), IvarOffsetArray,
+ Properties);
// Resolve the class aliases, if they exist.
if (ClassPtrAlias) {
@@ -999,23 +1329,24 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
Classes.push_back(ClassStruct);
}
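
The block that emits __objc_ivar_offset_<class>.<ivar> globals is what lets code built for the non-fragile ABI subclass code that was not: each class publishes the offset of every ivar, once as a pointer into its own ivar list and once as an __objc_ivar_offset_value_... integer, and accesses in other modules can read that value at run time instead of baking in a constant. A minimal sketch of the consuming side, assuming a made-up offset symbol (the real names contain a '.' and cannot be spelled as C identifiers, so a stand-in variable is used):

// Illustration of non-fragile ivar access through a published offset. The
// variable below stands in for an emitted __objc_ivar_offset_value global.
static long ivar_offset_stand_in = 16;  // hypothetical; the defining module supplies the real value

static int load_int_ivar(char *object) {
  // Add the published offset at run time rather than a hard-coded constant,
  // so the superclass can grow without this code being recompiled.
  return *reinterpret_cast<int *>(object + ivar_offset_stand_in);
}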
-void CGObjCGNU::MergeMetadataGlobals(
- std::vector<llvm::Constant*> &UsedArray) {
-}
-llvm::Function *CGObjCGNU::ModuleInitFunction() {
+llvm::Function *CGObjCGNU::ModuleInitFunction() {
// Only emit an ObjC load function if no Objective-C stuff has been called
if (Classes.empty() && Categories.empty() && ConstantStrings.empty() &&
ExistingProtocols.empty() && TypedSelectors.empty() &&
UntypedSelectors.empty())
return NULL;
+ // Add all referenced protocols to a category.
+ GenerateProtocolHolderCategory();
+
const llvm::StructType *SelStructTy = dyn_cast<llvm::StructType>(
SelectorTy->getElementType());
const llvm::Type *SelStructPtrTy = SelectorTy;
bool isSelOpaque = false;
if (SelStructTy == 0) {
- SelStructTy = llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, NULL);
+ SelStructTy = llvm::StructType::get(VMContext, PtrToInt8Ty,
+ PtrToInt8Ty, NULL);
SelStructPtrTy = llvm::PointerType::getUnqual(SelStructTy);
isSelOpaque = true;
}
@@ -1032,13 +1363,17 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
llvm::ArrayType *StaticsArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
ConstantStrings.size() + 1);
ConstantStrings.push_back(NULLPtr);
- Elements.push_back(MakeConstantString("NSConstantString",
- ".objc_static_class_name"));
+
+ const char *StringClass = CGM.getLangOptions().ObjCConstantStringClass;
+ if (!StringClass) StringClass = "NXConstantString";
+ Elements.push_back(MakeConstantString(StringClass,
+ ".objc_static_class_name"));
Elements.push_back(llvm::ConstantArray::get(StaticsArrayTy,
ConstantStrings));
- llvm::StructType *StaticsListTy =
- llvm::StructType::get(PtrToInt8Ty, StaticsArrayTy, NULL);
- llvm::Type *StaticsListPtrTy = llvm::PointerType::getUnqual(StaticsListTy);
+ llvm::StructType *StaticsListTy =
+ llvm::StructType::get(VMContext, PtrToInt8Ty, StaticsArrayTy, NULL);
+ llvm::Type *StaticsListPtrTy =
+ llvm::PointerType::getUnqual(StaticsListTy);
Statics = MakeGlobal(StaticsListTy, Elements, ".objc_statics");
llvm::ArrayType *StaticsListArrayTy =
llvm::ArrayType::get(StaticsListPtrTy, 2);
@@ -1051,9 +1386,10 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
// Array of classes, categories, and constant objects
llvm::ArrayType *ClassListTy = llvm::ArrayType::get(PtrToInt8Ty,
Classes.size() + Categories.size() + 2);
- llvm::StructType *SymTabTy = llvm::StructType::get(LongTy, SelStructPtrTy,
- llvm::Type::Int16Ty,
- llvm::Type::Int16Ty,
+ llvm::StructType *SymTabTy = llvm::StructType::get(VMContext,
+ LongTy, SelStructPtrTy,
+ llvm::Type::getInt16Ty(VMContext),
+ llvm::Type::getInt16Ty(VMContext),
ClassListTy, NULL);
Elements.clear();
@@ -1062,7 +1398,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
for (std::map<TypedSelector, llvm::GlobalAlias*>::iterator
iter = TypedSelectors.begin(), iterEnd = TypedSelectors.end();
iter != iterEnd ; ++iter) {
- Elements.push_back(MakeConstantString(iter->first.first, ".objc_sel_name"));
+ Elements.push_back(ExportUniqueString(iter->first.first, ".objc_sel_name"));
Elements.push_back(MakeConstantString(iter->first.second,
".objc_sel_types"));
Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
@@ -1072,7 +1408,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
iter = UntypedSelectors.begin(), iterEnd = UntypedSelectors.end();
iter != iterEnd; ++iter) {
Elements.push_back(
- MakeConstantString(iter->getKeyData(), ".objc_sel_name"));
+ ExportUniqueString(iter->getKeyData(), ".objc_sel_name"));
Elements.push_back(NULLPtr);
Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
Elements.clear();
@@ -1086,7 +1422,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
llvm::Constant *SelectorList = MakeGlobal(
llvm::ArrayType::get(SelStructTy, Selectors.size()), Selectors,
".objc_selector_list");
- Elements.push_back(llvm::ConstantExpr::getBitCast(SelectorList,
+ Elements.push_back(llvm::ConstantExpr::getBitCast(SelectorList,
SelStructPtrTy));
// Now that all of the static selectors exist, create pointers to them.
@@ -1095,11 +1431,11 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
iter=TypedSelectors.begin(), iterEnd =TypedSelectors.end();
iter != iterEnd; ++iter) {
llvm::Constant *Idxs[] = {Zeros[0],
- llvm::ConstantInt::get(llvm::Type::Int32Ty, index++), Zeros[0]};
- llvm::Constant *SelPtr = new llvm::GlobalVariable(SelStructPtrTy,
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), index++), Zeros[0]};
+ llvm::Constant *SelPtr = new llvm::GlobalVariable(TheModule, SelStructPtrTy,
true, llvm::GlobalValue::InternalLinkage,
llvm::ConstantExpr::getGetElementPtr(SelectorList, Idxs, 2),
- ".objc_sel_ptr", &TheModule);
+ ".objc_sel_ptr");
// If selectors are defined as an opaque type, cast the pointer to this
// type.
if (isSelOpaque) {
@@ -1112,11 +1448,12 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
iter=UntypedSelectors.begin(), iterEnd = UntypedSelectors.end();
iter != iterEnd; iter++) {
llvm::Constant *Idxs[] = {Zeros[0],
- llvm::ConstantInt::get(llvm::Type::Int32Ty, index++), Zeros[0]};
- llvm::Constant *SelPtr = new llvm::GlobalVariable(SelStructPtrTy, true,
- llvm::GlobalValue::InternalLinkage,
- llvm::ConstantExpr::getGetElementPtr(SelectorList, Idxs, 2),
- ".objc_sel_ptr", &TheModule);
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), index++), Zeros[0]};
+ llvm::Constant *SelPtr = new llvm::GlobalVariable
+ (TheModule, SelStructPtrTy,
+ true, llvm::GlobalValue::InternalLinkage,
+ llvm::ConstantExpr::getGetElementPtr(SelectorList, Idxs, 2),
+ ".objc_sel_ptr");
// If selectors are defined as an opaque type, cast the pointer to this
// type.
if (isSelOpaque) {
@@ -1126,10 +1463,10 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
(*iter).second->setAliasee(SelPtr);
}
// Number of classes defined.
- Elements.push_back(llvm::ConstantInt::get(llvm::Type::Int16Ty,
+ Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext),
Classes.size()));
// Number of categories defined
- Elements.push_back(llvm::ConstantInt::get(llvm::Type::Int16Ty,
+ Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext),
Categories.size()));
// Create an array of classes, then categories, then static object instances
Classes.insert(Classes.end(), Categories.begin(), Categories.end());
@@ -1138,24 +1475,25 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
Classes.push_back(NULLPtr);
llvm::Constant *ClassList = llvm::ConstantArray::get(ClassListTy, Classes);
Elements.push_back(ClassList);
- // Construct the symbol table
+ // Construct the symbol table
llvm::Constant *SymTab= MakeGlobal(SymTabTy, Elements);
// The symbol table is contained in a module which has some version-checking
// constants
- llvm::StructType * ModuleTy = llvm::StructType::get(LongTy, LongTy,
+ llvm::StructType * ModuleTy = llvm::StructType::get(VMContext, LongTy, LongTy,
PtrToInt8Ty, llvm::PointerType::getUnqual(SymTabTy), NULL);
Elements.clear();
// Runtime version used for compatibility checking.
if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
- Elements.push_back(llvm::ConstantInt::get(LongTy,
+ Elements.push_back(llvm::ConstantInt::get(LongTy,
NonFragileRuntimeVersion));
} else {
Elements.push_back(llvm::ConstantInt::get(LongTy, RuntimeVersion));
}
// sizeof(ModuleTy)
llvm::TargetData td = llvm::TargetData::TargetData(&TheModule);
- Elements.push_back(llvm::ConstantInt::get(LongTy, td.getTypeSizeInBits(ModuleTy)/8));
+ Elements.push_back(llvm::ConstantInt::get(LongTy,
+ td.getTypeSizeInBits(ModuleTy)/8));
//FIXME: Should be the path to the file where this module was declared
Elements.push_back(NULLPtr);
Elements.push_back(SymTab);
@@ -1164,17 +1502,18 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
// Create the load function calling the runtime entry point with the module
// structure
llvm::Function * LoadFunction = llvm::Function::Create(
- llvm::FunctionType::get(llvm::Type::VoidTy, false),
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), false),
llvm::GlobalValue::InternalLinkage, ".objc_load_function",
&TheModule);
- llvm::BasicBlock *EntryBB = llvm::BasicBlock::Create("entry", LoadFunction);
- CGBuilderTy Builder;
+ llvm::BasicBlock *EntryBB =
+ llvm::BasicBlock::Create(VMContext, "entry", LoadFunction);
+ CGBuilderTy Builder(VMContext);
Builder.SetInsertPoint(EntryBB);
std::vector<const llvm::Type*> Params(1,
llvm::PointerType::getUnqual(ModuleTy));
llvm::Value *Register = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
- llvm::Type::VoidTy, Params, true), "__objc_exec_class");
+ llvm::Type::getVoidTy(VMContext), Params, true), "__objc_exec_class");
Builder.CreateCall(Register, Module);
Builder.CreateRetVoid();
@@ -1182,8 +1521,8 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
}
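
ModuleInitFunction ties everything together: the selector list, the class/category/statics table and the version numbers are folded into a symbol table and a module record, and a synthesized .objc_load_function hands that record to __objc_exec_class. Written by hand it comes down to roughly the following; only __objc_exec_class and the field order are taken from the hunks, and the names are descriptive.

// Hand-written equivalent of the generated module registration. The layouts
// mirror the SymTabTy and ModuleTy struct types assembled above.
extern "C" void __objc_exec_class(void *module);  // GNU runtime entry point

struct objc_symtab_sketch {
  long sel_ref_cnt;   // LongTy slot
  void *refs;         // pointer to the static selector list
  short cls_def_cnt;  // number of classes defined
  short cat_def_cnt;  // number of categories defined
  void *defs[1];      // classes, then categories, then statics, NULL-terminated
};

struct objc_module_sketch {
  long version;       // RuntimeVersion or NonFragileRuntimeVersion
  long size;          // size of the module structure, computed via TargetData
  const char *name;   // the FIXME notes this should be the source file path
  objc_symtab_sketch *symtab;
};

static objc_module_sketch module_record;  // stands in for the emitted module global

// Equivalent of the parameterless .objc_load_function body:
static void objc_load_function_sketch() {
  __objc_exec_class(&module_record);
}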
llvm::Function *CGObjCGNU::GenerateMethod(const ObjCMethodDecl *OMD,
- const ObjCContainerDecl *CD) {
- const ObjCCategoryImplDecl *OCD =
+ const ObjCContainerDecl *CD) {
+ const ObjCCategoryImplDecl *OCD =
dyn_cast<ObjCCategoryImplDecl>(OMD->getDeclContext());
std::string CategoryName = OCD ? OCD->getNameAsString() : "";
std::string ClassName = OMD->getClassInterface()->getNameAsString();
@@ -1191,71 +1530,76 @@ llvm::Function *CGObjCGNU::GenerateMethod(const ObjCMethodDecl *OMD,
bool isClassMethod = !OMD->isInstanceMethod();
CodeGenTypes &Types = CGM.getTypes();
- const llvm::FunctionType *MethodTy =
+ const llvm::FunctionType *MethodTy =
Types.GetFunctionType(Types.getFunctionInfo(OMD), OMD->isVariadic());
std::string FunctionName = SymbolNameForMethod(ClassName, CategoryName,
MethodName, isClassMethod);
- llvm::Function *Method = llvm::Function::Create(MethodTy,
- llvm::GlobalValue::InternalLinkage,
- FunctionName,
- &TheModule);
+ llvm::Function *Method
+ = llvm::Function::Create(MethodTy,
+ llvm::GlobalValue::InternalLinkage,
+ FunctionName,
+ &TheModule);
return Method;
}
llvm::Function *CGObjCGNU::GetPropertyGetFunction() {
- std::vector<const llvm::Type*> Params;
- const llvm::Type *BoolTy =
- CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
- Params.push_back(IdTy);
- Params.push_back(SelectorTy);
- // FIXME: Using LongTy for ptrdiff_t is probably broken on Win64
- Params.push_back(LongTy);
- Params.push_back(BoolTy);
- // void objc_getProperty (id, SEL, ptrdiff_t, bool)
- const llvm::FunctionType *FTy =
- llvm::FunctionType::get(IdTy, Params, false);
- return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
- "objc_getProperty"));
+ std::vector<const llvm::Type*> Params;
+ const llvm::Type *BoolTy =
+ CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
+ Params.push_back(IdTy);
+ Params.push_back(SelectorTy);
+ // FIXME: Using LongTy for ptrdiff_t is probably broken on Win64
+ Params.push_back(LongTy);
+ Params.push_back(BoolTy);
+ // void objc_getProperty (id, SEL, ptrdiff_t, bool)
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(IdTy, Params, false);
+ return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
+ "objc_getProperty"));
}
llvm::Function *CGObjCGNU::GetPropertySetFunction() {
- std::vector<const llvm::Type*> Params;
- const llvm::Type *BoolTy =
- CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
- Params.push_back(IdTy);
- Params.push_back(SelectorTy);
- // FIXME: Using LongTy for ptrdiff_t is probably broken on Win64
- Params.push_back(LongTy);
- Params.push_back(IdTy);
- Params.push_back(BoolTy);
- Params.push_back(BoolTy);
- // void objc_setProperty (id, SEL, ptrdiff_t, id, bool, bool)
- const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::VoidTy, Params, false);
- return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
- "objc_setProperty"));
+ std::vector<const llvm::Type*> Params;
+ const llvm::Type *BoolTy =
+ CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
+ Params.push_back(IdTy);
+ Params.push_back(SelectorTy);
+ // FIXME: Using LongTy for ptrdiff_t is probably broken on Win64
+ Params.push_back(LongTy);
+ Params.push_back(IdTy);
+ Params.push_back(BoolTy);
+ Params.push_back(BoolTy);
+ // void objc_setProperty (id, SEL, ptrdiff_t, id, bool, bool)
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, false);
+ return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
+ "objc_setProperty"));
}
-llvm::Function *CGObjCGNU::EnumerationMutationFunction() {
- std::vector<const llvm::Type*> Params(1, IdTy);
- return cast<llvm::Function>(CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(llvm::Type::VoidTy, Params, true),
- "objc_enumerationMutation"));
+llvm::Constant *CGObjCGNU::EnumerationMutationFunction() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // void objc_enumerationMutation (id)
+ llvm::SmallVector<QualType,16> Params;
+ Params.push_back(ASTIdTy);
+ const llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params), false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation");
}
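
The helpers above are only declared here, with their shapes recorded in the comments: "void objc_getProperty (id, SEL, ptrdiff_t, bool)" (although the FunctionType built for it actually returns IdTy), "void objc_setProperty (id, SEL, ptrdiff_t, id, bool, bool)", and objc_enumerationMutation taking a single id. As C++ prototypes, with parameter names that are educated guesses rather than anything the hunks specify, and with ptrdiff_t standing in for the LongTy the codegen currently uses (the FIXME notes that choice is wrong for Win64):

// Prototypes for the runtime property and fast-enumeration helpers requested
// above. Parameter names are illustrative only.
#include <cstddef>
typedef void *id;
typedef void *SEL;

extern "C" id objc_getProperty(id self, SEL _cmd, std::ptrdiff_t offset,
                               bool atomic);
extern "C" void objc_setProperty(id self, SEL _cmd, std::ptrdiff_t offset,
                                 id value, bool atomic, bool copy);
extern "C" void objc_enumerationMutation(id collection);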
void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
const Stmt &S) {
// Pointer to the personality function
llvm::Constant *Personality =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::Int32Ty,
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty(VMContext),
true),
"__gnu_objc_personality_v0");
Personality = llvm::ConstantExpr::getBitCast(Personality, PtrTy);
std::vector<const llvm::Type*> Params;
Params.push_back(PtrTy);
llvm::Value *RethrowFn =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy,
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
Params, false), "_Unwind_Resume_or_Rethrow");
bool isTry = isa<ObjCAtTryStmt>(S);
@@ -1271,9 +1615,9 @@ void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
if (!isTry) {
std::vector<const llvm::Type*> Args(1, IdTy);
llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::VoidTy, Args, false);
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
llvm::Value *SyncEnter = CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
- llvm::Value *SyncArg =
+ llvm::Value *SyncArg =
CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
CGF.Builder.CreateCall(SyncEnter, SyncArg);
@@ -1288,7 +1632,7 @@ void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
CGF.setInvokeDest(TryHandler);
CGF.EmitBlock(TryBlock);
- CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
+ CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
: cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
// Jump to @finally if there is no exception
@@ -1299,17 +1643,12 @@ void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// Get the correct versions of the exception handling intrinsics
llvm::TargetData td = llvm::TargetData::TargetData(&TheModule);
- int PointerWidth = td.getTypeSizeInBits(PtrTy);
- assert((PointerWidth == 32 || PointerWidth == 64) &&
- "Can't yet handle exceptions if pointers are not 32 or 64 bits");
- llvm::Value *llvm_eh_exception =
+ llvm::Value *llvm_eh_exception =
CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
- llvm::Value *llvm_eh_selector = PointerWidth == 32 ?
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector_i32) :
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector_i64);
- llvm::Value *llvm_eh_typeid_for = PointerWidth == 32 ?
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for_i32) :
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for_i64);
+ llvm::Value *llvm_eh_selector =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
+ llvm::Value *llvm_eh_typeid_for =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
// Exception object
llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
@@ -1330,23 +1669,25 @@ void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
for (; CatchStmt; CatchStmt = CatchStmt->getNextCatchStmt()) {
const ParmVarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
- Handlers.push_back(std::make_pair(CatchDecl, CatchStmt->getCatchBody()));
+ Handlers.push_back(std::make_pair(CatchDecl,
+ CatchStmt->getCatchBody()));
// @catch() and @catch(id) both catch any ObjC exception
- if (!CatchDecl || CGF.getContext().isObjCIdType(CatchDecl->getType())
+ if (!CatchDecl || CatchDecl->getType()->isObjCIdType()
|| CatchDecl->getType()->isObjCQualifiedIdType()) {
// Use i8* null here to signal this is a catch all, not a cleanup.
ESelArgs.push_back(NULLPtr);
HasCatchAll = true;
// No further catches after this one will ever by reached
break;
- }
+ }
// All other types should be Objective-C interface pointer types.
- const PointerType *PT = CatchDecl->getType()->getAsPointerType();
- assert(PT && "Invalid @catch type.");
- const ObjCInterfaceType *IT =
- PT->getPointeeType()->getAsObjCInterfaceType();
+ const ObjCObjectPointerType *OPT =
+ CatchDecl->getType()->getAs<ObjCObjectPointerType>();
+ assert(OPT && "Invalid @catch type.");
+ const ObjCInterfaceType *IT =
+ OPT->getPointeeType()->getAs<ObjCInterfaceType>();
assert(IT && "Invalid @catch type.");
llvm::Value *EHType =
MakeConstantString(IT->getDecl()->getNameAsString());
@@ -1357,7 +1698,7 @@ void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// We use a cleanup unless there was already a catch all.
if (!HasCatchAll) {
- ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0));
+ ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0));
Handlers.push_back(std::make_pair((const ParmVarDecl*) 0, (const Stmt*) 0));
}
@@ -1386,11 +1727,11 @@ void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
CGF.EmitBlock(Match);
}
-
+
if (CatchBody) {
llvm::Value *ExcObject = CGF.Builder.CreateBitCast(Exc,
CGF.ConvertType(CatchParam->getType()));
-
+
// Bind the catch parameter if it exists.
if (CatchParam) {
// CatchParam is a ParmVarDecl because of the grammar
@@ -1422,7 +1763,7 @@ void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
ESelArgs.clear();
ESelArgs.push_back(Exc);
ESelArgs.push_back(Personality);
- ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0));
+ ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0));
CGF.Builder.CreateCall(llvm_eh_selector, ESelArgs.begin(), ESelArgs.end(),
"selector");
CGF.Builder.CreateCall(llvm_eh_typeid_for,
@@ -1438,7 +1779,7 @@ void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
if (isTry) {
- if (const ObjCAtFinallyStmt* FinallyStmt =
+ if (const ObjCAtFinallyStmt* FinallyStmt =
cast<ObjCAtTryStmt>(S).getFinallyStmt())
CGF.EmitStmt(FinallyStmt->getFinallyBody());
} else {
@@ -1446,9 +1787,9 @@ void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
// @synchronized.
std::vector<const llvm::Type*> Args(1, IdTy);
llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::VoidTy, Args, false);
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
llvm::Value *SyncExit = CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
- llvm::Value *SyncArg =
+ llvm::Value *SyncArg =
CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
CGF.Builder.CreateCall(SyncExit, SyncArg);
@@ -1465,7 +1806,7 @@ void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
CGF.EmitBlock(FinallyRethrow);
CGF.Builder.CreateCall(RethrowFn, CGF.Builder.CreateLoad(RethrowPtr));
CGF.Builder.CreateUnreachable();
-
+
CGF.EmitBlock(FinallyEnd);
}
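
On the @synchronized path the cleanup calls the runtime's objc_sync_exit on the lock object (matching the objc_sync_enter emitted at entry), and the rethrow block re-raises any pending exception before ending in unreachable. A rough C++ picture of the normal-path lowering, using the void-returning prototypes declared in these hunks; the exceptional edge that also runs objc_sync_exit is omitted:

    // Stand-in for the Objective-C object pointer type.
    typedef void *id;

    // Runtime entry points as declared in the surrounding code (void(id)).
    extern "C" void objc_sync_enter(id obj);
    extern "C" void objc_sync_exit(id obj);

    // Roughly what @synchronized(obj) { body(); } amounts to on the normal path.
    void synchronizedSketch(id obj, void (*body)()) {
      objc_sync_enter(obj);   // acquire the per-object lock
      body();
      objc_sync_exit(obj);    // release it again
    }
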
@@ -1476,21 +1817,21 @@ void CGObjCGNU::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
std::vector<const llvm::Type*> Args(1, IdTy);
llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::VoidTy, Args, false);
- llvm::Value *ThrowFn =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
+ llvm::Value *ThrowFn =
CGM.CreateRuntimeFunction(FTy, "objc_exception_throw");
-
+
if (const Expr *ThrowExpr = S.getThrowExpr()) {
llvm::Value *Exception = CGF.EmitScalarExpr(ThrowExpr);
ExceptionAsObject = Exception;
} else {
- assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
+ assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
"Unexpected rethrow outside @catch block.");
ExceptionAsObject = CGF.ObjCEHValueStack.back();
}
ExceptionAsObject =
CGF.Builder.CreateBitCast(ExceptionAsObject, IdTy, "tmp");
-
+
// Note: This may have to be an invoke, if we want to support constructs like:
// @try {
// @throw(obj);
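
EmitThrowStmt funnels both forms of @throw through the runtime's objc_exception_throw entry point, whose void(id) prototype is built in the hunk above: an explicit "@throw expr" passes the evaluated object, while a bare "@throw" inside a @catch block re-throws the exception currently on ObjCEHValueStack. A minimal sketch of that selection:

    typedef void *id;

    // Runtime entry point as declared above: void objc_exception_throw(id).
    extern "C" void objc_exception_throw(id exception);

    // explicitObj is null for a bare "@throw"; currentHandlerObj is the
    // exception object of the enclosing @catch block.
    void throwSketch(id explicitObj, id currentHandlerObj) {
      id exceptionAsObject = explicitObj ? explicitObj : currentHandlerObj;
      objc_exception_throw(exceptionAsObject);   // does not return
    }
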
@@ -1513,32 +1854,35 @@ void CGObjCGNU::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
}
llvm::Value * CGObjCGNU::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
- llvm::Value *AddrWeakObj)
-{
+ llvm::Value *AddrWeakObj) {
return 0;
}
void CGObjCGNU::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst)
-{
+ llvm::Value *src, llvm::Value *dst) {
return;
}
void CGObjCGNU::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst)
-{
+ llvm::Value *src, llvm::Value *dst) {
return;
}
void CGObjCGNU::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst)
-{
+ llvm::Value *src, llvm::Value *dst,
+ llvm::Value *ivarOffset) {
return;
}
void CGObjCGNU::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst)
-{
+ llvm::Value *src, llvm::Value *dst) {
+ return;
+}
+
+void CGObjCGNU::EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *DestPtr,
+ llvm::Value *SrcPtr,
+ QualType Ty) {
return;
}
@@ -1550,15 +1894,29 @@ llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
// Emit the variable and initialize it with what we think the correct value
// is. This allows code compiled with non-fragile ivars to work correctly
// when linked against code which isn't (most of the time).
- llvm::GlobalVariable *IvarOffsetGV = CGM.getModule().getGlobalVariable(Name);
- if (!IvarOffsetGV) {
+ llvm::GlobalVariable *IvarOffsetPointer = TheModule.getNamedGlobal(Name);
+ if (!IvarOffsetPointer) {
uint64_t Offset = ComputeIvarBaseOffset(CGM, ID, Ivar);
llvm::ConstantInt *OffsetGuess =
llvm::ConstantInt::get(LongTy, Offset, "ivar");
- IvarOffsetGV = new llvm::GlobalVariable(LongTy, false,
- llvm::GlobalValue::CommonLinkage, OffsetGuess, Name, &TheModule);
+ // Don't emit the guess in non-PIC code because the linker will not be able
+ // to replace it with the real version for a library. In non-PIC code you
+ // must compile with the fragile ABI if you want to use ivars from a
+ // GCC-compiled class.
+ if (CGM.getLangOptions().PICLevel) {
+ llvm::GlobalVariable *IvarOffsetGV = new llvm::GlobalVariable(TheModule,
+ llvm::Type::getInt32Ty(VMContext), false,
+ llvm::GlobalValue::PrivateLinkage, OffsetGuess, Name+".guess");
+ IvarOffsetPointer = new llvm::GlobalVariable(TheModule,
+ IvarOffsetGV->getType(), false, llvm::GlobalValue::LinkOnceAnyLinkage,
+ IvarOffsetGV, Name);
+ } else {
+ IvarOffsetPointer = new llvm::GlobalVariable(TheModule,
+ llvm::Type::getInt32PtrTy(VMContext), false,
+ llvm::GlobalValue::ExternalLinkage, 0, Name);
+ }
}
- return IvarOffsetGV;
+ return IvarOffsetPointer;
}
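
The rewritten ObjCIvarOffsetVariable emits a two-level scheme for non-fragile ivar offsets: in PIC code a private ".guess" global holds the compile-time estimate and a link-once pointer global refers to it (so a definition carrying the authoritative offset can override it at link time), while non-PIC code only emits an external pointer declaration. What the emitted globals amount to, with hypothetical names for an ivar "bar" of class "Foo":

    // Hypothetical illustration; the real symbol names come from the caller.
    static int __objc_ivar_offset_Foo_bar_guess = 8;   // ".guess": best local estimate

    // Link-once pointer global that another module (or the runtime) can
    // redirect to the real offset; this is what IvarOffsetPointer holds.
    int *__objc_ivar_offset_Foo_bar = &__objc_ivar_offset_Foo_bar_guess;
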
LValue CGObjCGNU::EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
@@ -1566,10 +1924,11 @@ LValue CGObjCGNU::EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
llvm::Value *BaseValue,
const ObjCIvarDecl *Ivar,
unsigned CVRQualifiers) {
- const ObjCInterfaceDecl *ID = ObjectTy->getAsObjCInterfaceType()->getDecl();
+ const ObjCInterfaceDecl *ID = ObjectTy->getAs<ObjCInterfaceType>()->getDecl();
return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
EmitIvarOffset(CGF, ID, Ivar));
}
+
static const ObjCInterfaceDecl *FindIvarInterface(ASTContext &Context,
const ObjCInterfaceDecl *OID,
const ObjCIvarDecl *OIVD) {
@@ -1579,27 +1938,27 @@ static const ObjCInterfaceDecl *FindIvarInterface(ASTContext &Context,
if (OIVD == Ivars[k])
return OID;
}
-
+
// Otherwise check in the super class.
if (const ObjCInterfaceDecl *Super = OID->getSuperClass())
return FindIvarInterface(Context, Super, OIVD);
-
+
return 0;
}
llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar) {
- if (CGF.getContext().getLangOptions().ObjCNonFragileABI)
- {
+ if (CGM.getLangOptions().ObjCNonFragileABI) {
Interface = FindIvarInterface(CGM.getContext(), Interface, Ivar);
- return CGF.Builder.CreateLoad(ObjCIvarOffsetVariable(Interface, Ivar),
- false, "ivar");
+ return CGF.Builder.CreateLoad(CGF.Builder.CreateLoad(
+ ObjCIvarOffsetVariable(Interface, Ivar), false, "ivar"));
}
uint64_t Offset = ComputeIvarBaseOffset(CGF.CGM, Interface, Ivar);
return llvm::ConstantInt::get(LongTy, Offset, "ivar");
}
-CodeGen::CGObjCRuntime *CodeGen::CreateGNUObjCRuntime(CodeGen::CodeGenModule &CGM){
+CodeGen::CGObjCRuntime *
+CodeGen::CreateGNUObjCRuntime(CodeGen::CodeGenModule &CGM) {
return new CGObjCGNU(CGM);
}
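
Under the non-fragile ABI, EmitIvarOffset now reads the offset through that pointer global with two loads (first the pointer, then the value it addresses), while the fragile ABI still folds the offset to a compile-time constant. Continuing the hypothetical names from the previous sketch:

    // Hypothetical symbol from the previous sketch.
    extern int *__objc_ivar_offset_Foo_bar;

    // Non-fragile ABI: the offset is only known for sure at run time.
    int ivarOffsetNonFragile() {
      int *slot = __objc_ivar_offset_Foo_bar;   // first load: the pointer global
      return *slot;                             // second load: the actual offset
    }

    // Fragile ABI: the offset computed from the class layout is baked in.
    int ivarOffsetFragile() { return 8; }
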
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index c3354574c775..4485ed5aee75 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -23,10 +23,14 @@
#include "clang/Basic/LangOptions.h"
#include "llvm/Intrinsics.h"
+#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
-#include <sstream>
+#include <cstdio>
using namespace clang;
using namespace CodeGen;
@@ -35,7 +39,7 @@ using namespace CodeGen;
// don't belong in CGObjCRuntime either so we will live with it for
// now.
-/// FindIvarInterface - Find the interface containing the ivar.
+/// FindIvarInterface - Find the interface containing the ivar.
///
/// FIXME: We shouldn't need to do this, the containing context should
/// be fixed.
@@ -54,11 +58,11 @@ static const ObjCInterfaceDecl *FindIvarInterface(ASTContext &Context,
return OID;
++Index;
}
-
+
// Otherwise check in the super class.
if (const ObjCInterfaceDecl *Super = OID->getSuperClass())
return FindIvarInterface(Context, Super, OIVD, Index);
-
+
return 0;
}
@@ -67,13 +71,13 @@ static uint64_t LookupFieldBitOffset(CodeGen::CodeGenModule &CGM,
const ObjCImplementationDecl *ID,
const ObjCIvarDecl *Ivar) {
unsigned Index;
- const ObjCInterfaceDecl *Container =
+ const ObjCInterfaceDecl *Container =
FindIvarInterface(CGM.getContext(), OID, Ivar, Index);
assert(Container && "Unable to find ivar container");
  // If we know we have an implementation (and the ivar is in it) then
// look up in the implementation layout.
- const ASTRecordLayout *RL;
+ const ASTRecordLayout *RL;
if (ID && ID->getClassInterface() == Container)
RL = &CGM.getContext().getASTObjCImplementationLayout(ID);
else
@@ -100,13 +104,16 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
unsigned CVRQualifiers,
llvm::Value *Offset) {
// Compute (type*) ( (char *) BaseValue + Offset)
- llvm::Type *I8Ptr = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+ const llvm::Type *I8Ptr = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
QualType IvarTy = Ivar->getType();
const llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, I8Ptr);
V = CGF.Builder.CreateGEP(V, Offset, "add.ptr");
V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
-
+
+ Qualifiers Quals = CGF.MakeQualifiers(IvarTy);
+ Quals.addCVRQualifiers(CVRQualifiers);
+
if (Ivar->isBitField()) {
// We need to compute the bit offset for the bit-field, the offset
// is to the byte. Note, there is a subtle invariant here: we can
@@ -119,12 +126,11 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
Ivar->getBitWidth()->EvaluateAsInt(CGF.getContext()).getZExtValue();
return LValue::MakeBitfield(V, BitOffset, BitFieldSize,
IvarTy->isSignedIntegerType(),
- IvarTy.getCVRQualifiers()|CVRQualifiers);
+ Quals.getCVRQualifiers());
}
- LValue LV = LValue::MakeAddr(V, IvarTy.getCVRQualifiers()|CVRQualifiers,
- CGF.CGM.getContext().getObjCGCAttrKind(IvarTy));
- LValue::SetObjCIvar(LV, true);
+
+ LValue LV = LValue::MakeAddr(V, Quals);
return LV;
}
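
The ivar access spelled out in the comment, (type *)((char *)BaseValue + Offset), is plain byte-offset pointer arithmetic followed by a cast to the ivar's type; bit-field ivars additionally carry a bit offset within the addressed storage, and the CVR and GC qualifiers are merged onto the resulting lvalue. A minimal sketch of the address computation for an ordinary ivar:

    #include <cstddef>

    // Compute the address of an ivar from the object base pointer and the
    // ivar's byte offset: (T *)((char *)base + offset).
    template <typename T>
    T *ivarAddress(void *base, std::ptrdiff_t offset) {
      return reinterpret_cast<T *>(static_cast<char *>(base) + offset);
    }

    // Usage: int *ip = ivarAddress<int>(self, 8); loads and stores then go
    // through ip with the merged qualifiers applied, as the code above does.
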
@@ -132,12 +138,15 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
namespace {
- typedef std::vector<llvm::Constant*> ConstantVector;
+typedef std::vector<llvm::Constant*> ConstantVector;
- // FIXME: We should find a nicer way to make the labels for metadata, string
- // concatenation is lame.
+// FIXME: We should find a nicer way to make the labels for metadata, string
+// concatenation is lame.
class ObjCCommonTypesHelper {
+protected:
+ llvm::LLVMContext &VMContext;
+
private:
llvm::Constant *getMessageSendFn() const {
// id objc_msgSend (id, SEL, ...)
@@ -145,23 +154,23 @@ private:
Params.push_back(ObjectPtrTy);
Params.push_back(SelectorPtrTy);
return
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
- Params, true),
- "objc_msgSend");
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ Params, true),
+ "objc_msgSend");
}
-
+
llvm::Constant *getMessageSendStretFn() const {
// id objc_msgSend_stret (id, SEL, ...)
std::vector<const llvm::Type*> Params;
Params.push_back(ObjectPtrTy);
Params.push_back(SelectorPtrTy);
return
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy,
- Params, true),
- "objc_msgSend_stret");
-
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ Params, true),
+ "objc_msgSend_stret");
+
}
-
+
llvm::Constant *getMessageSendFpretFn() const {
// FIXME: This should be long double on x86_64?
// [double | long double] objc_msgSend_fpret(id self, SEL op, ...)
@@ -169,13 +178,14 @@ private:
Params.push_back(ObjectPtrTy);
Params.push_back(SelectorPtrTy);
return
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::DoubleTy,
- Params,
- true),
- "objc_msgSend_fpret");
-
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::Type::getDoubleTy(VMContext),
+ Params,
+ true),
+ "objc_msgSend_fpret");
+
}
-
+
llvm::Constant *getMessageSendSuperFn() const {
// id objc_msgSendSuper(struct objc_super *super, SEL op, ...)
const char *SuperName = "objc_msgSendSuper";
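
The comments in these hunks name the fragile runtime's dispatch family: objc_msgSend for ordinary returns, objc_msgSend_stret when the result is returned indirectly through memory, objc_msgSend_fpret for floating-point returns, and objc_msgSendSuper for sends to super. Their C-level shapes as declared here, with id and SEL reduced to opaque pointers for illustration (the stret variant is declared with a void result; the hidden result pointer is supplied through the sret mechanism at the call site):

    typedef void *id;
    typedef void *SEL;
    struct objc_super;   // receiver plus starting class for super sends

    extern "C" id objc_msgSend(id self, SEL op, ...);
    extern "C" void objc_msgSend_stret(id self, SEL op, ...);
    extern "C" double objc_msgSend_fpret(id self, SEL op, ...);   // FIXME above: long double on x86-64?
    extern "C" id objc_msgSendSuper(objc_super *super, SEL op, ...);
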
@@ -186,7 +196,7 @@ private:
Params, true),
SuperName);
}
-
+
llvm::Constant *getMessageSendSuperFn2() const {
// id objc_msgSendSuper2(struct objc_super *super, SEL op, ...)
const char *SuperName = "objc_msgSendSuper2";
@@ -197,7 +207,7 @@ private:
Params, true),
SuperName);
}
-
+
llvm::Constant *getMessageSendSuperStretFn() const {
// void objc_msgSendSuper_stret(void * stretAddr, struct objc_super *super,
// SEL op, ...)
@@ -205,11 +215,12 @@ private:
Params.push_back(Int8PtrTy);
Params.push_back(SuperPtrTy);
Params.push_back(SelectorPtrTy);
- return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy,
- Params, true),
- "objc_msgSendSuper_stret");
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ Params, true),
+ "objc_msgSendSuper_stret");
}
-
+
llvm::Constant *getMessageSendSuperStretFn2() const {
// void objc_msgSendSuper2_stret(void * stretAddr, struct objc_super *super,
// SEL op, ...)
@@ -217,68 +228,69 @@ private:
Params.push_back(Int8PtrTy);
Params.push_back(SuperPtrTy);
Params.push_back(SelectorPtrTy);
- return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy,
- Params, true),
- "objc_msgSendSuper2_stret");
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ Params, true),
+ "objc_msgSendSuper2_stret");
}
-
+
llvm::Constant *getMessageSendSuperFpretFn() const {
// There is no objc_msgSendSuper_fpret? How can that work?
return getMessageSendSuperFn();
}
-
+
llvm::Constant *getMessageSendSuperFpretFn2() const {
// There is no objc_msgSendSuper_fpret? How can that work?
return getMessageSendSuperFn2();
}
-
+
protected:
CodeGen::CodeGenModule &CGM;
-
+
public:
const llvm::Type *ShortTy, *IntTy, *LongTy, *LongLongTy;
const llvm::Type *Int8PtrTy;
-
+
/// ObjectPtrTy - LLVM type for object handles (typeof(id))
const llvm::Type *ObjectPtrTy;
-
+
/// PtrObjectPtrTy - LLVM type for id *
const llvm::Type *PtrObjectPtrTy;
-
+
/// SelectorPtrTy - LLVM type for selector handles (typeof(SEL))
const llvm::Type *SelectorPtrTy;
/// ProtocolPtrTy - LLVM type for external protocol handles
/// (typeof(Protocol))
const llvm::Type *ExternalProtocolPtrTy;
-
+
// SuperCTy - clang type for struct objc_super.
QualType SuperCTy;
// SuperPtrCTy - clang type for struct objc_super *.
QualType SuperPtrCTy;
-
+
/// SuperTy - LLVM type for struct objc_super.
const llvm::StructType *SuperTy;
/// SuperPtrTy - LLVM type for struct objc_super *.
const llvm::Type *SuperPtrTy;
-
+
/// PropertyTy - LLVM type for struct objc_property (struct _prop_t
/// in GCC parlance).
const llvm::StructType *PropertyTy;
-
+
/// PropertyListTy - LLVM type for struct objc_property_list
/// (_prop_list_t in GCC parlance).
const llvm::StructType *PropertyListTy;
/// PropertyListPtrTy - LLVM type for struct objc_property_list*.
const llvm::Type *PropertyListPtrTy;
-
+
// MethodTy - LLVM type for struct objc_method.
const llvm::StructType *MethodTy;
-
+
/// CacheTy - LLVM type for struct objc_cache.
const llvm::Type *CacheTy;
/// CachePtrTy - LLVM type for struct objc_cache *.
const llvm::Type *CachePtrTy;
-
+
llvm::Constant *getGetPropertyFn() {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
@@ -294,7 +306,7 @@ public:
Types.GetFunctionType(Types.getFunctionInfo(IdType, Params), false);
return CGM.CreateRuntimeFunction(FTy, "objc_getProperty");
}
-
+
llvm::Constant *getSetPropertyFn() {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
@@ -312,25 +324,28 @@ public:
Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params), false);
return CGM.CreateRuntimeFunction(FTy, "objc_setProperty");
}
-
+
llvm::Constant *getEnumerationMutationFn() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
// void objc_enumerationMutation (id)
- std::vector<const llvm::Type*> Args;
- Args.push_back(ObjectPtrTy);
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::VoidTy, Args, false);
+ llvm::SmallVector<QualType,16> Params;
+ Params.push_back(Ctx.getObjCIdType());
+ const llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params), false);
return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation");
}
-
+
/// GcReadWeakFn -- LLVM objc_read_weak (id *src) function.
llvm::Constant *getGcReadWeakFn() {
// id objc_read_weak (id *)
std::vector<const llvm::Type*> Args;
Args.push_back(ObjectPtrTy->getPointerTo());
- llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, Args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_read_weak");
- }
-
+ }
+
/// GcAssignWeakFn -- LLVM objc_assign_weak function.
llvm::Constant *getGcAssignWeakFn() {
// id objc_assign_weak (id, id *)
@@ -340,31 +355,45 @@ public:
llvm::FunctionType::get(ObjectPtrTy, Args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_weak");
}
-
+
/// GcAssignGlobalFn -- LLVM objc_assign_global function.
llvm::Constant *getGcAssignGlobalFn() {
// id objc_assign_global(id, id *)
std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
Args.push_back(ObjectPtrTy->getPointerTo());
- llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, Args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_global");
}
-
+
/// GcAssignIvarFn -- LLVM objc_assign_ivar function.
llvm::Constant *getGcAssignIvarFn() {
- // id objc_assign_ivar(id, id *)
+ // id objc_assign_ivar(id, id *, ptrdiff_t)
std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
Args.push_back(ObjectPtrTy->getPointerTo());
- llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ Args.push_back(LongTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, Args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_ivar");
}
-
+
+ /// GcMemmoveCollectableFn -- LLVM objc_memmove_collectable function.
+ llvm::Constant *GcMemmoveCollectableFn() {
+ // void *objc_memmove_collectable(void *dst, const void *src, size_t size)
+ std::vector<const llvm::Type*> Args(1, Int8PtrTy);
+ Args.push_back(Int8PtrTy);
+ Args.push_back(LongTy);
+ llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_memmove_collectable");
+ }
+
/// GcAssignStrongCastFn -- LLVM objc_assign_strongCast function.
llvm::Constant *getGcAssignStrongCastFn() {
// id objc_assign_global(id, id *)
std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
Args.push_back(ObjectPtrTy->getPointerTo());
- llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, Args, false);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, Args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_strongCast");
}
@@ -373,52 +402,52 @@ public:
// void objc_exception_throw(id)
std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::VoidTy, Args, false);
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_exception_throw");
}
-
+
/// SyncEnterFn - LLVM object_sync_enter function.
llvm::Constant *getSyncEnterFn() {
// void objc_sync_enter (id)
std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::VoidTy, Args, false);
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
}
-
+
/// SyncExitFn - LLVM object_sync_exit function.
llvm::Constant *getSyncExitFn() {
// void objc_sync_exit (id)
std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::VoidTy, Args, false);
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
}
-
+
llvm::Constant *getSendFn(bool IsSuper) const {
return IsSuper ? getMessageSendSuperFn() : getMessageSendFn();
}
-
+
llvm::Constant *getSendFn2(bool IsSuper) const {
return IsSuper ? getMessageSendSuperFn2() : getMessageSendFn();
}
-
+
llvm::Constant *getSendStretFn(bool IsSuper) const {
return IsSuper ? getMessageSendSuperStretFn() : getMessageSendStretFn();
}
-
+
llvm::Constant *getSendStretFn2(bool IsSuper) const {
return IsSuper ? getMessageSendSuperStretFn2() : getMessageSendStretFn();
}
-
+
llvm::Constant *getSendFpretFn(bool IsSuper) const {
return IsSuper ? getMessageSendSuperFpretFn() : getMessageSendFpretFn();
}
-
+
llvm::Constant *getSendFpretFn2(bool IsSuper) const {
return IsSuper ? getMessageSendSuperFpretFn2() : getMessageSendFpretFn();
}
-
+
ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm);
~ObjCCommonTypesHelper(){}
};
@@ -477,26 +506,28 @@ public:
const llvm::Type *MethodListTy;
/// MethodListPtrTy - LLVM type for struct objc_method_list *.
const llvm::Type *MethodListPtrTy;
-
+
/// ExceptionDataTy - LLVM type for struct _objc_exception_data.
const llvm::Type *ExceptionDataTy;
-
+
/// ExceptionTryEnterFn - LLVM objc_exception_try_enter function.
llvm::Constant *getExceptionTryEnterFn() {
std::vector<const llvm::Type*> Params;
Params.push_back(llvm::PointerType::getUnqual(ExceptionDataTy));
- return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy,
- Params, false),
- "objc_exception_try_enter");
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ Params, false),
+ "objc_exception_try_enter");
}
/// ExceptionTryExitFn - LLVM objc_exception_try_exit function.
llvm::Constant *getExceptionTryExitFn() {
std::vector<const llvm::Type*> Params;
Params.push_back(llvm::PointerType::getUnqual(ExceptionDataTy));
- return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy,
- Params, false),
- "objc_exception_try_exit");
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ Params, false),
+ "objc_exception_try_exit");
}
/// ExceptionExtractFn - LLVM objc_exception_extract function.
@@ -506,31 +537,32 @@ public:
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
Params, false),
"objc_exception_extract");
-
+
}
-
+
/// ExceptionMatchFn - LLVM objc_exception_match function.
llvm::Constant *getExceptionMatchFn() {
std::vector<const llvm::Type*> Params;
Params.push_back(ClassPtrTy);
Params.push_back(ObjectPtrTy);
- return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::Int32Ty,
- Params, false),
- "objc_exception_match");
-
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(llvm::Type::getInt32Ty(VMContext),
+ Params, false),
+ "objc_exception_match");
+
}
-
+
/// SetJmpFn - LLVM _setjmp function.
llvm::Constant *getSetJmpFn() {
std::vector<const llvm::Type*> Params;
- Params.push_back(llvm::PointerType::getUnqual(llvm::Type::Int32Ty));
+ Params.push_back(llvm::Type::getInt32PtrTy(VMContext));
return
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::Int32Ty,
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty(VMContext),
Params, false),
"_setjmp");
-
+
}
-
+
public:
ObjCTypesHelper(CodeGen::CodeGenModule &cgm);
~ObjCTypesHelper() {}
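
The fragile runtime's exception model is setjmp-based: objc_exception_try_enter registers an _objc_exception_data buffer, _setjmp snapshots the register state into it, objc_exception_extract retrieves the thrown object after control returns through a longjmp, and objc_exception_match tests it against a class. A sketch of the prototypes as these hunks declare them (the exception-data struct is left opaque, and _setjmp is shown with the i32* parameter used here rather than the usual jmp_buf):

    typedef void *id;
    typedef void *Class;
    struct objc_exception_data;   // jmp_buf-like register state plus bookkeeping

    extern "C" void objc_exception_try_enter(objc_exception_data *d);  // push handler
    extern "C" void objc_exception_try_exit(objc_exception_data *d);   // pop handler
    extern "C" id objc_exception_extract(objc_exception_data *d);      // fetch thrown object
    extern "C" int objc_exception_match(Class cls, id exception);      // class membership test
    extern "C" int _setjmp(int *env);                                  // as declared above: i32(i32*)
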
@@ -540,51 +572,51 @@ public:
/// modern abi
class ObjCNonFragileABITypesHelper : public ObjCCommonTypesHelper {
public:
-
+
// MethodListnfABITy - LLVM for struct _method_list_t
const llvm::StructType *MethodListnfABITy;
-
+
// MethodListnfABIPtrTy - LLVM for struct _method_list_t*
const llvm::Type *MethodListnfABIPtrTy;
-
+
// ProtocolnfABITy = LLVM for struct _protocol_t
const llvm::StructType *ProtocolnfABITy;
-
+
// ProtocolnfABIPtrTy = LLVM for struct _protocol_t*
const llvm::Type *ProtocolnfABIPtrTy;
// ProtocolListnfABITy - LLVM for struct _objc_protocol_list
const llvm::StructType *ProtocolListnfABITy;
-
+
// ProtocolListnfABIPtrTy - LLVM for struct _objc_protocol_list*
const llvm::Type *ProtocolListnfABIPtrTy;
-
+
// ClassnfABITy - LLVM for struct _class_t
const llvm::StructType *ClassnfABITy;
-
+
// ClassnfABIPtrTy - LLVM for struct _class_t*
const llvm::Type *ClassnfABIPtrTy;
-
+
// IvarnfABITy - LLVM for struct _ivar_t
const llvm::StructType *IvarnfABITy;
-
+
// IvarListnfABITy - LLVM for struct _ivar_list_t
const llvm::StructType *IvarListnfABITy;
-
+
// IvarListnfABIPtrTy = LLVM for struct _ivar_list_t*
const llvm::Type *IvarListnfABIPtrTy;
-
+
// ClassRonfABITy - LLVM for struct _class_ro_t
const llvm::StructType *ClassRonfABITy;
-
+
// ImpnfABITy - LLVM for id (*)(id, SEL, ...)
const llvm::Type *ImpnfABITy;
-
+
// CategorynfABITy - LLVM for struct _category_t
const llvm::StructType *CategorynfABITy;
-
+
// New types for nonfragile abi messaging.
-
+
// MessageRefTy - LLVM for:
// struct _message_ref_t {
// IMP messenger;
@@ -593,22 +625,22 @@ public:
const llvm::StructType *MessageRefTy;
// MessageRefCTy - clang type for struct _message_ref_t
QualType MessageRefCTy;
-
+
// MessageRefPtrTy - LLVM for struct _message_ref_t*
const llvm::Type *MessageRefPtrTy;
// MessageRefCPtrTy - clang type for struct _message_ref_t*
QualType MessageRefCPtrTy;
-
+
// MessengerTy - Type of the messenger (shown as IMP above)
const llvm::FunctionType *MessengerTy;
-
+
// SuperMessageRefTy - LLVM for:
// struct _super_message_ref_t {
// SUPER_IMP messenger;
// SEL name;
// };
const llvm::StructType *SuperMessageRefTy;
-
+
// SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t*
const llvm::Type *SuperMessageRefPtrTy;
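
The non-fragile ABI's "vtable dispatch" routes message sends through small descriptor records: _message_ref_t pairs the messenger function with the selector, and _super_message_ref_t does the same for super sends. As C++ structs, with IMP and SEL reduced to illustrative stand-ins:

    typedef void *SEL;
    typedef void *(*IMP)(void *self, SEL op, ...);   // stand-in for id (*)(id, SEL, ...)

    // struct _message_ref_t { IMP messenger; SEL name; };
    struct message_ref_t {
      IMP messenger;   // the function the send is dispatched through
      SEL name;        // the selector
    };

    // struct _super_message_ref_t { SUPER_IMP messenger; SEL name; };
    struct super_message_ref_t {
      IMP messenger;
      SEL name;
    };
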
@@ -621,7 +653,7 @@ public:
Params, true),
"objc_msgSend_fixup");
}
-
+
llvm::Constant *getMessageSendFpretFixupFn() {
// id objc_msgSend_fpret_fixup(id, struct message_ref_t*, ...)
std::vector<const llvm::Type*> Params;
@@ -631,7 +663,7 @@ public:
Params, true),
"objc_msgSend_fpret_fixup");
}
-
+
llvm::Constant *getMessageSendStretFixupFn() {
// id objc_msgSend_stret_fixup(id, struct message_ref_t*, ...)
std::vector<const llvm::Type*> Params;
@@ -641,7 +673,7 @@ public:
Params, true),
"objc_msgSend_stret_fixup");
}
-
+
llvm::Constant *getMessageSendIdFixupFn() {
// id objc_msgSendId_fixup(id, struct message_ref_t*, ...)
std::vector<const llvm::Type*> Params;
@@ -651,7 +683,7 @@ public:
Params, true),
"objc_msgSendId_fixup");
}
-
+
llvm::Constant *getMessageSendIdStretFixupFn() {
// id objc_msgSendId_stret_fixup(id, struct message_ref_t*, ...)
std::vector<const llvm::Type*> Params;
@@ -662,7 +694,7 @@ public:
"objc_msgSendId_stret_fixup");
}
llvm::Constant *getMessageSendSuper2FixupFn() {
- // id objc_msgSendSuper2_fixup (struct objc_super *,
+ // id objc_msgSendSuper2_fixup (struct objc_super *,
// struct _super_message_ref_t*, ...)
std::vector<const llvm::Type*> Params;
Params.push_back(SuperPtrTy);
@@ -671,9 +703,9 @@ public:
Params, true),
"objc_msgSendSuper2_fixup");
}
-
+
llvm::Constant *getMessageSendSuper2StretFixupFn() {
- // id objc_msgSendSuper2_stret_fixup(struct objc_super *,
+ // id objc_msgSendSuper2_stret_fixup(struct objc_super *,
// struct _super_message_ref_t*, ...)
std::vector<const llvm::Type*> Params;
Params.push_back(SuperPtrTy);
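
The getMessageSend*FixupFn helpers declare the corresponding entry points, which take a pointer to one of those descriptor records in place of a bare selector so that the runtime can patch the messenger slot after the first send. The prototypes, transcribed from the comments above (message_ref_t and friends as in the previous sketch):

    typedef void *id;
    struct message_ref_t;
    struct super_message_ref_t;
    struct objc_super;

    extern "C" id objc_msgSend_fixup(id self, message_ref_t *ref, ...);
    extern "C" id objc_msgSend_fpret_fixup(id self, message_ref_t *ref, ...);
    extern "C" id objc_msgSend_stret_fixup(id self, message_ref_t *ref, ...);
    extern "C" id objc_msgSendId_fixup(id self, message_ref_t *ref, ...);
    extern "C" id objc_msgSendId_stret_fixup(id self, message_ref_t *ref, ...);
    extern "C" id objc_msgSendSuper2_fixup(objc_super *super,
                                           super_message_ref_t *ref, ...);
    extern "C" id objc_msgSendSuper2_stret_fixup(objc_super *super,
                                                 super_message_ref_t *ref, ...);
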
@@ -682,34 +714,35 @@ public:
Params, true),
"objc_msgSendSuper2_stret_fixup");
}
-
-
-
+
+
+
/// EHPersonalityPtr - LLVM value for an i8* to the Objective-C
/// exception personality function.
llvm::Value *getEHPersonalityPtr() {
- llvm::Constant *Personality =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::Int32Ty,
+ llvm::Constant *Personality =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty(VMContext),
true),
- "__objc_personality_v0");
+ "__objc_personality_v0");
return llvm::ConstantExpr::getBitCast(Personality, Int8PtrTy);
}
llvm::Constant *getUnwindResumeOrRethrowFn() {
std::vector<const llvm::Type*> Params;
Params.push_back(Int8PtrTy);
- return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy,
- Params, false),
- "_Unwind_Resume_or_Rethrow");
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ Params, false),
+ "_Unwind_Resume_or_Rethrow");
}
-
+
llvm::Constant *getObjCEndCatchFn() {
- return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy,
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
false),
"objc_end_catch");
-
+
}
-
+
llvm::Constant *getObjCBeginCatchFn() {
std::vector<const llvm::Type*> Params;
Params.push_back(Int8PtrTy);
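
For the non-fragile ABI, exception handling uses the zero-cost model: __objc_personality_v0 serves as the landing-pad personality, objc_begin_catch and objc_end_catch bracket each handler, and _Unwind_Resume_or_Rethrow propagates anything left uncaught. A sketch of the shapes declared in these hunks; the return type of objc_begin_catch is not visible in this excerpt, so void* is an assumption here:

    extern "C" void *objc_begin_catch(void *exceptionObject);   // return type assumed
    extern "C" void objc_end_catch();
    extern "C" void _Unwind_Resume_or_Rethrow(void *exceptionObject);
    extern "C" int __objc_personality_v0(...);                  // declared with varargs only above
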
@@ -724,7 +757,7 @@ public:
ObjCNonFragileABITypesHelper(CodeGen::CodeGenModule &cgm);
~ObjCNonFragileABITypesHelper(){}
};
-
+
class CGObjCCommonMac : public CodeGen::CGObjCRuntime {
public:
// FIXME - accessibility
@@ -733,129 +766,126 @@ public:
unsigned ivar_bytepos;
unsigned ivar_size;
GC_IVAR(unsigned bytepos = 0, unsigned size = 0)
- : ivar_bytepos(bytepos), ivar_size(size) {}
+ : ivar_bytepos(bytepos), ivar_size(size) {}
// Allow sorting based on byte pos.
bool operator<(const GC_IVAR &b) const {
return ivar_bytepos < b.ivar_bytepos;
}
};
-
+
class SKIP_SCAN {
public:
unsigned skip;
unsigned scan;
- SKIP_SCAN(unsigned _skip = 0, unsigned _scan = 0)
+ SKIP_SCAN(unsigned _skip = 0, unsigned _scan = 0)
: skip(_skip), scan(_scan) {}
};
-
+
protected:
CodeGen::CodeGenModule &CGM;
+ llvm::LLVMContext &VMContext;
// FIXME! May not be needing this after all.
unsigned ObjCABI;
-
+
// gc ivar layout bitmap calculation helper caches.
llvm::SmallVector<GC_IVAR, 16> SkipIvars;
llvm::SmallVector<GC_IVAR, 16> IvarsInfo;
-
+
/// LazySymbols - Symbols to generate a lazy reference for. See
/// DefinedSymbols and FinishModule().
- std::set<IdentifierInfo*> LazySymbols;
-
+ llvm::SetVector<IdentifierInfo*> LazySymbols;
+
/// DefinedSymbols - External symbols which are defined by this
/// module. The symbols in this list and LazySymbols are used to add
/// special linker symbols which ensure that Objective-C modules are
/// linked properly.
- std::set<IdentifierInfo*> DefinedSymbols;
-
+ llvm::SetVector<IdentifierInfo*> DefinedSymbols;
+
/// ClassNames - uniqued class names.
llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> ClassNames;
-
+
/// MethodVarNames - uniqued method variable names.
llvm::DenseMap<Selector, llvm::GlobalVariable*> MethodVarNames;
-
+
/// MethodVarTypes - uniqued method type signatures. We have to use
/// a StringMap here because have no other unique reference.
llvm::StringMap<llvm::GlobalVariable*> MethodVarTypes;
-
+
/// MethodDefinitions - map of methods which have been defined in
/// this translation unit.
llvm::DenseMap<const ObjCMethodDecl*, llvm::Function*> MethodDefinitions;
-
+
/// PropertyNames - uniqued method variable names.
llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> PropertyNames;
-
+
/// ClassReferences - uniqued class references.
llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> ClassReferences;
-
+
/// SelectorReferences - uniqued selector references.
llvm::DenseMap<Selector, llvm::GlobalVariable*> SelectorReferences;
-
+
/// Protocols - Protocols for which an objc_protocol structure has
/// been emitted. Forward declarations are handled by creating an
/// empty structure whose initializer is filled in when/if defined.
llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> Protocols;
-
+
/// DefinedProtocols - Protocols which have actually been
/// defined. We should not need this, see FIXME in GenerateProtocol.
llvm::DenseSet<IdentifierInfo*> DefinedProtocols;
-
+
/// DefinedClasses - List of defined classes.
std::vector<llvm::GlobalValue*> DefinedClasses;
/// DefinedNonLazyClasses - List of defined "non-lazy" classes.
std::vector<llvm::GlobalValue*> DefinedNonLazyClasses;
-
+
/// DefinedCategories - List of defined categories.
std::vector<llvm::GlobalValue*> DefinedCategories;
-
+
/// DefinedNonLazyCategories - List of defined "non-lazy" categories.
std::vector<llvm::GlobalValue*> DefinedNonLazyCategories;
-
- /// UsedGlobals - List of globals to pack into the llvm.used metadata
- /// to prevent them from being clobbered.
- std::vector<llvm::GlobalVariable*> UsedGlobals;
/// GetNameForMethod - Return a name for the given method.
/// \param[out] NameOut - The return value.
void GetNameForMethod(const ObjCMethodDecl *OMD,
const ObjCContainerDecl *CD,
std::string &NameOut);
-
+
/// GetMethodVarName - Return a unique constant for the given
/// selector's name. The return value has type char *.
llvm::Constant *GetMethodVarName(Selector Sel);
llvm::Constant *GetMethodVarName(IdentifierInfo *Ident);
llvm::Constant *GetMethodVarName(const std::string &Name);
-
+
/// GetMethodVarType - Return a unique constant for the given
/// selector's name. The return value has type char *.
-
+
// FIXME: This is a horrible name.
llvm::Constant *GetMethodVarType(const ObjCMethodDecl *D);
llvm::Constant *GetMethodVarType(const FieldDecl *D);
-
+
/// GetPropertyName - Return a unique constant for the given
/// name. The return value has type char *.
llvm::Constant *GetPropertyName(IdentifierInfo *Ident);
-
+
// FIXME: This can be dropped once string functions are unified.
llvm::Constant *GetPropertyTypeString(const ObjCPropertyDecl *PD,
const Decl *Container);
-
+
/// GetClassName - Return a unique constant for the given selector's
/// name. The return value has type char *.
llvm::Constant *GetClassName(IdentifierInfo *Ident);
-
+
/// BuildIvarLayout - Builds ivar layout bitmap for the class
/// implementation for the __strong or __weak case.
///
llvm::Constant *BuildIvarLayout(const ObjCImplementationDecl *OI,
bool ForStrongLayout);
-
+
void BuildAggrIvarRecordLayout(const RecordType *RT,
- unsigned int BytePos, bool ForStrongLayout,
- bool &HasUnion);
+ unsigned int BytePos, bool ForStrongLayout,
+ bool &HasUnion);
void BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
const llvm::StructLayout *Layout,
const RecordDecl *RD,
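
BuildIvarLayout and the GC_IVAR/SKIP_SCAN helpers compute the __strong and __weak ivar layout strings consumed by the collector; the general idea is that runs of pointer-sized words are described as alternating "skip N, scan M" pairs. The exact byte encoding is not shown in this hunk, so the toy encoder below (one byte per pair, high nibble skip, low nibble scan, zero terminator) is an assumption and not clang's implementation:

    #include <cstdint>
    #include <vector>

    struct SkipScan { unsigned skip; unsigned scan; };   // mirrors SKIP_SCAN above

    std::vector<std::uint8_t> encodeLayout(const std::vector<SkipScan> &runs) {
      std::vector<std::uint8_t> bytes;
      for (const SkipScan &r : runs)
        bytes.push_back(static_cast<std::uint8_t>(((r.skip & 0xF) << 4) |
                                                  (r.scan & 0xF)));
      bytes.push_back(0);   // terminator
      return bytes;
    }
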
@@ -867,14 +897,14 @@ protected:
/// ivar layout bitmap.
llvm::Constant *GetIvarLayoutName(IdentifierInfo *Ident,
const ObjCCommonTypesHelper &ObjCTypes);
-
+
/// EmitPropertyList - Emit the given property list. The return
/// value has type PropertyListPtrTy.
llvm::Constant *EmitPropertyList(const std::string &Name,
- const Decl *Container,
+ const Decl *Container,
const ObjCContainerDecl *OCD,
const ObjCCommonTypesHelper &ObjCTypes);
-
+
/// GetProtocolRef - Return a reference to the internal protocol
/// description, creating an empty one if it has not been
/// defined. The return value has type ProtocolPtrTy.
@@ -885,7 +915,7 @@ protected:
///
/// This is a convenience wrapper which not only creates the
/// variable, but also sets the section and alignment and adds the
- /// global to the UsedGlobals list.
+ /// global to the "llvm.used" list.
///
/// \param Name - The variable name.
/// \param Init - The variable initializer; this is also used to
@@ -907,33 +937,32 @@ protected:
QualType Arg0Ty,
bool IsSuper,
const CallArgList &CallArgs,
+ const ObjCMethodDecl *OMD,
const ObjCCommonTypesHelper &ObjCTypes);
- virtual void MergeMetadataGlobals(std::vector<llvm::Constant*> &UsedArray);
-
public:
- CGObjCCommonMac(CodeGen::CodeGenModule &cgm) : CGM(cgm)
- { }
-
+ CGObjCCommonMac(CodeGen::CodeGenModule &cgm) :
+ CGM(cgm), VMContext(cgm.getLLVMContext()) { }
+
virtual llvm::Constant *GenerateConstantString(const ObjCStringLiteral *SL);
-
+
virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
const ObjCContainerDecl *CD=0);
-
+
virtual void GenerateProtocol(const ObjCProtocolDecl *PD);
-
+
/// GetOrEmitProtocol - Get the protocol object for the given
/// declaration, emitting it if necessary. The return value has type
/// ProtocolPtrTy.
virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD)=0;
-
+
/// GetOrEmitProtocolRef - Get a forward reference to the protocol
/// object for the given declaration, emitting it if needed. These
/// forward references will be filled in with empty bodies if no
/// definition is seen. The return value has type ProtocolPtrTy.
virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD)=0;
};
-
+
class CGObjCMac : public CGObjCCommonMac {
private:
ObjCTypesHelper ObjCTypes;
@@ -942,7 +971,7 @@ private:
void EmitImageInfo();
/// EmitModuleInfo - Another marker encoding module level
- /// information.
+ /// information.
void EmitModuleInfo();
  /// EmitModuleSymbols - Emit module symbols, the list of defined
@@ -960,7 +989,7 @@ private:
/// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
/// for the given class.
- llvm::Value *EmitClassRef(CGBuilderTy &Builder,
+ llvm::Value *EmitClassRef(CGBuilderTy &Builder,
const ObjCInterfaceDecl *ID);
CodeGen::RValue EmitMessageSend(CodeGen::CodeGenFunction &CGF,
@@ -978,7 +1007,7 @@ private:
/// IvarListPtrTy.
llvm::Constant *EmitIvarList(const ObjCImplementationDecl *ID,
bool ForClass);
-
+
/// EmitMetaClass - Emit a forward reference to the class structure
/// for the metaclass of the given interface. The return value has
/// type ClassPtrTy.
@@ -989,9 +1018,9 @@ private:
llvm::Constant *EmitMetaClass(const ObjCImplementationDecl *ID,
llvm::Constant *Protocols,
const ConstantVector &Methods);
-
+
llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD);
-
+
llvm::Constant *GetMethodDescriptionConstant(const ObjCMethodDecl *MD);
/// EmitMethodList - Emit the method list for the given
@@ -1001,7 +1030,7 @@ private:
const ConstantVector &Methods);
/// EmitMethodDescList - Emit a method description list for a list of
- /// method declarations.
+ /// method declarations.
/// - TypeName: The name for the type containing the methods.
/// - IsProtocol: True iff these methods are for a protocol.
/// - ClassMethds: True iff these are class methods.
@@ -1044,12 +1073,12 @@ private:
/// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
/// for the given selector.
llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel);
-
- public:
+
+public:
CGObjCMac(CodeGen::CodeGenModule &cgm);
virtual llvm::Function *ModuleInitFunction();
-
+
virtual CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
QualType ResultType,
Selector Sel,
@@ -1058,7 +1087,7 @@ private:
const CallArgList &CallArgs,
const ObjCMethodDecl *Method);
- virtual CodeGen::RValue
+ virtual CodeGen::RValue
GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
QualType ResultType,
Selector Sel,
@@ -1066,8 +1095,9 @@ private:
bool isCategoryImpl,
llvm::Value *Receiver,
bool IsClassMessage,
- const CallArgList &CallArgs);
-
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method);
+
virtual llvm::Value *GetClass(CGBuilderTy &Builder,
const ObjCInterfaceDecl *ID);
@@ -1084,26 +1114,30 @@ private:
virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
const ObjCProtocolDecl *PD);
-
+
virtual llvm::Constant *GetPropertyGetFunction();
virtual llvm::Constant *GetPropertySetFunction();
virtual llvm::Constant *EnumerationMutationFunction();
-
+
virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
const Stmt &S);
virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
const ObjCAtThrowStmt &S);
virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
- llvm::Value *AddrWeakObj);
+ llvm::Value *AddrWeakObj);
virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst);
+ llvm::Value *src, llvm::Value *dst);
virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dest);
virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest);
+ llvm::Value *src, llvm::Value *dest,
+ llvm::Value *ivarOffset);
virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dest);
-
+ virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *dest, llvm::Value *src,
+ QualType Ty);
+
virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
QualType ObjectTy,
llvm::Value *BaseValue,
@@ -1113,30 +1147,30 @@ private:
const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar);
};
-
+
class CGObjCNonFragileABIMac : public CGObjCCommonMac {
private:
ObjCNonFragileABITypesHelper ObjCTypes;
llvm::GlobalVariable* ObjCEmptyCacheVar;
llvm::GlobalVariable* ObjCEmptyVtableVar;
-
+
/// SuperClassReferences - uniqued super class references.
llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> SuperClassReferences;
-
+
/// MetaClassReferences - uniqued meta class references.
llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> MetaClassReferences;
/// EHTypeReferences - uniqued class ehtype references.
llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> EHTypeReferences;
-
+
/// NonLegacyDispatchMethods - List of methods for which we do *not* generate
/// legacy messaging dispatch.
llvm::DenseSet<Selector> NonLegacyDispatchMethods;
-
+
/// LegacyDispatchedSelector - Returns true if SEL is not in the list of
/// NonLegacyDispatchMethods; false otherwise.
bool LegacyDispatchedSelector(Selector Sel);
-
+
/// FinishNonFragileABIModule - Write out global data structures at the end of
/// processing a translation unit.
void FinishNonFragileABIModule();
@@ -1147,20 +1181,20 @@ private:
const char *SymbolName,
const char *SectionName);
- llvm::GlobalVariable * BuildClassRoTInitializer(unsigned flags,
- unsigned InstanceStart,
- unsigned InstanceSize,
- const ObjCImplementationDecl *ID);
+ llvm::GlobalVariable * BuildClassRoTInitializer(unsigned flags,
+ unsigned InstanceStart,
+ unsigned InstanceSize,
+ const ObjCImplementationDecl *ID);
llvm::GlobalVariable * BuildClassMetaData(std::string &ClassName,
- llvm::Constant *IsAGV,
+ llvm::Constant *IsAGV,
llvm::Constant *SuperClassGV,
llvm::Constant *ClassRoGV,
bool HiddenVisibility);
-
+
llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD);
-
+
llvm::Constant *GetMethodDescriptionConstant(const ObjCMethodDecl *MD);
-
+
/// EmitMethodList - Emit the method list for the given
/// implementation. The return value has type MethodListnfABITy.
llvm::Constant *EmitMethodList(const std::string &Name,
@@ -1172,28 +1206,28 @@ private:
/// interface ivars will be emitted. The return value has type
/// IvarListnfABIPtrTy.
llvm::Constant *EmitIvarList(const ObjCImplementationDecl *ID);
-
+
llvm::Constant *EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
const ObjCIvarDecl *Ivar,
unsigned long int offset);
-
+
/// GetOrEmitProtocol - Get the protocol object for the given
/// declaration, emitting it if necessary. The return value has type
/// ProtocolPtrTy.
virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD);
-
+
/// GetOrEmitProtocolRef - Get a forward reference to the protocol
/// object for the given declaration, emitting it if needed. These
/// forward references will be filled in with empty bodies if no
/// definition is seen. The return value has type ProtocolPtrTy.
virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD);
-
+
/// EmitProtocolList - Generate the list of referenced
/// protocols. The return value has type ProtocolListPtrTy.
llvm::Constant *EmitProtocolList(const std::string &Name,
ObjCProtocolDecl::protocol_iterator begin,
ObjCProtocolDecl::protocol_iterator end);
-
+
CodeGen::RValue EmitMessageSend(CodeGen::CodeGenFunction &CGF,
QualType ResultType,
Selector Sel,
@@ -1205,29 +1239,29 @@ private:
/// GetClassGlobal - Return the global variable for the Objective-C
/// class of the given name.
llvm::GlobalVariable *GetClassGlobal(const std::string &Name);
-
+
/// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
/// for the given class reference.
- llvm::Value *EmitClassRef(CGBuilderTy &Builder,
+ llvm::Value *EmitClassRef(CGBuilderTy &Builder,
const ObjCInterfaceDecl *ID);
-
+
/// EmitSuperClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
/// for the given super class reference.
- llvm::Value *EmitSuperClassRef(CGBuilderTy &Builder,
- const ObjCInterfaceDecl *ID);
-
+ llvm::Value *EmitSuperClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
/// EmitMetaClassRef - Return a Value * of the address of _class_t
/// meta-data
- llvm::Value *EmitMetaClassRef(CGBuilderTy &Builder,
+ llvm::Value *EmitMetaClassRef(CGBuilderTy &Builder,
const ObjCInterfaceDecl *ID);
/// ObjCIvarOffsetVariable - Returns the ivar offset variable for
/// the given ivar.
///
llvm::GlobalVariable * ObjCIvarOffsetVariable(
- const ObjCInterfaceDecl *ID,
- const ObjCIvarDecl *Ivar);
-
+ const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar);
+
/// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
/// for the given selector.
llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel);
@@ -1237,10 +1271,10 @@ private:
llvm::Value *GetInterfaceEHType(const ObjCInterfaceDecl *ID,
bool ForDefinition);
- const char *getMetaclassSymbolPrefix() const {
+ const char *getMetaclassSymbolPrefix() const {
return "OBJC_METACLASS_$_";
}
-
+
const char *getClassSymbolPrefix() const {
return "OBJC_CLASS_$_";
}
@@ -1248,13 +1282,13 @@ private:
void GetClassSizeInfo(const ObjCImplementationDecl *OID,
uint32_t &InstanceStart,
uint32_t &InstanceSize);
-
+
// Shamelessly stolen from Analysis/CFRefCount.cpp
Selector GetNullarySelector(const char* name) const {
IdentifierInfo* II = &CGM.getContext().Idents.get(name);
return CGM.getContext().Selectors.getSelector(0, &II);
}
-
+
Selector GetUnarySelector(const char* name) const {
IdentifierInfo* II = &CGM.getContext().Idents.get(name);
return CGM.getContext().Selectors.getSelector(1, &II);
@@ -1268,7 +1302,7 @@ public:
CGObjCNonFragileABIMac(CodeGen::CodeGenModule &cgm);
// FIXME. All stubs for now!
virtual llvm::Function *ModuleInitFunction();
-
+
virtual CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
QualType ResultType,
Selector Sel,
@@ -1276,8 +1310,8 @@ public:
bool IsClassMessage,
const CallArgList &CallArgs,
const ObjCMethodDecl *Method);
-
- virtual CodeGen::RValue
+
+ virtual CodeGen::RValue
GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
QualType ResultType,
Selector Sel,
@@ -1285,11 +1319,12 @@ public:
bool isCategoryImpl,
llvm::Value *Receiver,
bool IsClassMessage,
- const CallArgList &CallArgs);
-
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method);
+
virtual llvm::Value *GetClass(CGBuilderTy &Builder,
const ObjCInterfaceDecl *ID);
-
+
virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel)
{ return EmitSelector(Builder, Sel); }
@@ -1298,23 +1333,23 @@ public:
virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
const ObjCMethodDecl *Method)
{ return EmitSelector(Builder, Method->getSelector()); }
-
+
virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
-
+
virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
const ObjCProtocolDecl *PD);
-
- virtual llvm::Constant *GetPropertyGetFunction() {
+
+ virtual llvm::Constant *GetPropertyGetFunction() {
return ObjCTypes.getGetPropertyFn();
}
- virtual llvm::Constant *GetPropertySetFunction() {
- return ObjCTypes.getSetPropertyFn();
+ virtual llvm::Constant *GetPropertySetFunction() {
+ return ObjCTypes.getSetPropertyFn();
}
virtual llvm::Constant *EnumerationMutationFunction() {
return ObjCTypes.getEnumerationMutationFn();
}
-
+
virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
const Stmt &S);
virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
@@ -1326,9 +1361,13 @@ public:
virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dest);
virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dest);
+ llvm::Value *src, llvm::Value *dest,
+ llvm::Value *ivarOffset);
virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dest);
+ virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *dest, llvm::Value *src,
+ QualType Ty);
virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
QualType ObjectTy,
llvm::Value *BaseValue,
@@ -1338,25 +1377,26 @@ public:
const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar);
};
-
+
} // end anonymous namespace
/* *** Helper Functions *** */
/// getConstantGEP() - Help routine to construct simple GEPs.
-static llvm::Const