diff options
author | Ed Schouten <ed@FreeBSD.org> | 2009-06-02 17:58:47 +0000 |
---|---|---|
committer | Ed Schouten <ed@FreeBSD.org> | 2009-06-02 17:58:47 +0000 |
commit | ec2b103c267a06a66e926f62cd96767b280f5cf5 (patch) | |
tree | ce7d964cbb5e39695b71481698f10cb099c23d4a /lib/CodeGen | |
download | src-ec2b103c267a06a66e926f62cd96767b280f5cf5.tar.gz src-ec2b103c267a06a66e926f62cd96767b280f5cf5.zip |
Import Clang, at r72732.vendor/clang/clang-r72732
Notes
Notes:
svn path=/vendor/clang/dist/; revision=193326
svn path=/vendor/clang/clang-r72732/; revision=193327; tag=vendor/clang/clang-r72732
Diffstat (limited to 'lib/CodeGen')
35 files changed, 26587 insertions, 0 deletions
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h new file mode 100644 index 000000000000..3de461242ab0 --- /dev/null +++ b/lib/CodeGen/ABIInfo.h @@ -0,0 +1,133 @@ +//===----- ABIInfo.h - ABI information access & encapsulation ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#ifndef CLANG_CODEGEN_ABIINFO_H +#define CLANG_CODEGEN_ABIINFO_H + +namespace llvm { + class Type; +} + +namespace clang { + class ASTContext; + + // FIXME: This is a layering issue if we want to move ABIInfo + // down. Fortunately CGFunctionInfo has no real tie to CodeGen. + namespace CodeGen { + class CGFunctionInfo; + class CodeGenFunction; + } + + /* FIXME: All of this stuff should be part of the target interface + somehow. It is currently here because it is not clear how to factor + the targets to support this, since the Targets currently live in a + layer below types n'stuff. + */ + + /// ABIArgInfo - Helper class to encapsulate information about how a + /// specific C type should be passed to or returned from a function. + class ABIArgInfo { + public: + enum Kind { + Direct, /// Pass the argument directly using the normal + /// converted LLVM type. Complex and structure types + /// are passed using first class aggregates. + + Indirect, /// Pass the argument indirectly via a hidden pointer + /// with the specified alignment (0 indicates default + /// alignment). + + Ignore, /// Ignore the argument (treat as void). Useful for + /// void and empty structs. + + Coerce, /// Only valid for aggregate return types, the argument + /// should be accessed by coercion to a provided type. + + Expand, /// Only valid for aggregate argument types. The + /// structure should be expanded into consecutive + /// arguments for its constituent fields. 
Currently + /// expand is only allowed on structures whose fields + /// are all scalar types or are themselves expandable + /// types. + + KindFirst=Direct, KindLast=Expand + }; + + private: + Kind TheKind; + const llvm::Type *TypeData; + unsigned UIntData; + + ABIArgInfo(Kind K, const llvm::Type *TD=0, + unsigned UI=0) : TheKind(K), + TypeData(TD), + UIntData(UI) {} + public: + ABIArgInfo() : TheKind(Direct), TypeData(0), UIntData(0) {} + + static ABIArgInfo getDirect() { + return ABIArgInfo(Direct); + } + static ABIArgInfo getIgnore() { + return ABIArgInfo(Ignore); + } + static ABIArgInfo getCoerce(const llvm::Type *T) { + return ABIArgInfo(Coerce, T); + } + static ABIArgInfo getIndirect(unsigned Alignment) { + return ABIArgInfo(Indirect, 0, Alignment); + } + static ABIArgInfo getExpand() { + return ABIArgInfo(Expand); + } + + Kind getKind() const { return TheKind; } + bool isDirect() const { return TheKind == Direct; } + bool isIgnore() const { return TheKind == Ignore; } + bool isCoerce() const { return TheKind == Coerce; } + bool isIndirect() const { return TheKind == Indirect; } + bool isExpand() const { return TheKind == Expand; } + + // Coerce accessors + const llvm::Type *getCoerceToType() const { + assert(TheKind == Coerce && "Invalid kind!"); + return TypeData; + } + + // ByVal accessors + unsigned getIndirectAlign() const { + assert(TheKind == Indirect && "Invalid kind!"); + return UIntData; + } + + void dump() const; + }; + + /// ABIInfo - Target specific hooks for defining how a type should be + /// passed or returned from functions. + class ABIInfo { + public: + virtual ~ABIInfo(); + + virtual void computeInfo(CodeGen::CGFunctionInfo &FI, + ASTContext &Ctx) const = 0; + + /// EmitVAArg - Emit the target dependent code to load a value of + /// \arg Ty from the va_list pointed to by \arg VAListAddr. + + // FIXME: This is a gaping layering violation if we wanted to drop + // the ABI information any lower than CodeGen. 
Of course, for + // VAArg handling it has to be at this level; there is no way to + // abstract this out. + virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, + CodeGen::CodeGenFunction &CGF) const = 0; + }; +} // end namespace clang + +#endif diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp new file mode 100644 index 000000000000..ead689cc01bf --- /dev/null +++ b/lib/CodeGen/CGBlocks.cpp @@ -0,0 +1,1037 @@ +//===--- CGBlocks.cpp - Emit LLVM Code for declarations -------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This contains code to emit blocks. +// +//===----------------------------------------------------------------------===// + +#include "CodeGenFunction.h" +#include "CodeGenModule.h" +#include "clang/AST/DeclObjC.h" +#include "llvm/Module.h" +#include "llvm/Target/TargetData.h" +#include <algorithm> +using namespace clang; +using namespace CodeGen; + +llvm::Constant *CodeGenFunction:: +BuildDescriptorBlockDecl(bool BlockHasCopyDispose, uint64_t Size, + const llvm::StructType* Ty, + std::vector<HelperInfo> *NoteForHelper) { + const llvm::Type *UnsignedLongTy + = CGM.getTypes().ConvertType(getContext().UnsignedLongTy); + llvm::Constant *C; + std::vector<llvm::Constant*> Elts; + + // reserved + C = llvm::ConstantInt::get(UnsignedLongTy, 0); + Elts.push_back(C); + + // Size + // FIXME: What is the right way to say this doesn't fit? We should give + // a user diagnostic in that case. Better fix would be to change the + // API to size_t. 
+ C = llvm::ConstantInt::get(UnsignedLongTy, Size); + Elts.push_back(C); + + if (BlockHasCopyDispose) { + // copy_func_helper_decl + Elts.push_back(BuildCopyHelper(Ty, NoteForHelper)); + + // destroy_func_decl + Elts.push_back(BuildDestroyHelper(Ty, NoteForHelper)); + } + + C = llvm::ConstantStruct::get(Elts); + + C = new llvm::GlobalVariable(C->getType(), true, + llvm::GlobalValue::InternalLinkage, + C, "__block_descriptor_tmp", &CGM.getModule()); + return C; +} + +llvm::Constant *BlockModule::getNSConcreteGlobalBlock() { + if (NSConcreteGlobalBlock == 0) + NSConcreteGlobalBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty, + "_NSConcreteGlobalBlock"); + return NSConcreteGlobalBlock; +} + +llvm::Constant *BlockModule::getNSConcreteStackBlock() { + if (NSConcreteStackBlock == 0) + NSConcreteStackBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty, + "_NSConcreteStackBlock"); + return NSConcreteStackBlock; +} + +static void CollectBlockDeclRefInfo(const Stmt *S, + CodeGenFunction::BlockInfo &Info) { + for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end(); + I != E; ++I) + if (*I) + CollectBlockDeclRefInfo(*I, Info); + + if (const BlockDeclRefExpr *DE = dyn_cast<BlockDeclRefExpr>(S)) { + // FIXME: Handle enums. + if (isa<FunctionDecl>(DE->getDecl())) + return; + + if (DE->isByRef()) + Info.ByRefDeclRefs.push_back(DE); + else + Info.ByCopyDeclRefs.push_back(DE); + } +} + +/// CanBlockBeGlobal - Given a BlockInfo struct, determines if a block can be +/// declared as a global variable instead of on the stack. +static bool CanBlockBeGlobal(const CodeGenFunction::BlockInfo &Info) { + return Info.ByRefDeclRefs.empty() && Info.ByCopyDeclRefs.empty(); +} + +// FIXME: Push most into CGM, passing down a few bits, like current function +// name. 
+llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) { + + std::string Name = CurFn->getName(); + CodeGenFunction::BlockInfo Info(0, Name.c_str()); + CollectBlockDeclRefInfo(BE->getBody(), Info); + + // Check if the block can be global. + // FIXME: This test doesn't work for nested blocks yet. Longer term, I'd like + // to just have one code path. We should move this function into CGM and pass + // CGF, then we can just check to see if CGF is 0. + if (0 && CanBlockBeGlobal(Info)) + return CGM.GetAddrOfGlobalBlock(BE, Name.c_str()); + + std::vector<llvm::Constant*> Elts(5); + llvm::Constant *C; + llvm::Value *V; + + { + // C = BuildBlockStructInitlist(); + unsigned int flags = BLOCK_HAS_DESCRIPTOR; + + // We run this first so that we set BlockHasCopyDispose from the entire + // block literal. + // __invoke + uint64_t subBlockSize, subBlockAlign; + llvm::SmallVector<const Expr *, 8> subBlockDeclRefDecls; + bool subBlockHasCopyDispose = false; + llvm::Function *Fn + = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, CurFuncDecl, LocalDeclMap, + subBlockSize, + subBlockAlign, + subBlockDeclRefDecls, + subBlockHasCopyDispose); + BlockHasCopyDispose |= subBlockHasCopyDispose; + Elts[3] = Fn; + + // FIXME: Don't use BlockHasCopyDispose, it is set more often then + // necessary, for example: { ^{ __block int i; ^{ i = 1; }(); }(); } + if (subBlockHasCopyDispose) + flags |= BLOCK_HAS_COPY_DISPOSE; + + // __isa + C = CGM.getNSConcreteStackBlock(); + C = llvm::ConstantExpr::getBitCast(C, PtrToInt8Ty); + Elts[0] = C; + + // __flags + const llvm::IntegerType *IntTy = cast<llvm::IntegerType>( + CGM.getTypes().ConvertType(CGM.getContext().IntTy)); + C = llvm::ConstantInt::get(IntTy, flags); + Elts[1] = C; + + // __reserved + C = llvm::ConstantInt::get(IntTy, 0); + Elts[2] = C; + + if (subBlockDeclRefDecls.size() == 0) { + // __descriptor + Elts[4] = BuildDescriptorBlockDecl(subBlockHasCopyDispose, subBlockSize, 0, 0); + + // Optimize to being a global 
block. + Elts[0] = CGM.getNSConcreteGlobalBlock(); + Elts[1] = llvm::ConstantInt::get(IntTy, flags|BLOCK_IS_GLOBAL); + + C = llvm::ConstantStruct::get(Elts); + + char Name[32]; + sprintf(Name, "__block_holder_tmp_%d", CGM.getGlobalUniqueCount()); + C = new llvm::GlobalVariable(C->getType(), true, + llvm::GlobalValue::InternalLinkage, + C, Name, &CGM.getModule()); + QualType BPT = BE->getType(); + C = llvm::ConstantExpr::getBitCast(C, ConvertType(BPT)); + return C; + } + + std::vector<const llvm::Type *> Types(5+subBlockDeclRefDecls.size()); + for (int i=0; i<4; ++i) + Types[i] = Elts[i]->getType(); + Types[4] = PtrToInt8Ty; + + for (unsigned i=0; i < subBlockDeclRefDecls.size(); ++i) { + const Expr *E = subBlockDeclRefDecls[i]; + const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E); + QualType Ty = E->getType(); + if (BDRE && BDRE->isByRef()) { + uint64_t Align = getContext().getDeclAlignInBytes(BDRE->getDecl()); + Types[i+5] = llvm::PointerType::get(BuildByRefType(Ty, Align), 0); + } else + Types[i+5] = ConvertType(Ty); + } + + llvm::StructType *Ty = llvm::StructType::get(Types, true); + + llvm::AllocaInst *A = CreateTempAlloca(Ty); + A->setAlignment(subBlockAlign); + V = A; + + std::vector<HelperInfo> NoteForHelper(subBlockDeclRefDecls.size()); + int helpersize = 0; + + for (unsigned i=0; i<4; ++i) + Builder.CreateStore(Elts[i], Builder.CreateStructGEP(V, i, "block.tmp")); + + for (unsigned i=0; i < subBlockDeclRefDecls.size(); ++i) + { + // FIXME: Push const down. + Expr *E = const_cast<Expr*>(subBlockDeclRefDecls[i]); + DeclRefExpr *DR; + ValueDecl *VD; + + DR = dyn_cast<DeclRefExpr>(E); + // Skip padding. 
+ if (DR) continue; + + BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E); + VD = BDRE->getDecl(); + + llvm::Value* Addr = Builder.CreateStructGEP(V, i+5, "tmp"); + NoteForHelper[helpersize].index = i+5; + NoteForHelper[helpersize].RequiresCopying = BlockRequiresCopying(VD->getType()); + NoteForHelper[helpersize].flag + = VD->getType()->isBlockPointerType() ? BLOCK_FIELD_IS_BLOCK : BLOCK_FIELD_IS_OBJECT; + + if (LocalDeclMap[VD]) { + if (BDRE->isByRef()) { + NoteForHelper[helpersize].flag = BLOCK_FIELD_IS_BYREF | + // FIXME: Someone double check this. + (VD->getType().isObjCGCWeak() ? BLOCK_FIELD_IS_WEAK : 0); + const llvm::Type *Ty = Types[i+5]; + llvm::Value *Loc = LocalDeclMap[VD]; + Loc = Builder.CreateStructGEP(Loc, 1, "forwarding"); + Loc = Builder.CreateLoad(Loc, false); + Loc = Builder.CreateBitCast(Loc, Ty); + Builder.CreateStore(Loc, Addr); + ++helpersize; + continue; + } else + E = new (getContext()) DeclRefExpr (cast<NamedDecl>(VD), + VD->getType(), SourceLocation(), + false, false); + } + if (BDRE->isByRef()) { + NoteForHelper[helpersize].flag = BLOCK_FIELD_IS_BYREF | + // FIXME: Someone double check this. + (VD->getType().isObjCGCWeak() ? BLOCK_FIELD_IS_WEAK : 0); + E = new (getContext()) + UnaryOperator(E, UnaryOperator::AddrOf, + getContext().getPointerType(E->getType()), + SourceLocation()); + } + ++helpersize; + + RValue r = EmitAnyExpr(E, Addr, false); + if (r.isScalar()) { + llvm::Value *Loc = r.getScalarVal(); + const llvm::Type *Ty = Types[i+5]; + if (BDRE->isByRef()) { + // E is now the address of the value field, instead, we want the + // address of the actual ByRef struct. We optimize this slightly + // compared to gcc by not grabbing the forwarding slot as this must + // be done during Block_copy for us, and we can postpone the work + // until then. 
+ uint64_t offset = BlockDecls[BDRE->getDecl()]; + + llvm::Value *BlockLiteral = LoadBlockStruct(); + + Loc = Builder.CreateGEP(BlockLiteral, + llvm::ConstantInt::get(llvm::Type::Int64Ty, + offset), + "block.literal"); + Ty = llvm::PointerType::get(Ty, 0); + Loc = Builder.CreateBitCast(Loc, Ty); + Loc = Builder.CreateLoad(Loc, false); + // Loc = Builder.CreateBitCast(Loc, Ty); + } + Builder.CreateStore(Loc, Addr); + } else if (r.isComplex()) + // FIXME: implement + ErrorUnsupported(BE, "complex in block literal"); + else if (r.isAggregate()) + ; // Already created into the destination + else + assert (0 && "bad block variable"); + // FIXME: Ensure that the offset created by the backend for + // the struct matches the previously computed offset in BlockDecls. + } + NoteForHelper.resize(helpersize); + + // __descriptor + llvm::Value *Descriptor = BuildDescriptorBlockDecl(subBlockHasCopyDispose, + subBlockSize, Ty, + &NoteForHelper); + Descriptor = Builder.CreateBitCast(Descriptor, PtrToInt8Ty); + Builder.CreateStore(Descriptor, Builder.CreateStructGEP(V, 4, "block.tmp")); + } + + QualType BPT = BE->getType(); + return Builder.CreateBitCast(V, ConvertType(BPT)); +} + + +const llvm::Type *BlockModule::getBlockDescriptorType() { + if (BlockDescriptorType) + return BlockDescriptorType; + + const llvm::Type *UnsignedLongTy = + getTypes().ConvertType(getContext().UnsignedLongTy); + + // struct __block_descriptor { + // unsigned long reserved; + // unsigned long block_size; + // }; + BlockDescriptorType = llvm::StructType::get(UnsignedLongTy, + UnsignedLongTy, + NULL); + + getModule().addTypeName("struct.__block_descriptor", + BlockDescriptorType); + + return BlockDescriptorType; +} + +const llvm::Type *BlockModule::getGenericBlockLiteralType() { + if (GenericBlockLiteralType) + return GenericBlockLiteralType; + + const llvm::Type *BlockDescPtrTy = + llvm::PointerType::getUnqual(getBlockDescriptorType()); + + const llvm::IntegerType *IntTy = cast<llvm::IntegerType>( + 
getTypes().ConvertType(getContext().IntTy)); + + // struct __block_literal_generic { + // void *__isa; + // int __flags; + // int __reserved; + // void (*__invoke)(void *); + // struct __block_descriptor *__descriptor; + // }; + GenericBlockLiteralType = llvm::StructType::get(PtrToInt8Ty, + IntTy, + IntTy, + PtrToInt8Ty, + BlockDescPtrTy, + NULL); + + getModule().addTypeName("struct.__block_literal_generic", + GenericBlockLiteralType); + + return GenericBlockLiteralType; +} + +const llvm::Type *BlockModule::getGenericExtendedBlockLiteralType() { + if (GenericExtendedBlockLiteralType) + return GenericExtendedBlockLiteralType; + + const llvm::Type *BlockDescPtrTy = + llvm::PointerType::getUnqual(getBlockDescriptorType()); + + const llvm::IntegerType *IntTy = cast<llvm::IntegerType>( + getTypes().ConvertType(getContext().IntTy)); + + // struct __block_literal_generic { + // void *__isa; + // int __flags; + // int __reserved; + // void (*__invoke)(void *); + // struct __block_descriptor *__descriptor; + // void *__copy_func_helper_decl; + // void *__destroy_func_decl; + // }; + GenericExtendedBlockLiteralType = llvm::StructType::get(PtrToInt8Ty, + IntTy, + IntTy, + PtrToInt8Ty, + BlockDescPtrTy, + PtrToInt8Ty, + PtrToInt8Ty, + NULL); + + getModule().addTypeName("struct.__block_literal_extended_generic", + GenericExtendedBlockLiteralType); + + return GenericExtendedBlockLiteralType; +} + +RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E) { + const BlockPointerType *BPT = + E->getCallee()->getType()->getAsBlockPointerType(); + + llvm::Value *Callee = EmitScalarExpr(E->getCallee()); + + // Get a pointer to the generic block literal. + const llvm::Type *BlockLiteralTy = + llvm::PointerType::getUnqual(CGM.getGenericBlockLiteralType()); + + // Bitcast the callee to a block literal. + llvm::Value *BlockLiteral = + Builder.CreateBitCast(Callee, BlockLiteralTy, "block.literal"); + + // Get the function pointer from the literal. 
+ llvm::Value *FuncPtr = Builder.CreateStructGEP(BlockLiteral, 3, "tmp"); + + BlockLiteral = + Builder.CreateBitCast(BlockLiteral, + llvm::PointerType::getUnqual(llvm::Type::Int8Ty), + "tmp"); + + // Add the block literal. + QualType VoidPtrTy = getContext().getPointerType(getContext().VoidTy); + CallArgList Args; + Args.push_back(std::make_pair(RValue::get(BlockLiteral), VoidPtrTy)); + + QualType FnType = BPT->getPointeeType(); + + // And the rest of the arguments. + EmitCallArgs(Args, FnType->getAsFunctionProtoType(), + E->arg_begin(), E->arg_end()); + + // Load the function. + llvm::Value *Func = Builder.CreateLoad(FuncPtr, false, "tmp"); + + QualType ResultType = FnType->getAsFunctionType()->getResultType(); + + const CGFunctionInfo &FnInfo = + CGM.getTypes().getFunctionInfo(ResultType, Args); + + // Cast the function pointer to the right type. + const llvm::Type *BlockFTy = + CGM.getTypes().GetFunctionType(FnInfo, false); + + const llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy); + Func = Builder.CreateBitCast(Func, BlockFTyPtr); + + // And call the block. + return EmitCall(FnInfo, Func, Args); +} + +llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const BlockDeclRefExpr *E) { + uint64_t &offset = BlockDecls[E->getDecl()]; + + const llvm::Type *Ty; + Ty = CGM.getTypes().ConvertType(E->getDecl()->getType()); + + // See if we have already allocated an offset for this variable. + if (offset == 0) { + // Don't run the expensive check, unless we have to. + if (!BlockHasCopyDispose && BlockRequiresCopying(E->getType())) + BlockHasCopyDispose = true; + // if not, allocate one now. 
+ offset = getBlockOffset(E); + } + + llvm::Value *BlockLiteral = LoadBlockStruct(); + llvm::Value *V = Builder.CreateGEP(BlockLiteral, + llvm::ConstantInt::get(llvm::Type::Int64Ty, + offset), + "block.literal"); + if (E->isByRef()) { + bool needsCopyDispose = BlockRequiresCopying(E->getType()); + uint64_t Align = getContext().getDeclAlignInBytes(E->getDecl()); + const llvm::Type *PtrStructTy + = llvm::PointerType::get(BuildByRefType(E->getType(), Align), 0); + // The block literal will need a copy/destroy helper. + BlockHasCopyDispose = true; + Ty = PtrStructTy; + Ty = llvm::PointerType::get(Ty, 0); + V = Builder.CreateBitCast(V, Ty); + V = Builder.CreateLoad(V, false); + V = Builder.CreateStructGEP(V, 1, "forwarding"); + V = Builder.CreateLoad(V, false); + V = Builder.CreateBitCast(V, PtrStructTy); + V = Builder.CreateStructGEP(V, needsCopyDispose*2 + 4, "x"); + } else { + Ty = llvm::PointerType::get(Ty, 0); + V = Builder.CreateBitCast(V, Ty); + } + return V; +} + +void CodeGenFunction::BlockForwardSelf() { + const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl); + ImplicitParamDecl *SelfDecl = OMD->getSelfDecl(); + llvm::Value *&DMEntry = LocalDeclMap[SelfDecl]; + if (DMEntry) + return; + // FIXME - Eliminate BlockDeclRefExprs, clients don't need/want to care + BlockDeclRefExpr *BDRE = new (getContext()) + BlockDeclRefExpr(SelfDecl, + SelfDecl->getType(), SourceLocation(), false); + DMEntry = GetAddrOfBlockDecl(BDRE); +} + +llvm::Constant * +BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) { + // Generate the block descriptor. + const llvm::Type *UnsignedLongTy = Types.ConvertType(Context.UnsignedLongTy); + const llvm::IntegerType *IntTy = cast<llvm::IntegerType>( + getTypes().ConvertType(getContext().IntTy)); + + llvm::Constant *DescriptorFields[2]; + + // Reserved + DescriptorFields[0] = llvm::Constant::getNullValue(UnsignedLongTy); + + // Block literal size. 
For global blocks we just use the size of the generic + // block literal struct. + uint64_t BlockLiteralSize = + TheTargetData.getTypeStoreSizeInBits(getGenericBlockLiteralType()) / 8; + DescriptorFields[1] = llvm::ConstantInt::get(UnsignedLongTy,BlockLiteralSize); + + llvm::Constant *DescriptorStruct = + llvm::ConstantStruct::get(&DescriptorFields[0], 2); + + llvm::GlobalVariable *Descriptor = + new llvm::GlobalVariable(DescriptorStruct->getType(), true, + llvm::GlobalVariable::InternalLinkage, + DescriptorStruct, "__block_descriptor_global", + &getModule()); + + // Generate the constants for the block literal. + llvm::Constant *LiteralFields[5]; + + CodeGenFunction::BlockInfo Info(0, n); + uint64_t subBlockSize, subBlockAlign; + llvm::SmallVector<const Expr *, 8> subBlockDeclRefDecls; + bool subBlockHasCopyDispose = false; + llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap; + llvm::Function *Fn + = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, 0, LocalDeclMap, + subBlockSize, + subBlockAlign, + subBlockDeclRefDecls, + subBlockHasCopyDispose); + assert(subBlockSize == BlockLiteralSize + && "no imports allowed for global block"); + + // isa + LiteralFields[0] = getNSConcreteGlobalBlock(); + + // Flags + LiteralFields[1] = + llvm::ConstantInt::get(IntTy, BLOCK_IS_GLOBAL | BLOCK_HAS_DESCRIPTOR); + + // Reserved + LiteralFields[2] = llvm::Constant::getNullValue(IntTy); + + // Function + LiteralFields[3] = Fn; + + // Descriptor + LiteralFields[4] = Descriptor; + + llvm::Constant *BlockLiteralStruct = + llvm::ConstantStruct::get(&LiteralFields[0], 5); + + llvm::GlobalVariable *BlockLiteral = + new llvm::GlobalVariable(BlockLiteralStruct->getType(), true, + llvm::GlobalVariable::InternalLinkage, + BlockLiteralStruct, "__block_literal_global", + &getModule()); + + return BlockLiteral; +} + +llvm::Value *CodeGenFunction::LoadBlockStruct() { + return Builder.CreateLoad(LocalDeclMap[getBlockStructDecl()], "self"); +} + +llvm::Function * 
+CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr, + const BlockInfo& Info, + const Decl *OuterFuncDecl, + llvm::DenseMap<const Decl*, llvm::Value*> ldm, + uint64_t &Size, + uint64_t &Align, + llvm::SmallVector<const Expr *, 8> &subBlockDeclRefDecls, + bool &subBlockHasCopyDispose) { + + // Check if we should generate debug info for this block. + if (CGM.getDebugInfo()) + DebugInfo = CGM.getDebugInfo(); + + // Arrange for local static and local extern declarations to appear + // to be local to this function as well, as they are directly referenced + // in a block. + for (llvm::DenseMap<const Decl *, llvm::Value*>::iterator i = ldm.begin(); + i != ldm.end(); + ++i) { + const VarDecl *VD = dyn_cast<VarDecl>(i->first); + + if (VD->getStorageClass() == VarDecl::Static || VD->hasExternalStorage()) + LocalDeclMap[VD] = i->second; + } + + // FIXME: We need to rearrange the code for copy/dispose so we have this + // sooner, so we can calculate offsets correctly. + if (!BlockHasCopyDispose) + BlockOffset = CGM.getTargetData() + .getTypeStoreSizeInBits(CGM.getGenericBlockLiteralType()) / 8; + else + BlockOffset = CGM.getTargetData() + .getTypeStoreSizeInBits(CGM.getGenericExtendedBlockLiteralType()) / 8; + BlockAlign = getContext().getTypeAlign(getContext().VoidPtrTy) / 8; + + const FunctionType *BlockFunctionType = BExpr->getFunctionType(); + QualType ResultType; + bool IsVariadic; + if (const FunctionProtoType *FTy = + dyn_cast<FunctionProtoType>(BlockFunctionType)) { + ResultType = FTy->getResultType(); + IsVariadic = FTy->isVariadic(); + } + else { + // K&R style block. 
+ ResultType = BlockFunctionType->getResultType(); + IsVariadic = false; + } + + FunctionArgList Args; + + const BlockDecl *BD = BExpr->getBlockDecl(); + + // FIXME: This leaks + ImplicitParamDecl *SelfDecl = + ImplicitParamDecl::Create(getContext(), 0, + SourceLocation(), 0, + getContext().getPointerType(getContext().VoidTy)); + + Args.push_back(std::make_pair(SelfDecl, SelfDecl->getType())); + BlockStructDecl = SelfDecl; + + for (BlockDecl::param_const_iterator i = BD->param_begin(), + e = BD->param_end(); i != e; ++i) + Args.push_back(std::make_pair(*i, (*i)->getType())); + + const CGFunctionInfo &FI = + CGM.getTypes().getFunctionInfo(ResultType, Args); + + std::string Name = std::string("__") + Info.Name + "_block_invoke_"; + CodeGenTypes &Types = CGM.getTypes(); + const llvm::FunctionType *LTy = Types.GetFunctionType(FI, IsVariadic); + + llvm::Function *Fn = + llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage, + Name, + &CGM.getModule()); + + CGM.SetInternalFunctionAttributes(BD, Fn, FI); + + StartFunction(BD, ResultType, Fn, Args, + BExpr->getBody()->getLocEnd()); + CurFuncDecl = OuterFuncDecl; + CurCodeDecl = BD; + EmitStmt(BExpr->getBody()); + FinishFunction(cast<CompoundStmt>(BExpr->getBody())->getRBracLoc()); + + // The runtime needs a minimum alignment of a void *. 
+ uint64_t MinAlign = getContext().getTypeAlign(getContext().VoidPtrTy) / 8; + BlockOffset = llvm::RoundUpToAlignment(BlockOffset, MinAlign); + + Size = BlockOffset; + Align = BlockAlign; + subBlockDeclRefDecls = BlockDeclRefDecls; + subBlockHasCopyDispose |= BlockHasCopyDispose; + return Fn; +} + +uint64_t BlockFunction::getBlockOffset(const BlockDeclRefExpr *BDRE) { + const ValueDecl *D = dyn_cast<ValueDecl>(BDRE->getDecl()); + + uint64_t Size = getContext().getTypeSize(D->getType()) / 8; + uint64_t Align = getContext().getDeclAlignInBytes(D); + + if (BDRE->isByRef()) { + Size = getContext().getTypeSize(getContext().VoidPtrTy) / 8; + Align = getContext().getTypeAlign(getContext().VoidPtrTy) / 8; + } + + assert ((Align > 0) && "alignment must be 1 byte or more"); + + uint64_t OldOffset = BlockOffset; + + // Ensure proper alignment, even if it means we have to have a gap + BlockOffset = llvm::RoundUpToAlignment(BlockOffset, Align); + BlockAlign = std::max(Align, BlockAlign); + + uint64_t Pad = BlockOffset - OldOffset; + if (Pad) { + llvm::ArrayType::get(llvm::Type::Int8Ty, Pad); + QualType PadTy = getContext().getConstantArrayType(getContext().CharTy, + llvm::APInt(32, Pad), + ArrayType::Normal, 0); + ValueDecl *PadDecl = VarDecl::Create(getContext(), 0, SourceLocation(), + 0, QualType(PadTy), VarDecl::None, + SourceLocation()); + Expr *E; + E = new (getContext()) DeclRefExpr(PadDecl, PadDecl->getType(), + SourceLocation(), false, false); + BlockDeclRefDecls.push_back(E); + } + BlockDeclRefDecls.push_back(BDRE); + + BlockOffset += Size; + return BlockOffset-Size; +} + +llvm::Constant *BlockFunction:: +GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T, + std::vector<HelperInfo> *NoteForHelperp) { + QualType R = getContext().VoidTy; + + FunctionArgList Args; + // FIXME: This leaks + ImplicitParamDecl *Dst = + ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0, + getContext().getPointerType(getContext().VoidTy)); + 
Args.push_back(std::make_pair(Dst, Dst->getType())); + ImplicitParamDecl *Src = + ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0, + getContext().getPointerType(getContext().VoidTy)); + Args.push_back(std::make_pair(Src, Src->getType())); + + const CGFunctionInfo &FI = + CGM.getTypes().getFunctionInfo(R, Args); + + std::string Name = std::string("__copy_helper_block_"); + CodeGenTypes &Types = CGM.getTypes(); + const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false); + + llvm::Function *Fn = + llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage, + Name, + &CGM.getModule()); + + IdentifierInfo *II + = &CGM.getContext().Idents.get("__copy_helper_block_"); + + FunctionDecl *FD = FunctionDecl::Create(getContext(), + getContext().getTranslationUnitDecl(), + SourceLocation(), II, R, + FunctionDecl::Static, false, + true); + CGF.StartFunction(FD, R, Fn, Args, SourceLocation()); + + llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src); + llvm::Type *PtrPtrT; + + if (NoteForHelperp) { + std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp; + + PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0); + SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT); + SrcObj = Builder.CreateLoad(SrcObj); + + llvm::Value *DstObj = CGF.GetAddrOfLocalVar(Dst); + llvm::Type *PtrPtrT; + PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0); + DstObj = Builder.CreateBitCast(DstObj, PtrPtrT); + DstObj = Builder.CreateLoad(DstObj); + + for (unsigned i=0; i < NoteForHelper.size(); ++i) { + int flag = NoteForHelper[i].flag; + int index = NoteForHelper[i].index; + + if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF) + || NoteForHelper[i].RequiresCopying) { + llvm::Value *Srcv = SrcObj; + Srcv = Builder.CreateStructGEP(Srcv, index); + Srcv = Builder.CreateBitCast(Srcv, + llvm::PointerType::get(PtrToInt8Ty, 0)); + Srcv = Builder.CreateLoad(Srcv); + + llvm::Value *Dstv = Builder.CreateStructGEP(DstObj, index); + Dstv = Builder.CreateBitCast(Dstv, 
PtrToInt8Ty); + + llvm::Value *N = llvm::ConstantInt::get(llvm::Type::Int32Ty, flag); + llvm::Value *F = getBlockObjectAssign(); + Builder.CreateCall3(F, Dstv, Srcv, N); + } + } + } + + CGF.FinishFunction(); + + return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty); +} + +llvm::Constant *BlockFunction:: +GenerateDestroyHelperFunction(bool BlockHasCopyDispose, + const llvm::StructType* T, + std::vector<HelperInfo> *NoteForHelperp) { + QualType R = getContext().VoidTy; + + FunctionArgList Args; + // FIXME: This leaks + ImplicitParamDecl *Src = + ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0, + getContext().getPointerType(getContext().VoidTy)); + + Args.push_back(std::make_pair(Src, Src->getType())); + + const CGFunctionInfo &FI = + CGM.getTypes().getFunctionInfo(R, Args); + + std::string Name = std::string("__destroy_helper_block_"); + CodeGenTypes &Types = CGM.getTypes(); + const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false); + + llvm::Function *Fn = + llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage, + Name, + &CGM.getModule()); + + IdentifierInfo *II + = &CGM.getContext().Idents.get("__destroy_helper_block_"); + + FunctionDecl *FD = FunctionDecl::Create(getContext(), + getContext().getTranslationUnitDecl(), + SourceLocation(), II, R, + FunctionDecl::Static, false, + true); + CGF.StartFunction(FD, R, Fn, Args, SourceLocation()); + + if (NoteForHelperp) { + std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp; + + llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src); + llvm::Type *PtrPtrT; + PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0); + SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT); + SrcObj = Builder.CreateLoad(SrcObj); + + for (unsigned i=0; i < NoteForHelper.size(); ++i) { + int flag = NoteForHelper[i].flag; + int index = NoteForHelper[i].index; + + if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF) + || NoteForHelper[i].RequiresCopying) { + llvm::Value *Srcv = SrcObj; + Srcv = 
Builder.CreateStructGEP(Srcv, index); + Srcv = Builder.CreateBitCast(Srcv, + llvm::PointerType::get(PtrToInt8Ty, 0)); + Srcv = Builder.CreateLoad(Srcv); + + BuildBlockRelease(Srcv, flag); + } + } + } + + CGF.FinishFunction(); + + return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty); +} + +llvm::Constant *BlockFunction::BuildCopyHelper(const llvm::StructType *T, + std::vector<HelperInfo> *NoteForHelper) { + return CodeGenFunction(CGM).GenerateCopyHelperFunction(BlockHasCopyDispose, + T, NoteForHelper); +} + +llvm::Constant *BlockFunction::BuildDestroyHelper(const llvm::StructType *T, + std::vector<HelperInfo> *NoteForHelperp) { + return CodeGenFunction(CGM).GenerateDestroyHelperFunction(BlockHasCopyDispose, + T, NoteForHelperp); +} + +llvm::Constant *BlockFunction:: +GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) { + QualType R = getContext().VoidTy; + + FunctionArgList Args; + // FIXME: This leaks + ImplicitParamDecl *Dst = + ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0, + getContext().getPointerType(getContext().VoidTy)); + Args.push_back(std::make_pair(Dst, Dst->getType())); + + // FIXME: This leaks + ImplicitParamDecl *Src = + ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0, + getContext().getPointerType(getContext().VoidTy)); + Args.push_back(std::make_pair(Src, Src->getType())); + + const CGFunctionInfo &FI = + CGM.getTypes().getFunctionInfo(R, Args); + + std::string Name = std::string("__Block_byref_id_object_copy_"); + CodeGenTypes &Types = CGM.getTypes(); + const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false); + + llvm::Function *Fn = + llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage, + Name, + &CGM.getModule()); + + IdentifierInfo *II + = &CGM.getContext().Idents.get("__Block_byref_id_object_copy_"); + + FunctionDecl *FD = FunctionDecl::Create(getContext(), + getContext().getTranslationUnitDecl(), + SourceLocation(), II, R, + FunctionDecl::Static, false, + true); + 
CGF.StartFunction(FD, R, Fn, Args, SourceLocation()); + + // dst->x + llvm::Value *V = CGF.GetAddrOfLocalVar(Dst); + V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0)); + V = Builder.CreateLoad(V); + V = Builder.CreateStructGEP(V, 6, "x"); + llvm::Value *DstObj = Builder.CreateBitCast(V, PtrToInt8Ty); + + // src->x + V = CGF.GetAddrOfLocalVar(Src); + V = Builder.CreateLoad(V); + V = Builder.CreateBitCast(V, T); + V = Builder.CreateStructGEP(V, 6, "x"); + V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0)); + llvm::Value *SrcObj = Builder.CreateLoad(V); + + flag |= BLOCK_BYREF_CALLER; + + llvm::Value *N = llvm::ConstantInt::get(llvm::Type::Int32Ty, flag); + llvm::Value *F = getBlockObjectAssign(); + Builder.CreateCall3(F, DstObj, SrcObj, N); + + CGF.FinishFunction(); + + return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty); +} + +llvm::Constant * +BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T, + int flag) { + QualType R = getContext().VoidTy; + + FunctionArgList Args; + // FIXME: This leaks + ImplicitParamDecl *Src = + ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0, + getContext().getPointerType(getContext().VoidTy)); + + Args.push_back(std::make_pair(Src, Src->getType())); + + const CGFunctionInfo &FI = + CGM.getTypes().getFunctionInfo(R, Args); + + std::string Name = std::string("__Block_byref_id_object_dispose_"); + CodeGenTypes &Types = CGM.getTypes(); + const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false); + + llvm::Function *Fn = + llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage, + Name, + &CGM.getModule()); + + IdentifierInfo *II + = &CGM.getContext().Idents.get("__Block_byref_id_object_dispose_"); + + FunctionDecl *FD = FunctionDecl::Create(getContext(), + getContext().getTranslationUnitDecl(), + SourceLocation(), II, R, + FunctionDecl::Static, false, + true); + CGF.StartFunction(FD, R, Fn, Args, SourceLocation()); + + llvm::Value *V = CGF.GetAddrOfLocalVar(Src); + 
V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0)); + V = Builder.CreateLoad(V); + V = Builder.CreateStructGEP(V, 6, "x"); + V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0)); + V = Builder.CreateLoad(V); + + flag |= BLOCK_BYREF_CALLER; + BuildBlockRelease(V, flag); + CGF.FinishFunction(); + + return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty); +} + +llvm::Constant *BlockFunction::BuildbyrefCopyHelper(const llvm::Type *T, + int flag) { + return CodeGenFunction(CGM).GeneratebyrefCopyHelperFunction(T, flag); +} + +llvm::Constant *BlockFunction::BuildbyrefDestroyHelper(const llvm::Type *T, + int flag) { + return CodeGenFunction(CGM).GeneratebyrefDestroyHelperFunction(T, flag); +} + +llvm::Value *BlockFunction::getBlockObjectDispose() { + if (CGM.BlockObjectDispose == 0) { + const llvm::FunctionType *FTy; + std::vector<const llvm::Type*> ArgTys; + const llvm::Type *ResultType = llvm::Type::VoidTy; + ArgTys.push_back(PtrToInt8Ty); + ArgTys.push_back(llvm::Type::Int32Ty); + FTy = llvm::FunctionType::get(ResultType, ArgTys, false); + CGM.BlockObjectDispose + = CGM.CreateRuntimeFunction(FTy, "_Block_object_dispose"); + } + return CGM.BlockObjectDispose; +} + +llvm::Value *BlockFunction::getBlockObjectAssign() { + if (CGM.BlockObjectAssign == 0) { + const llvm::FunctionType *FTy; + std::vector<const llvm::Type*> ArgTys; + const llvm::Type *ResultType = llvm::Type::VoidTy; + ArgTys.push_back(PtrToInt8Ty); + ArgTys.push_back(PtrToInt8Ty); + ArgTys.push_back(llvm::Type::Int32Ty); + FTy = llvm::FunctionType::get(ResultType, ArgTys, false); + CGM.BlockObjectAssign + = CGM.CreateRuntimeFunction(FTy, "_Block_object_assign"); + } + return CGM.BlockObjectAssign; +} + +void BlockFunction::BuildBlockRelease(llvm::Value *V, int flag) { + llvm::Value *F = getBlockObjectDispose(); + llvm::Value *N; + V = Builder.CreateBitCast(V, PtrToInt8Ty); + N = llvm::ConstantInt::get(llvm::Type::Int32Ty, flag); + Builder.CreateCall2(F, V, N); +} + +ASTContext 
&BlockFunction::getContext() const { return CGM.getContext(); } + +BlockFunction::BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf, + CGBuilderTy &B) + : CGM(cgm), CGF(cgf), Builder(B) { + PtrToInt8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + + BlockHasCopyDispose = false; +} diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h new file mode 100644 index 000000000000..56d3a2d3b10f --- /dev/null +++ b/lib/CodeGen/CGBlocks.h @@ -0,0 +1,223 @@ +//===-- CGBlocks.h - state for LLVM CodeGen for blocks ----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This is the internal state used for llvm translation for block literals. +// +//===----------------------------------------------------------------------===// + +#ifndef CLANG_CODEGEN_CGBLOCKS_H +#define CLANG_CODEGEN_CGBLOCKS_H + +#include "CodeGenTypes.h" +#include "clang/AST/Type.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallVector.h" +#include "clang/Basic/TargetInfo.h" +#include "clang/AST/Expr.h" +#include "clang/AST/ExprCXX.h" +#include "clang/AST/ExprObjC.h" + +#include <vector> +#include <map> + +#include "CGBuilder.h" +#include "CGCall.h" +#include "CGValue.h" + +namespace llvm { + class Module; + class Constant; + class Function; + class GlobalValue; + class TargetData; + class FunctionType; + class Value; +} + +namespace clang { + +namespace CodeGen { +class CodeGenModule; + +class BlockBase { +public: + enum { + BLOCK_NEEDS_FREE = (1 << 24), + BLOCK_HAS_COPY_DISPOSE = (1 << 25), + BLOCK_HAS_CXX_OBJ = (1 << 26), + BLOCK_IS_GC = (1 << 27), + BLOCK_IS_GLOBAL = (1 << 28), + BLOCK_HAS_DESCRIPTOR = (1 << 29) + }; +}; + +class BlockModule : public BlockBase { + ASTContext &Context; + llvm::Module &TheModule; + const llvm::TargetData &TheTargetData; + 
CodeGenTypes &Types; + CodeGenModule &CGM; + + ASTContext &getContext() const { return Context; } + llvm::Module &getModule() const { return TheModule; } + CodeGenTypes &getTypes() { return Types; } + const llvm::TargetData &getTargetData() const { return TheTargetData; } +public: + llvm::Constant *getNSConcreteGlobalBlock(); + llvm::Constant *getNSConcreteStackBlock(); + int getGlobalUniqueCount() { return ++Block.GlobalUniqueCount; } + const llvm::Type *getBlockDescriptorType(); + + const llvm::Type *getGenericBlockLiteralType(); + const llvm::Type *getGenericExtendedBlockLiteralType(); + + llvm::Constant *GetAddrOfGlobalBlock(const BlockExpr *BE, const char *); + + /// NSConcreteGlobalBlock - Cached reference to the class pointer for global + /// blocks. + llvm::Constant *NSConcreteGlobalBlock; + + /// NSConcreteStackBlock - Cached reference to the class poinnter for stack + /// blocks. + llvm::Constant *NSConcreteStackBlock; + + const llvm::Type *BlockDescriptorType; + const llvm::Type *GenericBlockLiteralType; + const llvm::Type *GenericExtendedBlockLiteralType; + struct { + int GlobalUniqueCount; + } Block; + + llvm::Value *BlockObjectAssign; + llvm::Value *BlockObjectDispose; + const llvm::Type *PtrToInt8Ty; + + BlockModule(ASTContext &C, llvm::Module &M, const llvm::TargetData &TD, + CodeGenTypes &T, CodeGenModule &CodeGen) + : Context(C), TheModule(M), TheTargetData(TD), Types(T), + CGM(CodeGen), + NSConcreteGlobalBlock(0), NSConcreteStackBlock(0), BlockDescriptorType(0), + GenericBlockLiteralType(0), GenericExtendedBlockLiteralType(0), + BlockObjectAssign(0), BlockObjectDispose(0) { + Block.GlobalUniqueCount = 0; + PtrToInt8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + } +}; + +class BlockFunction : public BlockBase { + CodeGenModule &CGM; + CodeGenFunction &CGF; + ASTContext &getContext() const; + +public: + const llvm::Type *PtrToInt8Ty; + struct HelperInfo { + int index; + int flag; + bool RequiresCopying; + }; + + enum { + 
BLOCK_FIELD_IS_OBJECT = 3, /* id, NSObject, __attribute__((NSObject)), + block, ... */ + BLOCK_FIELD_IS_BLOCK = 7, /* a block variable */ + BLOCK_FIELD_IS_BYREF = 8, /* the on stack structure holding the __block + variable */ + BLOCK_FIELD_IS_WEAK = 16, /* declared __weak, only used in byref copy + helpers */ + BLOCK_BYREF_CALLER = 128 /* called from __block (byref) copy/dispose + support routines */ + }; + + /// BlockInfo - Information to generate a block literal. + struct BlockInfo { + /// BlockLiteralTy - The type of the block literal. + const llvm::Type *BlockLiteralTy; + + /// Name - the name of the function this block was created for, if any. + const char *Name; + + /// ByCopyDeclRefs - Variables from parent scopes that have been imported + /// into this block. + llvm::SmallVector<const BlockDeclRefExpr *, 8> ByCopyDeclRefs; + + // ByRefDeclRefs - __block variables from parent scopes that have been + // imported into this block. + llvm::SmallVector<const BlockDeclRefExpr *, 8> ByRefDeclRefs; + + BlockInfo(const llvm::Type *blt, const char *n) + : BlockLiteralTy(blt), Name(n) { + // Skip asm prefix, if any. + if (Name && Name[0] == '\01') + ++Name; + } + }; + + CGBuilderTy &Builder; + + BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf, CGBuilderTy &B); + + /// BlockOffset - The offset in bytes for the next allocation of an + /// imported block variable. + uint64_t BlockOffset; + /// BlockAlign - Maximal alignment needed for the Block expressed in bytes. + uint64_t BlockAlign; + + /// getBlockOffset - Allocate an offset for the ValueDecl from a + /// BlockDeclRefExpr in a block literal (BlockExpr). + uint64_t getBlockOffset(const BlockDeclRefExpr *E); + + /// BlockHasCopyDispose - True iff the block uses copy/dispose. + bool BlockHasCopyDispose; + + /// BlockDeclRefDecls - Decls from BlockDeclRefExprs in apperance order + /// in a block literal. Decls without names are used for padding. 
+ llvm::SmallVector<const Expr *, 8> BlockDeclRefDecls; + + /// BlockDecls - Offsets for all Decls in BlockDeclRefExprs. + std::map<const Decl*, uint64_t> BlockDecls; + + ImplicitParamDecl *BlockStructDecl; + ImplicitParamDecl *getBlockStructDecl() { return BlockStructDecl; } + + llvm::Constant *GenerateCopyHelperFunction(bool, const llvm::StructType *, + std::vector<HelperInfo> *); + llvm::Constant *GenerateDestroyHelperFunction(bool, const llvm::StructType *, + std::vector<HelperInfo> *); + + llvm::Constant *BuildCopyHelper(const llvm::StructType *, + std::vector<HelperInfo> *); + llvm::Constant *BuildDestroyHelper(const llvm::StructType *, + std::vector<HelperInfo> *); + + llvm::Constant *GeneratebyrefCopyHelperFunction(const llvm::Type *, int flag); + llvm::Constant *GeneratebyrefDestroyHelperFunction(const llvm::Type *T, int); + + llvm::Constant *BuildbyrefCopyHelper(const llvm::Type *T, int flag); + llvm::Constant *BuildbyrefDestroyHelper(const llvm::Type *T, int flag); + + llvm::Value *getBlockObjectAssign(); + llvm::Value *getBlockObjectDispose(); + void BuildBlockRelease(llvm::Value *DeclPtr, int flag = BLOCK_FIELD_IS_BYREF); + + bool BlockRequiresCopying(QualType Ty) { + if (Ty->isBlockPointerType()) + return true; + if (getContext().isObjCNSObjectType(Ty)) + return true; + if (getContext().isObjCObjectPointerType(Ty)) + return true; + return false; + } +}; + +} // end namespace CodeGen +} // end namespace clang + +#endif diff --git a/lib/CodeGen/CGBuilder.h b/lib/CodeGen/CGBuilder.h new file mode 100644 index 000000000000..ed56bd913779 --- /dev/null +++ b/lib/CodeGen/CGBuilder.h @@ -0,0 +1,26 @@ +//===-- CGBuilder.h - Choose IRBuilder implementation ----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +#ifndef CLANG_CODEGEN_CGBUILDER_H +#define CLANG_CODEGEN_CGBUILDER_H + +#include "llvm/Support/IRBuilder.h" + +namespace clang { +namespace CodeGen { + // Don't preserve names on values in an optimized build. +#ifdef NDEBUG + typedef llvm::IRBuilder<false> CGBuilderTy; +#else + typedef llvm::IRBuilder<> CGBuilderTy; +#endif +} // end namespace CodeGen +} // end namespace clang + +#endif diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp new file mode 100644 index 000000000000..d813bbae7f06 --- /dev/null +++ b/lib/CodeGen/CGBuiltin.cpp @@ -0,0 +1,1037 @@ +//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This contains code to emit Builtin calls as LLVM code. +// +//===----------------------------------------------------------------------===// + +#include "CodeGenFunction.h" +#include "CodeGenModule.h" +#include "clang/Basic/TargetInfo.h" +#include "clang/AST/APValue.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Decl.h" +#include "clang/AST/TargetBuiltins.h" +#include "llvm/Intrinsics.h" +using namespace clang; +using namespace CodeGen; +using namespace llvm; + +/// Utility to insert an atomic instruction based on Instrinsic::ID +/// and the expression node. 
+static RValue EmitBinaryAtomic(CodeGenFunction& CGF, + Intrinsic::ID Id, const CallExpr *E) { + const llvm::Type *ResType[2]; + ResType[0] = CGF.ConvertType(E->getType()); + ResType[1] = CGF.ConvertType(E->getArg(0)->getType()); + Value *AtomF = CGF.CGM.getIntrinsic(Id, ResType, 2); + return RValue::get(CGF.Builder.CreateCall2(AtomF, + CGF.EmitScalarExpr(E->getArg(0)), + CGF.EmitScalarExpr(E->getArg(1)))); +} + +/// Utility to insert an atomic instruction based Instrinsic::ID and +// the expression node, where the return value is the result of the +// operation. +static RValue EmitBinaryAtomicPost(CodeGenFunction& CGF, + Intrinsic::ID Id, const CallExpr *E, + Instruction::BinaryOps Op) { + const llvm::Type *ResType[2]; + ResType[0] = CGF.ConvertType(E->getType()); + ResType[1] = CGF.ConvertType(E->getArg(0)->getType()); + Value *AtomF = CGF.CGM.getIntrinsic(Id, ResType, 2); + Value *Ptr = CGF.EmitScalarExpr(E->getArg(0)); + Value *Operand = CGF.EmitScalarExpr(E->getArg(1)); + Value *Result = CGF.Builder.CreateCall2(AtomF, Ptr, Operand); + + if (Id == Intrinsic::atomic_load_nand) + Result = CGF.Builder.CreateNot(Result); + + + return RValue::get(CGF.Builder.CreateBinOp(Op, Result, Operand)); +} + +RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, + unsigned BuiltinID, const CallExpr *E) { + // See if we can constant fold this builtin. If so, don't emit it at all. + Expr::EvalResult Result; + if (E->Evaluate(Result, CGM.getContext())) { + if (Result.Val.isInt()) + return RValue::get(llvm::ConstantInt::get(Result.Val.getInt())); + else if (Result.Val.isFloat()) + return RValue::get(llvm::ConstantFP::get(Result.Val.getFloat())); + } + + switch (BuiltinID) { + default: break; // Handle intrinsics and libm functions below. 
+ case Builtin::BI__builtin___CFStringMakeConstantString: + return RValue::get(CGM.EmitConstantExpr(E, E->getType(), 0)); + case Builtin::BI__builtin_stdarg_start: + case Builtin::BI__builtin_va_start: + case Builtin::BI__builtin_va_end: { + Value *ArgValue = EmitVAListRef(E->getArg(0)); + const llvm::Type *DestType = + llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + if (ArgValue->getType() != DestType) + ArgValue = Builder.CreateBitCast(ArgValue, DestType, + ArgValue->getNameStart()); + + Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ? + Intrinsic::vaend : Intrinsic::vastart; + return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue)); + } + case Builtin::BI__builtin_va_copy: { + Value *DstPtr = EmitVAListRef(E->getArg(0)); + Value *SrcPtr = EmitVAListRef(E->getArg(1)); + + const llvm::Type *Type = + llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + + DstPtr = Builder.CreateBitCast(DstPtr, Type); + SrcPtr = Builder.CreateBitCast(SrcPtr, Type); + return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy), + DstPtr, SrcPtr)); + } + case Builtin::BI__builtin_abs: { + Value *ArgValue = EmitScalarExpr(E->getArg(0)); + + Value *NegOp = Builder.CreateNeg(ArgValue, "neg"); + Value *CmpResult = + Builder.CreateICmpSGE(ArgValue, Constant::getNullValue(ArgValue->getType()), + "abscond"); + Value *Result = + Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs"); + + return RValue::get(Result); + } + case Builtin::BI__builtin_ctz: + case Builtin::BI__builtin_ctzl: + case Builtin::BI__builtin_ctzll: { + Value *ArgValue = EmitScalarExpr(E->getArg(0)); + + const llvm::Type *ArgType = ArgValue->getType(); + Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1); + + const llvm::Type *ResultType = ConvertType(E->getType()); + Value *Result = Builder.CreateCall(F, ArgValue, "tmp"); + if (Result->getType() != ResultType) + Result = Builder.CreateIntCast(Result, ResultType, "cast"); + return RValue::get(Result); + } + case 
Builtin::BI__builtin_clz: + case Builtin::BI__builtin_clzl: + case Builtin::BI__builtin_clzll: { + Value *ArgValue = EmitScalarExpr(E->getArg(0)); + + const llvm::Type *ArgType = ArgValue->getType(); + Value *F = CGM.getIntrinsic(Intrinsic::ctlz, &ArgType, 1); + + const llvm::Type *ResultType = ConvertType(E->getType()); + Value *Result = Builder.CreateCall(F, ArgValue, "tmp"); + if (Result->getType() != ResultType) + Result = Builder.CreateIntCast(Result, ResultType, "cast"); + return RValue::get(Result); + } + case Builtin::BI__builtin_ffs: + case Builtin::BI__builtin_ffsl: + case Builtin::BI__builtin_ffsll: { + // ffs(x) -> x ? cttz(x) + 1 : 0 + Value *ArgValue = EmitScalarExpr(E->getArg(0)); + + const llvm::Type *ArgType = ArgValue->getType(); + Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1); + + const llvm::Type *ResultType = ConvertType(E->getType()); + Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue, "tmp"), + ConstantInt::get(ArgType, 1), "tmp"); + Value *Zero = llvm::Constant::getNullValue(ArgType); + Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero"); + Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs"); + if (Result->getType() != ResultType) + Result = Builder.CreateIntCast(Result, ResultType, "cast"); + return RValue::get(Result); + } + case Builtin::BI__builtin_parity: + case Builtin::BI__builtin_parityl: + case Builtin::BI__builtin_parityll: { + // parity(x) -> ctpop(x) & 1 + Value *ArgValue = EmitScalarExpr(E->getArg(0)); + + const llvm::Type *ArgType = ArgValue->getType(); + Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1); + + const llvm::Type *ResultType = ConvertType(E->getType()); + Value *Tmp = Builder.CreateCall(F, ArgValue, "tmp"); + Value *Result = Builder.CreateAnd(Tmp, ConstantInt::get(ArgType, 1), + "tmp"); + if (Result->getType() != ResultType) + Result = Builder.CreateIntCast(Result, ResultType, "cast"); + return RValue::get(Result); + } + case Builtin::BI__builtin_popcount: + 
case Builtin::BI__builtin_popcountl: + case Builtin::BI__builtin_popcountll: { + Value *ArgValue = EmitScalarExpr(E->getArg(0)); + + const llvm::Type *ArgType = ArgValue->getType(); + Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1); + + const llvm::Type *ResultType = ConvertType(E->getType()); + Value *Result = Builder.CreateCall(F, ArgValue, "tmp"); + if (Result->getType() != ResultType) + Result = Builder.CreateIntCast(Result, ResultType, "cast"); + return RValue::get(Result); + } + case Builtin::BI__builtin_expect: + // FIXME: pass expect through to LLVM + return RValue::get(EmitScalarExpr(E->getArg(0))); + case Builtin::BI__builtin_bswap32: + case Builtin::BI__builtin_bswap64: { + Value *ArgValue = EmitScalarExpr(E->getArg(0)); + const llvm::Type *ArgType = ArgValue->getType(); + Value *F = CGM.getIntrinsic(Intrinsic::bswap, &ArgType, 1); + return RValue::get(Builder.CreateCall(F, ArgValue, "tmp")); + } + case Builtin::BI__builtin_object_size: { + // FIXME: Implement. For now we just always fail and pretend we + // don't know the object size. + llvm::APSInt TypeArg = E->getArg(1)->EvaluateAsInt(CGM.getContext()); + const llvm::Type *ResType = ConvertType(E->getType()); + // bool UseSubObject = TypeArg.getZExtValue() & 1; + bool UseMinimum = TypeArg.getZExtValue() & 2; + return RValue::get(ConstantInt::get(ResType, UseMinimum ? 0 : -1LL)); + } + case Builtin::BI__builtin_prefetch: { + Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0)); + // FIXME: Technically these constants should of type 'int', yes? + RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) : + ConstantInt::get(llvm::Type::Int32Ty, 0); + Locality = (E->getNumArgs() > 2) ? 
EmitScalarExpr(E->getArg(2)) : + ConstantInt::get(llvm::Type::Int32Ty, 3); + Value *F = CGM.getIntrinsic(Intrinsic::prefetch, 0, 0); + return RValue::get(Builder.CreateCall3(F, Address, RW, Locality)); + } + case Builtin::BI__builtin_trap: { + Value *F = CGM.getIntrinsic(Intrinsic::trap, 0, 0); + return RValue::get(Builder.CreateCall(F)); + } + + case Builtin::BI__builtin_powi: + case Builtin::BI__builtin_powif: + case Builtin::BI__builtin_powil: { + Value *Base = EmitScalarExpr(E->getArg(0)); + Value *Exponent = EmitScalarExpr(E->getArg(1)); + const llvm::Type *ArgType = Base->getType(); + Value *F = CGM.getIntrinsic(Intrinsic::powi, &ArgType, 1); + return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp")); + } + + case Builtin::BI__builtin_isgreater: + case Builtin::BI__builtin_isgreaterequal: + case Builtin::BI__builtin_isless: + case Builtin::BI__builtin_islessequal: + case Builtin::BI__builtin_islessgreater: + case Builtin::BI__builtin_isunordered: { + // Ordered comparisons: we know the arguments to these are matching scalar + // floating point values. + Value *LHS = EmitScalarExpr(E->getArg(0)); + Value *RHS = EmitScalarExpr(E->getArg(1)); + + switch (BuiltinID) { + default: assert(0 && "Unknown ordered comparison"); + case Builtin::BI__builtin_isgreater: + LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp"); + break; + case Builtin::BI__builtin_isgreaterequal: + LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp"); + break; + case Builtin::BI__builtin_isless: + LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp"); + break; + case Builtin::BI__builtin_islessequal: + LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp"); + break; + case Builtin::BI__builtin_islessgreater: + LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp"); + break; + case Builtin::BI__builtin_isunordered: + LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp"); + break; + } + // ZExt bool to int type. 
+ return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()), + "tmp")); + } + case Builtin::BIalloca: + case Builtin::BI__builtin_alloca: { + // FIXME: LLVM IR Should allow alloca with an i64 size! + Value *Size = EmitScalarExpr(E->getArg(0)); + Size = Builder.CreateIntCast(Size, llvm::Type::Int32Ty, false, "tmp"); + return RValue::get(Builder.CreateAlloca(llvm::Type::Int8Ty, Size, "tmp")); + } + case Builtin::BI__builtin_bzero: { + Value *Address = EmitScalarExpr(E->getArg(0)); + Builder.CreateCall4(CGM.getMemSetFn(), Address, + llvm::ConstantInt::get(llvm::Type::Int8Ty, 0), + EmitScalarExpr(E->getArg(1)), + llvm::ConstantInt::get(llvm::Type::Int32Ty, 1)); + return RValue::get(Address); + } + case Builtin::BI__builtin_memcpy: { + Value *Address = EmitScalarExpr(E->getArg(0)); + Builder.CreateCall4(CGM.getMemCpyFn(), Address, + EmitScalarExpr(E->getArg(1)), + EmitScalarExpr(E->getArg(2)), + llvm::ConstantInt::get(llvm::Type::Int32Ty, 1)); + return RValue::get(Address); + } + case Builtin::BI__builtin_memmove: { + Value *Address = EmitScalarExpr(E->getArg(0)); + Builder.CreateCall4(CGM.getMemMoveFn(), Address, + EmitScalarExpr(E->getArg(1)), + EmitScalarExpr(E->getArg(2)), + llvm::ConstantInt::get(llvm::Type::Int32Ty, 1)); + return RValue::get(Address); + } + case Builtin::BI__builtin_memset: { + Value *Address = EmitScalarExpr(E->getArg(0)); + Builder.CreateCall4(CGM.getMemSetFn(), Address, + Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), + llvm::Type::Int8Ty), + EmitScalarExpr(E->getArg(2)), + llvm::ConstantInt::get(llvm::Type::Int32Ty, 1)); + return RValue::get(Address); + } + case Builtin::BI__builtin_return_address: { + Value *F = CGM.getIntrinsic(Intrinsic::returnaddress, 0, 0); + return RValue::get(Builder.CreateCall(F, EmitScalarExpr(E->getArg(0)))); + } + case Builtin::BI__builtin_frame_address: { + Value *F = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0); + return RValue::get(Builder.CreateCall(F, EmitScalarExpr(E->getArg(0)))); + } + case 
Builtin::BI__builtin_extract_return_addr: { + // FIXME: There should be a target hook for this + return RValue::get(EmitScalarExpr(E->getArg(0))); + } + case Builtin::BI__builtin_unwind_init: { + Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init, 0, 0); + return RValue::get(Builder.CreateCall(F)); + } +#if 0 + // FIXME: Finish/enable when LLVM backend support stabilizes + case Builtin::BI__builtin_setjmp: { + Value *Buf = EmitScalarExpr(E->getArg(0)); + // Store the frame pointer to the buffer + Value *FrameAddrF = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0); + Value *FrameAddr = + Builder.CreateCall(FrameAddrF, + Constant::getNullValue(llvm::Type::Int32Ty)); + Builder.CreateStore(FrameAddr, Buf); + // Call the setjmp intrinsic + Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp, 0, 0); + const llvm::Type *DestType = + llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + Buf = Builder.CreateBitCast(Buf, DestType); + return RValue::get(Builder.CreateCall(F, Buf)); + } + case Builtin::BI__builtin_longjmp: { + Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp, 0, 0); + Value *Buf = EmitScalarExpr(E->getArg(0)); + const llvm::Type *DestType = + llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + Buf = Builder.CreateBitCast(Buf, DestType); + return RValue::get(Builder.CreateCall(F, Buf)); + } +#endif + case Builtin::BI__sync_fetch_and_add: + case Builtin::BI__sync_fetch_and_sub: + case Builtin::BI__sync_fetch_and_or: + case Builtin::BI__sync_fetch_and_and: + case Builtin::BI__sync_fetch_and_xor: + case Builtin::BI__sync_add_and_fetch: + case Builtin::BI__sync_sub_and_fetch: + case Builtin::BI__sync_and_and_fetch: + case Builtin::BI__sync_or_and_fetch: + case Builtin::BI__sync_xor_and_fetch: + case Builtin::BI__sync_val_compare_and_swap: + case Builtin::BI__sync_bool_compare_and_swap: + case Builtin::BI__sync_lock_test_and_set: + case Builtin::BI__sync_lock_release: + assert(0 && "Shouldn't make it through sema"); + case Builtin::BI__sync_fetch_and_add_1: 
+ case Builtin::BI__sync_fetch_and_add_2: + case Builtin::BI__sync_fetch_and_add_4: + case Builtin::BI__sync_fetch_and_add_8: + case Builtin::BI__sync_fetch_and_add_16: + return EmitBinaryAtomic(*this, Intrinsic::atomic_load_add, E); + case Builtin::BI__sync_fetch_and_sub_1: + case Builtin::BI__sync_fetch_and_sub_2: + case Builtin::BI__sync_fetch_and_sub_4: + case Builtin::BI__sync_fetch_and_sub_8: + case Builtin::BI__sync_fetch_and_sub_16: + return EmitBinaryAtomic(*this, Intrinsic::atomic_load_sub, E); + case Builtin::BI__sync_fetch_and_or_1: + case Builtin::BI__sync_fetch_and_or_2: + case Builtin::BI__sync_fetch_and_or_4: + case Builtin::BI__sync_fetch_and_or_8: + case Builtin::BI__sync_fetch_and_or_16: + return EmitBinaryAtomic(*this, Intrinsic::atomic_load_or, E); + case Builtin::BI__sync_fetch_and_and_1: + case Builtin::BI__sync_fetch_and_and_2: + case Builtin::BI__sync_fetch_and_and_4: + case Builtin::BI__sync_fetch_and_and_8: + case Builtin::BI__sync_fetch_and_and_16: + return EmitBinaryAtomic(*this, Intrinsic::atomic_load_and, E); + case Builtin::BI__sync_fetch_and_xor_1: + case Builtin::BI__sync_fetch_and_xor_2: + case Builtin::BI__sync_fetch_and_xor_4: + case Builtin::BI__sync_fetch_and_xor_8: + case Builtin::BI__sync_fetch_and_xor_16: + return EmitBinaryAtomic(*this, Intrinsic::atomic_load_xor, E); + case Builtin::BI__sync_fetch_and_nand_1: + case Builtin::BI__sync_fetch_and_nand_2: + case Builtin::BI__sync_fetch_and_nand_4: + case Builtin::BI__sync_fetch_and_nand_8: + case Builtin::BI__sync_fetch_and_nand_16: + return EmitBinaryAtomic(*this, Intrinsic::atomic_load_nand, E); + + // Clang extensions: not overloaded yet. 
+ case Builtin::BI__sync_fetch_and_min: + return EmitBinaryAtomic(*this, Intrinsic::atomic_load_min, E); + case Builtin::BI__sync_fetch_and_max: + return EmitBinaryAtomic(*this, Intrinsic::atomic_load_max, E); + case Builtin::BI__sync_fetch_and_umin: + return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umin, E); + case Builtin::BI__sync_fetch_and_umax: + return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umax, E); + + case Builtin::BI__sync_add_and_fetch_1: + case Builtin::BI__sync_add_and_fetch_2: + case Builtin::BI__sync_add_and_fetch_4: + case Builtin::BI__sync_add_and_fetch_8: + case Builtin::BI__sync_add_and_fetch_16: + return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_add, E, + llvm::Instruction::Add); + case Builtin::BI__sync_sub_and_fetch_1: + case Builtin::BI__sync_sub_and_fetch_2: + case Builtin::BI__sync_sub_and_fetch_4: + case Builtin::BI__sync_sub_and_fetch_8: + case Builtin::BI__sync_sub_and_fetch_16: + return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_sub, E, + llvm::Instruction::Sub); + case Builtin::BI__sync_and_and_fetch_1: + case Builtin::BI__sync_and_and_fetch_2: + case Builtin::BI__sync_and_and_fetch_4: + case Builtin::BI__sync_and_and_fetch_8: + case Builtin::BI__sync_and_and_fetch_16: + return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_and, E, + llvm::Instruction::And); + case Builtin::BI__sync_or_and_fetch_1: + case Builtin::BI__sync_or_and_fetch_2: + case Builtin::BI__sync_or_and_fetch_4: + case Builtin::BI__sync_or_and_fetch_8: + case Builtin::BI__sync_or_and_fetch_16: + return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_or, E, + llvm::Instruction::Or); + case Builtin::BI__sync_xor_and_fetch_1: + case Builtin::BI__sync_xor_and_fetch_2: + case Builtin::BI__sync_xor_and_fetch_4: + case Builtin::BI__sync_xor_and_fetch_8: + case Builtin::BI__sync_xor_and_fetch_16: + return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_xor, E, + llvm::Instruction::Xor); + case Builtin::BI__sync_nand_and_fetch_1: + case 
Builtin::BI__sync_nand_and_fetch_2: + case Builtin::BI__sync_nand_and_fetch_4: + case Builtin::BI__sync_nand_and_fetch_8: + case Builtin::BI__sync_nand_and_fetch_16: + return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_nand, E, + llvm::Instruction::And); + + case Builtin::BI__sync_val_compare_and_swap_1: + case Builtin::BI__sync_val_compare_and_swap_2: + case Builtin::BI__sync_val_compare_and_swap_4: + case Builtin::BI__sync_val_compare_and_swap_8: + case Builtin::BI__sync_val_compare_and_swap_16: + { + const llvm::Type *ResType[2]; + ResType[0]= ConvertType(E->getType()); + ResType[1] = ConvertType(E->getArg(0)->getType()); + Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2); + return RValue::get(Builder.CreateCall3(AtomF, + EmitScalarExpr(E->getArg(0)), + EmitScalarExpr(E->getArg(1)), + EmitScalarExpr(E->getArg(2)))); + } + + case Builtin::BI__sync_bool_compare_and_swap_1: + case Builtin::BI__sync_bool_compare_and_swap_2: + case Builtin::BI__sync_bool_compare_and_swap_4: + case Builtin::BI__sync_bool_compare_and_swap_8: + case Builtin::BI__sync_bool_compare_and_swap_16: + { + const llvm::Type *ResType[2]; + ResType[0]= ConvertType(E->getArg(1)->getType()); + ResType[1] = llvm::PointerType::getUnqual(ResType[0]); + Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2); + Value *OldVal = EmitScalarExpr(E->getArg(1)); + Value *PrevVal = Builder.CreateCall3(AtomF, + EmitScalarExpr(E->getArg(0)), + OldVal, + EmitScalarExpr(E->getArg(2))); + Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal); + // zext bool to int. 
+ return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType()))); + } + + case Builtin::BI__sync_lock_test_and_set_1: + case Builtin::BI__sync_lock_test_and_set_2: + case Builtin::BI__sync_lock_test_and_set_4: + case Builtin::BI__sync_lock_test_and_set_8: + case Builtin::BI__sync_lock_test_and_set_16: + return EmitBinaryAtomic(*this, Intrinsic::atomic_swap, E); + case Builtin::BI__sync_lock_release_1: + case Builtin::BI__sync_lock_release_2: + case Builtin::BI__sync_lock_release_4: + case Builtin::BI__sync_lock_release_8: + case Builtin::BI__sync_lock_release_16: { + Value *Ptr = EmitScalarExpr(E->getArg(0)); + const llvm::Type *ElTy = + cast<llvm::PointerType>(Ptr->getType())->getElementType(); + Builder.CreateStore(llvm::Constant::getNullValue(ElTy), Ptr, true); + return RValue::get(0); + } + + case Builtin::BI__sync_synchronize: { + Value *C[5]; + C[0] = C[1] = C[2] = C[3] = llvm::ConstantInt::get(llvm::Type::Int1Ty, 1); + C[4] = ConstantInt::get(llvm::Type::Int1Ty, 0); + Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C, C + 5); + return RValue::get(0); + } + + // Library functions with special handling. + case Builtin::BIsqrt: + case Builtin::BIsqrtf: + case Builtin::BIsqrtl: { + // Rewrite sqrt to intrinsic if allowed. + if (!FD->hasAttr<ConstAttr>()) + break; + Value *Arg0 = EmitScalarExpr(E->getArg(0)); + const llvm::Type *ArgType = Arg0->getType(); + Value *F = CGM.getIntrinsic(Intrinsic::sqrt, &ArgType, 1); + return RValue::get(Builder.CreateCall(F, Arg0, "tmp")); + } + + case Builtin::BIpow: + case Builtin::BIpowf: + case Builtin::BIpowl: { + // Rewrite sqrt to intrinsic if allowed. 
+ if (!FD->hasAttr<ConstAttr>()) + break; + Value *Base = EmitScalarExpr(E->getArg(0)); + Value *Exponent = EmitScalarExpr(E->getArg(1)); + const llvm::Type *ArgType = Base->getType(); + Value *F = CGM.getIntrinsic(Intrinsic::pow, &ArgType, 1); + return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp")); + } + } + + // If this is an alias for a libm function (e.g. __builtin_sin) turn it into + // that function. + if (getContext().BuiltinInfo.isLibFunction(BuiltinID) || + getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID)) + return EmitCall(CGM.getBuiltinLibFunction(BuiltinID), + E->getCallee()->getType(), E->arg_begin(), + E->arg_end()); + + // See if we have a target specific intrinsic. + const char *Name = getContext().BuiltinInfo.GetName(BuiltinID); + Intrinsic::ID IntrinsicID = + Intrinsic::getIntrinsicForGCCBuiltin(Target.getTargetPrefix(), Name); + + if (IntrinsicID != Intrinsic::not_intrinsic) { + SmallVector<Value*, 16> Args; + + Function *F = CGM.getIntrinsic(IntrinsicID); + const llvm::FunctionType *FTy = F->getFunctionType(); + + for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) { + Value *ArgValue = EmitScalarExpr(E->getArg(i)); + + // If the intrinsic arg type is different from the builtin arg type + // we need to do a bit cast. 
+ const llvm::Type *PTy = FTy->getParamType(i); + if (PTy != ArgValue->getType()) { + assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) && + "Must be able to losslessly bit cast to param"); + ArgValue = Builder.CreateBitCast(ArgValue, PTy); + } + + Args.push_back(ArgValue); + } + + Value *V = Builder.CreateCall(F, Args.data(), Args.data() + Args.size()); + QualType BuiltinRetType = E->getType(); + + const llvm::Type *RetTy = llvm::Type::VoidTy; + if (!BuiltinRetType->isVoidType()) RetTy = ConvertType(BuiltinRetType); + + if (RetTy != V->getType()) { + assert(V->getType()->canLosslesslyBitCastTo(RetTy) && + "Must be able to losslessly bit cast result type"); + V = Builder.CreateBitCast(V, RetTy); + } + + return RValue::get(V); + } + + // See if we have a target specific builtin that needs to be lowered. + if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E)) + return RValue::get(V); + + ErrorUnsupported(E, "builtin function"); + + // Unknown builtin, for now just dump it out and return undef. 
+ if (hasAggregateLLVMType(E->getType())) + return RValue::getAggregate(CreateTempAlloca(ConvertType(E->getType()))); + return RValue::get(UndefValue::get(ConvertType(E->getType()))); +} + +Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID, + const CallExpr *E) { + const char *TargetPrefix = Target.getTargetPrefix(); + if (strcmp(TargetPrefix, "x86") == 0) + return EmitX86BuiltinExpr(BuiltinID, E); + else if (strcmp(TargetPrefix, "ppc") == 0) + return EmitPPCBuiltinExpr(BuiltinID, E); + return 0; +} + +Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID, + const CallExpr *E) { + + llvm::SmallVector<Value*, 4> Ops; + + for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) + Ops.push_back(EmitScalarExpr(E->getArg(i))); + + switch (BuiltinID) { + default: return 0; + case X86::BI__builtin_ia32_mulps: + return Builder.CreateMul(Ops[0], Ops[1], "mulps"); + case X86::BI__builtin_ia32_mulpd: + return Builder.CreateMul(Ops[0], Ops[1], "mulpd"); + case X86::BI__builtin_ia32_pand: + case X86::BI__builtin_ia32_pand128: + return Builder.CreateAnd(Ops[0], Ops[1], "pand"); + case X86::BI__builtin_ia32_por: + case X86::BI__builtin_ia32_por128: + return Builder.CreateOr(Ops[0], Ops[1], "por"); + case X86::BI__builtin_ia32_pxor: + case X86::BI__builtin_ia32_pxor128: + return Builder.CreateXor(Ops[0], Ops[1], "pxor"); + case X86::BI__builtin_ia32_pandn: + case X86::BI__builtin_ia32_pandn128: + Ops[0] = Builder.CreateNot(Ops[0], "tmp"); + return Builder.CreateAnd(Ops[0], Ops[1], "pandn"); + case X86::BI__builtin_ia32_paddb: + case X86::BI__builtin_ia32_paddb128: + case X86::BI__builtin_ia32_paddd: + case X86::BI__builtin_ia32_paddd128: + case X86::BI__builtin_ia32_paddq: + case X86::BI__builtin_ia32_paddq128: + case X86::BI__builtin_ia32_paddw: + case X86::BI__builtin_ia32_paddw128: + case X86::BI__builtin_ia32_addps: + case X86::BI__builtin_ia32_addpd: + return Builder.CreateAdd(Ops[0], Ops[1], "add"); + case X86::BI__builtin_ia32_psubb: + case 
X86::BI__builtin_ia32_psubb128: + case X86::BI__builtin_ia32_psubd: + case X86::BI__builtin_ia32_psubd128: + case X86::BI__builtin_ia32_psubq: + case X86::BI__builtin_ia32_psubq128: + case X86::BI__builtin_ia32_psubw: + case X86::BI__builtin_ia32_psubw128: + case X86::BI__builtin_ia32_subps: + case X86::BI__builtin_ia32_subpd: + return Builder.CreateSub(Ops[0], Ops[1], "sub"); + case X86::BI__builtin_ia32_divps: + return Builder.CreateFDiv(Ops[0], Ops[1], "divps"); + case X86::BI__builtin_ia32_divpd: + return Builder.CreateFDiv(Ops[0], Ops[1], "divpd"); + case X86::BI__builtin_ia32_pmullw: + case X86::BI__builtin_ia32_pmullw128: + return Builder.CreateMul(Ops[0], Ops[1], "pmul"); + case X86::BI__builtin_ia32_punpckhbw: + return EmitShuffleVector(Ops[0], Ops[1], 4, 12, 5, 13, 6, 14, 7, 15, + "punpckhbw"); + case X86::BI__builtin_ia32_punpckhbw128: + return EmitShuffleVector(Ops[0], Ops[1], 8, 24, 9, 25, 10, 26, 11, 27, + 12, 28, 13, 29, 14, 30, 15, 31, + "punpckhbw"); + case X86::BI__builtin_ia32_punpckhwd: + return EmitShuffleVector(Ops[0], Ops[1], 2, 6, 3, 7, "punpckhwd"); + case X86::BI__builtin_ia32_punpckhwd128: + return EmitShuffleVector(Ops[0], Ops[1], 4, 12, 5, 13, 6, 14, 7, 15, + "punpckhwd"); + case X86::BI__builtin_ia32_punpckhdq: + return EmitShuffleVector(Ops[0], Ops[1], 1, 3, "punpckhdq"); + case X86::BI__builtin_ia32_punpckhdq128: + return EmitShuffleVector(Ops[0], Ops[1], 2, 6, 3, 7, "punpckhdq"); + case X86::BI__builtin_ia32_punpckhqdq128: + return EmitShuffleVector(Ops[0], Ops[1], 1, 3, "punpckhqdq"); + case X86::BI__builtin_ia32_punpcklbw: + return EmitShuffleVector(Ops[0], Ops[1], 0, 8, 1, 9, 2, 10, 3, 11, + "punpcklbw"); + case X86::BI__builtin_ia32_punpcklwd: + return EmitShuffleVector(Ops[0], Ops[1], 0, 4, 1, 5, "punpcklwd"); + case X86::BI__builtin_ia32_punpckldq: + return EmitShuffleVector(Ops[0], Ops[1], 0, 2, "punpckldq"); + case X86::BI__builtin_ia32_punpckldq128: + return EmitShuffleVector(Ops[0], Ops[1], 0, 4, 1, 5, "punpckldq"); + case 
X86::BI__builtin_ia32_punpcklqdq128: + return EmitShuffleVector(Ops[0], Ops[1], 0, 2, "punpcklqdq"); + case X86::BI__builtin_ia32_pslldi128: + case X86::BI__builtin_ia32_psllqi128: + case X86::BI__builtin_ia32_psllwi128: + case X86::BI__builtin_ia32_psradi128: + case X86::BI__builtin_ia32_psrawi128: + case X86::BI__builtin_ia32_psrldi128: + case X86::BI__builtin_ia32_psrlqi128: + case X86::BI__builtin_ia32_psrlwi128: { + Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::Int64Ty, "zext"); + const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::Int64Ty, 2); + llvm::Value *Zero = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0); + Ops[1] = Builder.CreateInsertElement(llvm::UndefValue::get(Ty), + Ops[1], Zero, "insert"); + Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "bitcast"); + const char *name = 0; + Intrinsic::ID ID = Intrinsic::not_intrinsic; + + switch (BuiltinID) { + default: assert(0 && "Unsupported shift intrinsic!"); + case X86::BI__builtin_ia32_pslldi128: + name = "pslldi"; + ID = Intrinsic::x86_sse2_psll_d; + break; + case X86::BI__builtin_ia32_psllqi128: + name = "psllqi"; + ID = Intrinsic::x86_sse2_psll_q; + break; + case X86::BI__builtin_ia32_psllwi128: + name = "psllwi"; + ID = Intrinsic::x86_sse2_psll_w; + break; + case X86::BI__builtin_ia32_psradi128: + name = "psradi"; + ID = Intrinsic::x86_sse2_psra_d; + break; + case X86::BI__builtin_ia32_psrawi128: + name = "psrawi"; + ID = Intrinsic::x86_sse2_psra_w; + break; + case X86::BI__builtin_ia32_psrldi128: + name = "psrldi"; + ID = Intrinsic::x86_sse2_psrl_d; + break; + case X86::BI__builtin_ia32_psrlqi128: + name = "psrlqi"; + ID = Intrinsic::x86_sse2_psrl_q; + break; + case X86::BI__builtin_ia32_psrlwi128: + name = "psrlwi"; + ID = Intrinsic::x86_sse2_psrl_w; + break; + } + llvm::Function *F = CGM.getIntrinsic(ID); + return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name); + } + case X86::BI__builtin_ia32_pslldi: + case X86::BI__builtin_ia32_psllqi: + case 
X86::BI__builtin_ia32_psllwi: + case X86::BI__builtin_ia32_psradi: + case X86::BI__builtin_ia32_psrawi: + case X86::BI__builtin_ia32_psrldi: + case X86::BI__builtin_ia32_psrlqi: + case X86::BI__builtin_ia32_psrlwi: { + Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::Int64Ty, "zext"); + const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::Int64Ty, 1); + Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast"); + const char *name = 0; + Intrinsic::ID ID = Intrinsic::not_intrinsic; + + switch (BuiltinID) { + default: assert(0 && "Unsupported shift intrinsic!"); + case X86::BI__builtin_ia32_pslldi: + name = "pslldi"; + ID = Intrinsic::x86_mmx_psll_d; + break; + case X86::BI__builtin_ia32_psllqi: + name = "psllqi"; + ID = Intrinsic::x86_mmx_psll_q; + break; + case X86::BI__builtin_ia32_psllwi: + name = "psllwi"; + ID = Intrinsic::x86_mmx_psll_w; + break; + case X86::BI__builtin_ia32_psradi: + name = "psradi"; + ID = Intrinsic::x86_mmx_psra_d; + break; + case X86::BI__builtin_ia32_psrawi: + name = "psrawi"; + ID = Intrinsic::x86_mmx_psra_w; + break; + case X86::BI__builtin_ia32_psrldi: + name = "psrldi"; + ID = Intrinsic::x86_mmx_psrl_d; + break; + case X86::BI__builtin_ia32_psrlqi: + name = "psrlqi"; + ID = Intrinsic::x86_mmx_psrl_q; + break; + case X86::BI__builtin_ia32_psrlwi: + name = "psrlwi"; + ID = Intrinsic::x86_mmx_psrl_w; + break; + } + llvm::Function *F = CGM.getIntrinsic(ID); + return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name); + } + case X86::BI__builtin_ia32_pshufw: { + unsigned i = cast<ConstantInt>(Ops[1])->getZExtValue(); + return EmitShuffleVector(Ops[0], Ops[0], + i & 0x3, (i & 0xc) >> 2, + (i & 0x30) >> 4, (i & 0xc0) >> 6, + "pshufw"); + } + case X86::BI__builtin_ia32_pshuflw: { + unsigned i = cast<ConstantInt>(Ops[1])->getZExtValue(); + return EmitShuffleVector(Ops[0], Ops[0], + i & 0x3, (i & 0xc) >> 2, + (i & 0x30) >> 4, (i & 0xc0) >> 6, 4, 5, 6, 7, + "pshuflw"); + } + case X86::BI__builtin_ia32_pshufhw: { + unsigned i = 
cast<ConstantInt>(Ops[1])->getZExtValue(); + return EmitShuffleVector(Ops[0], Ops[0], 0, 1, 2, 3, + 4 + (i & 0x3), 4 + ((i & 0xc) >> 2), + 4 + ((i & 0x30) >> 4), 4 + ((i & 0xc0) >> 6), + "pshufhw"); + } + case X86::BI__builtin_ia32_pshufd: { + unsigned i = cast<ConstantInt>(Ops[1])->getZExtValue(); + return EmitShuffleVector(Ops[0], Ops[0], + i & 0x3, (i & 0xc) >> 2, + (i & 0x30) >> 4, (i & 0xc0) >> 6, + "pshufd"); + } + case X86::BI__builtin_ia32_vec_init_v4hi: + case X86::BI__builtin_ia32_vec_init_v8qi: + case X86::BI__builtin_ia32_vec_init_v2si: + return EmitVector(&Ops[0], Ops.size()); + case X86::BI__builtin_ia32_vec_ext_v2si: + case X86::BI__builtin_ia32_vec_ext_v2di: + case X86::BI__builtin_ia32_vec_ext_v4sf: + case X86::BI__builtin_ia32_vec_ext_v4si: + case X86::BI__builtin_ia32_vec_ext_v8hi: + case X86::BI__builtin_ia32_vec_ext_v4hi: + case X86::BI__builtin_ia32_vec_ext_v2df: + return Builder.CreateExtractElement(Ops[0], Ops[1], "result"); + case X86::BI__builtin_ia32_cmpps: { + llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ps); + return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpps"); + } + case X86::BI__builtin_ia32_cmpss: { + llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ss); + return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpss"); + } + case X86::BI__builtin_ia32_ldmxcsr: { + llvm::Type *PtrTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + Value *One = llvm::ConstantInt::get(llvm::Type::Int32Ty, 1); + Value *Tmp = Builder.CreateAlloca(llvm::Type::Int32Ty, One, "tmp"); + Builder.CreateStore(Ops[0], Tmp); + return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr), + Builder.CreateBitCast(Tmp, PtrTy)); + } + case X86::BI__builtin_ia32_stmxcsr: { + llvm::Type *PtrTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + Value *One = llvm::ConstantInt::get(llvm::Type::Int32Ty, 1); + Value *Tmp = Builder.CreateAlloca(llvm::Type::Int32Ty, One, "tmp"); + One = 
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr), + Builder.CreateBitCast(Tmp, PtrTy)); + return Builder.CreateLoad(Tmp, "stmxcsr"); + } + case X86::BI__builtin_ia32_cmppd: { + llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_pd); + return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmppd"); + } + case X86::BI__builtin_ia32_cmpsd: { + llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_sd); + return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpsd"); + } + case X86::BI__builtin_ia32_movss: + return EmitShuffleVector(Ops[0], Ops[1], 4, 1, 2, 3, "movss"); + case X86::BI__builtin_ia32_shufps: { + unsigned i = cast<ConstantInt>(Ops[2])->getZExtValue(); + return EmitShuffleVector(Ops[0], Ops[1], + i & 0x3, (i & 0xc) >> 2, + ((i & 0x30) >> 4) + 4, + ((i & 0xc0) >> 6) + 4, "shufps"); + } + case X86::BI__builtin_ia32_shufpd: { + unsigned i = cast<ConstantInt>(Ops[2])->getZExtValue(); + return EmitShuffleVector(Ops[0], Ops[1], i & 1, + ((i & 2) >> 1)+2, "shufpd"); + } + case X86::BI__builtin_ia32_punpcklbw128: + return EmitShuffleVector(Ops[0], Ops[1], 0, 16, 1, 17, 2, 18, 3, 19, + 4, 20, 5, 21, 6, 22, 7, 23, + "punpcklbw"); + case X86::BI__builtin_ia32_punpcklwd128: + return EmitShuffleVector(Ops[0], Ops[1], 0, 8, 1, 9, 2, 10, 3, 11, + "punpcklwd"); + case X86::BI__builtin_ia32_movlhps: + return EmitShuffleVector(Ops[0], Ops[1], 0, 1, 4, 5, "movlhps"); + case X86::BI__builtin_ia32_movhlps: + return EmitShuffleVector(Ops[0], Ops[1], 6, 7, 2, 3, "movhlps"); + case X86::BI__builtin_ia32_unpckhps: + return EmitShuffleVector(Ops[0], Ops[1], 2, 6, 3, 7, "unpckhps"); + case X86::BI__builtin_ia32_unpcklps: + return EmitShuffleVector(Ops[0], Ops[1], 0, 4, 1, 5, "unpcklps"); + case X86::BI__builtin_ia32_unpckhpd: + return EmitShuffleVector(Ops[0], Ops[1], 1, 3, "unpckhpd"); + case X86::BI__builtin_ia32_unpcklpd: + return EmitShuffleVector(Ops[0], Ops[1], 0, 2, "unpcklpd"); + case X86::BI__builtin_ia32_movsd: + return 
EmitShuffleVector(Ops[0], Ops[1], 2, 1, "movsd"); + case X86::BI__builtin_ia32_loadlps: + case X86::BI__builtin_ia32_loadhps: { + // FIXME: This should probably be represented as + // shuffle (dst, (v4f32 (insert undef, (load i64), 0)), shuf mask hi/lo) + const llvm::Type *EltTy = llvm::Type::DoubleTy; + const llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2); + const llvm::Type *OrigTy = Ops[0]->getType(); + unsigned Index = BuiltinID == X86::BI__builtin_ia32_loadlps ? 0 : 1; + llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, Index); + Ops[1] = Builder.CreateBitCast(Ops[1], llvm::PointerType::getUnqual(EltTy)); + Ops[1] = Builder.CreateLoad(Ops[1], "tmp"); + Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast"); + Ops[0] = Builder.CreateInsertElement(Ops[0], Ops[1], Idx, "loadps"); + return Builder.CreateBitCast(Ops[0], OrigTy, "loadps"); + } + case X86::BI__builtin_ia32_loadlpd: + case X86::BI__builtin_ia32_loadhpd: { + Ops[1] = Builder.CreateLoad(Ops[1], "tmp"); + unsigned Index = BuiltinID == X86::BI__builtin_ia32_loadlpd ? 0 : 1; + llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, Index); + return Builder.CreateInsertElement(Ops[0], Ops[1], Idx, "loadpd"); + } + case X86::BI__builtin_ia32_storehps: + case X86::BI__builtin_ia32_storelps: { + const llvm::Type *EltTy = llvm::Type::Int64Ty; + llvm::Type *PtrTy = llvm::PointerType::getUnqual(EltTy); + llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2); + + // cast val v2i64 + Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast"); + + // extract (0, 1) + unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 
0 : 1; + llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, Index); + Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract"); + + // cast pointer to i64 & store + Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy); + return Builder.CreateStore(Ops[1], Ops[0]); + } + case X86::BI__builtin_ia32_loadlv4si: { + // load i64 + const llvm::Type *EltTy = llvm::Type::Int64Ty; + llvm::Type *PtrTy = llvm::PointerType::getUnqual(EltTy); + Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy); + Ops[0] = Builder.CreateLoad(Ops[0], "load"); + + // scalar to vector: insert i64 into 2 x i64 undef + llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2); + llvm::Value *Zero = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0); + Ops[0] = Builder.CreateInsertElement(llvm::UndefValue::get(VecTy), + Ops[0], Zero, "s2v"); + + // shuffle into zero vector. + std::vector<llvm::Constant *>Elts; + Elts.resize(2, llvm::ConstantInt::get(EltTy, 0)); + llvm::Value *ZV = ConstantVector::get(Elts); + Ops[0] = EmitShuffleVector(ZV, Ops[0], 2, 1, "loadl"); + + // bitcast to result. 
+ return Builder.CreateBitCast(Ops[0], + llvm::VectorType::get(llvm::Type::Int32Ty, 4)); + } + case X86::BI__builtin_ia32_vec_set_v4hi: + case X86::BI__builtin_ia32_vec_set_v8hi: + return Builder.CreateInsertElement(Ops[0], Ops[1], Ops[2], "pinsrw"); + case X86::BI__builtin_ia32_vec_set_v4si: + return Builder.CreateInsertElement(Ops[0], Ops[1], Ops[2], "pinsrd"); + case X86::BI__builtin_ia32_vec_set_v2di: + return Builder.CreateInsertElement(Ops[0], Ops[1], Ops[2], "pinsrq"); + case X86::BI__builtin_ia32_andps: + case X86::BI__builtin_ia32_andpd: + case X86::BI__builtin_ia32_andnps: + case X86::BI__builtin_ia32_andnpd: + case X86::BI__builtin_ia32_orps: + case X86::BI__builtin_ia32_orpd: + case X86::BI__builtin_ia32_xorpd: + case X86::BI__builtin_ia32_xorps: { + const llvm::Type *ITy = llvm::VectorType::get(llvm::Type::Int32Ty, 4); + const llvm::Type *FTy = Ops[0]->getType(); + Ops[0] = Builder.CreateBitCast(Ops[0], ITy, "bitcast"); + Ops[1] = Builder.CreateBitCast(Ops[1], ITy, "bitcast"); + switch (BuiltinID) { + case X86::BI__builtin_ia32_andps: + Ops[0] = Builder.CreateAnd(Ops[0], Ops[1], "andps"); + break; + case X86::BI__builtin_ia32_andpd: + Ops[0] = Builder.CreateAnd(Ops[0], Ops[1], "andpd"); + break; + case X86::BI__builtin_ia32_andnps: + Ops[0] = Builder.CreateNot(Ops[0], "not"); + Ops[0] = Builder.CreateAnd(Ops[0], Ops[1], "andnps"); + break; + case X86::BI__builtin_ia32_andnpd: + Ops[0] = Builder.CreateNot(Ops[0], "not"); + Ops[0] = Builder.CreateAnd(Ops[0], Ops[1], "andnpd"); + break; + case X86::BI__builtin_ia32_orps: + Ops[0] = Builder.CreateOr(Ops[0], Ops[1], "orps"); + break; + case X86::BI__builtin_ia32_orpd: + Ops[0] = Builder.CreateOr(Ops[0], Ops[1], "orpd"); + break; + case X86::BI__builtin_ia32_xorps: + Ops[0] = Builder.CreateXor(Ops[0], Ops[1], "xorps"); + break; + case X86::BI__builtin_ia32_xorpd: + Ops[0] = Builder.CreateXor(Ops[0], Ops[1], "xorpd"); + break; + } + return Builder.CreateBitCast(Ops[0], FTy, "bitcast"); + } + } +} + +Value 
*CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID, + const CallExpr *E) { + switch (BuiltinID) { + default: return 0; + } +} diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp new file mode 100644 index 000000000000..731e38c5146d --- /dev/null +++ b/lib/CodeGen/CGCXX.cpp @@ -0,0 +1,454 @@ +//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This contains code dealing with C++ code generation. +// +//===----------------------------------------------------------------------===// + +// We might split this into multiple files if it gets too unwieldy + +#include "CodeGenFunction.h" +#include "CodeGenModule.h" +#include "Mangle.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclObjC.h" +#include "llvm/ADT/StringExtras.h" +using namespace clang; +using namespace CodeGen; + +void +CodeGenFunction::GenerateStaticCXXBlockVarDeclInit(const VarDecl &D, + llvm::GlobalVariable *GV) { + // FIXME: This should use __cxa_guard_{acquire,release}? + + assert(!getContext().getLangOptions().ThreadsafeStatics && + "thread safe statics are currently not supported!"); + + llvm::SmallString<256> GuardVName; + llvm::raw_svector_ostream GuardVOut(GuardVName); + mangleGuardVariable(&D, getContext(), GuardVOut); + + // Create the guard variable. + llvm::GlobalValue *GuardV = + new llvm::GlobalVariable(llvm::Type::Int64Ty, false, + GV->getLinkage(), + llvm::Constant::getNullValue(llvm::Type::Int64Ty), + GuardVName.c_str(), + &CGM.getModule()); + + // Load the first byte of the guard variable. 
+ const llvm::Type *PtrTy = llvm::PointerType::get(llvm::Type::Int8Ty, 0); + llvm::Value *V = Builder.CreateLoad(Builder.CreateBitCast(GuardV, PtrTy), + "tmp"); + + // Compare it against 0. + llvm::Value *nullValue = llvm::Constant::getNullValue(llvm::Type::Int8Ty); + llvm::Value *ICmp = Builder.CreateICmpEQ(V, nullValue , "tobool"); + + llvm::BasicBlock *InitBlock = createBasicBlock("init"); + llvm::BasicBlock *EndBlock = createBasicBlock("init.end"); + + // If the guard variable is 0, jump to the initializer code. + Builder.CreateCondBr(ICmp, InitBlock, EndBlock); + + EmitBlock(InitBlock); + + const Expr *Init = D.getInit(); + if (!hasAggregateLLVMType(Init->getType())) { + llvm::Value *V = EmitScalarExpr(Init); + Builder.CreateStore(V, GV, D.getType().isVolatileQualified()); + } else if (Init->getType()->isAnyComplexType()) { + EmitComplexExprIntoAddr(Init, GV, D.getType().isVolatileQualified()); + } else { + EmitAggExpr(Init, GV, D.getType().isVolatileQualified()); + } + + Builder.CreateStore(llvm::ConstantInt::get(llvm::Type::Int8Ty, 1), + Builder.CreateBitCast(GuardV, PtrTy)); + + EmitBlock(EndBlock); +} + +RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD, + llvm::Value *Callee, + llvm::Value *This, + CallExpr::const_arg_iterator ArgBeg, + CallExpr::const_arg_iterator ArgEnd) { + assert(MD->isInstance() && + "Trying to emit a member call expr on a static method!"); + + const FunctionProtoType *FPT = MD->getType()->getAsFunctionProtoType(); + + CallArgList Args; + + // Push the this ptr. 
+ Args.push_back(std::make_pair(RValue::get(This), + MD->getThisType(getContext()))); + + // And the rest of the call args + EmitCallArgs(Args, FPT, ArgBeg, ArgEnd); + + QualType ResultType = MD->getType()->getAsFunctionType()->getResultType(); + return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args), + Callee, Args, MD); +} + +RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE) { + const MemberExpr *ME = cast<MemberExpr>(CE->getCallee()); + const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl()); + + const FunctionProtoType *FPT = MD->getType()->getAsFunctionProtoType(); + const llvm::Type *Ty = + CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD), + FPT->isVariadic()); + llvm::Constant *Callee = CGM.GetAddrOfFunction(GlobalDecl(MD), Ty); + + llvm::Value *This; + + if (ME->isArrow()) + This = EmitScalarExpr(ME->getBase()); + else { + LValue BaseLV = EmitLValue(ME->getBase()); + This = BaseLV.getAddress(); + } + + return EmitCXXMemberCall(MD, Callee, This, + CE->arg_begin(), CE->arg_end()); +} + +RValue +CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, + const CXXMethodDecl *MD) { + assert(MD->isInstance() && + "Trying to emit a member call expr on a static method!"); + + + const FunctionProtoType *FPT = MD->getType()->getAsFunctionProtoType(); + const llvm::Type *Ty = + CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD), + FPT->isVariadic()); + llvm::Constant *Callee = CGM.GetAddrOfFunction(GlobalDecl(MD), Ty); + + llvm::Value *This = EmitLValue(E->getArg(0)).getAddress(); + + return EmitCXXMemberCall(MD, Callee, This, + E->arg_begin() + 1, E->arg_end()); +} + +llvm::Value *CodeGenFunction::LoadCXXThis() { + assert(isa<CXXMethodDecl>(CurFuncDecl) && + "Must be in a C++ member function decl to load 'this'"); + assert(cast<CXXMethodDecl>(CurFuncDecl)->isInstance() && + "Must be in a C++ member function decl to load 'this'"); + + // FIXME: What if we're inside a 
block? + // ans: See how CodeGenFunction::LoadObjCSelf() uses + // CodeGenFunction::BlockForwardSelf() for how to do this. + return Builder.CreateLoad(LocalDeclMap[CXXThisDecl], "this"); +} + +void +CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D, + CXXCtorType Type, + llvm::Value *This, + CallExpr::const_arg_iterator ArgBeg, + CallExpr::const_arg_iterator ArgEnd) { + llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type); + + EmitCXXMemberCall(D, Callee, This, ArgBeg, ArgEnd); +} + +void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *D, + CXXDtorType Type, + llvm::Value *This) { + llvm::Value *Callee = CGM.GetAddrOfCXXDestructor(D, Type); + + EmitCXXMemberCall(D, Callee, This, 0, 0); +} + +void +CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest, + const CXXConstructExpr *E) { + assert(Dest && "Must have a destination!"); + + const CXXRecordDecl *RD = + cast<CXXRecordDecl>(E->getType()->getAsRecordType()->getDecl()); + if (RD->hasTrivialConstructor()) + return; + + // Call the constructor. + EmitCXXConstructorCall(E->getConstructor(), Ctor_Complete, Dest, + E->arg_begin(), E->arg_end()); +} + +void CodeGenFunction::PushCXXTemporary(const CXXTemporary *Temporary, + llvm::Value *Ptr) { + LiveTemporaries.push_back(Temporary); + + // Make a cleanup scope and emit the destructor. + { + CleanupScope Scope(*this); + + EmitCXXDestructorCall(Temporary->getDestructor(), Dtor_Complete, Ptr); + } +} + +RValue +CodeGenFunction::EmitCXXExprWithTemporaries(const CXXExprWithTemporaries *E, + llvm::Value *AggLoc, + bool isAggLocVolatile) { + // Keep track of the current cleanup stack depth. + size_t CleanupStackDepth = CleanupEntries.size(); + + unsigned OldNumLiveTemporaries = LiveTemporaries.size(); + + RValue RV = EmitAnyExpr(E->getSubExpr(), AggLoc, isAggLocVolatile); + + // Go through the temporaries backwards. 
+ for (unsigned i = E->getNumTemporaries(); i != 0; --i) { + assert(LiveTemporaries.back() == E->getTemporary(i - 1)); + LiveTemporaries.pop_back(); + } + + assert(OldNumLiveTemporaries == LiveTemporaries.size() && + "Live temporary stack mismatch!"); + + EmitCleanupBlocks(CleanupStackDepth); + + return RV; +} + +llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) { + if (E->isArray()) { + ErrorUnsupported(E, "new[] expression"); + return llvm::UndefValue::get(ConvertType(E->getType())); + } + + QualType AllocType = E->getAllocatedType(); + FunctionDecl *NewFD = E->getOperatorNew(); + const FunctionProtoType *NewFTy = NewFD->getType()->getAsFunctionProtoType(); + + CallArgList NewArgs; + + // The allocation size is the first argument. + QualType SizeTy = getContext().getSizeType(); + llvm::Value *AllocSize = + llvm::ConstantInt::get(ConvertType(SizeTy), + getContext().getTypeSize(AllocType) / 8); + + NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy)); + + // Emit the rest of the arguments. + // FIXME: Ideally, this should just use EmitCallArgs. + CXXNewExpr::const_arg_iterator NewArg = E->placement_arg_begin(); + + // First, use the types from the function type. + // We start at 1 here because the first argument (the allocation size) + // has already been emitted. + for (unsigned i = 1, e = NewFTy->getNumArgs(); i != e; ++i, ++NewArg) { + QualType ArgType = NewFTy->getArgType(i); + + assert(getContext().getCanonicalType(ArgType.getNonReferenceType()). + getTypePtr() == + getContext().getCanonicalType(NewArg->getType()).getTypePtr() && + "type mismatch in call argument!"); + + NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType), + ArgType)); + + } + + // Either we've emitted all the call args, or we have a call to a + // variadic function. 
+ assert((NewArg == E->placement_arg_end() || NewFTy->isVariadic()) && + "Extra arguments in non-variadic function!"); + + // If we still have any arguments, emit them using the type of the argument. + for (CXXNewExpr::const_arg_iterator NewArgEnd = E->placement_arg_end(); + NewArg != NewArgEnd; ++NewArg) { + QualType ArgType = NewArg->getType(); + NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType), + ArgType)); + } + + // Emit the call to new. + RValue RV = + EmitCall(CGM.getTypes().getFunctionInfo(NewFTy->getResultType(), NewArgs), + CGM.GetAddrOfFunction(GlobalDecl(NewFD)), + NewArgs, NewFD); + + // If an allocation function is declared with an empty exception specification + // it returns null to indicate failure to allocate storage. [expr.new]p13. + // (We don't need to check for null when there's no new initializer and + // we're allocating a POD type). + bool NullCheckResult = NewFTy->hasEmptyExceptionSpec() && + !(AllocType->isPODType() && !E->hasInitializer()); + + llvm::BasicBlock *NewNull = 0; + llvm::BasicBlock *NewNotNull = 0; + llvm::BasicBlock *NewEnd = 0; + + llvm::Value *NewPtr = RV.getScalarVal(); + + if (NullCheckResult) { + NewNull = createBasicBlock("new.null"); + NewNotNull = createBasicBlock("new.notnull"); + NewEnd = createBasicBlock("new.end"); + + llvm::Value *IsNull = + Builder.CreateICmpEQ(NewPtr, + llvm::Constant::getNullValue(NewPtr->getType()), + "isnull"); + + Builder.CreateCondBr(IsNull, NewNull, NewNotNull); + EmitBlock(NewNotNull); + } + + NewPtr = Builder.CreateBitCast(NewPtr, ConvertType(E->getType())); + + if (AllocType->isPODType()) { + if (E->getNumConstructorArgs() > 0) { + assert(E->getNumConstructorArgs() == 1 && + "Can only have one argument to initializer of POD type."); + + const Expr *Init = E->getConstructorArg(0); + + if (!hasAggregateLLVMType(AllocType)) + Builder.CreateStore(EmitScalarExpr(Init), NewPtr); + else if (AllocType->isAnyComplexType()) + EmitComplexExprIntoAddr(Init, NewPtr, 
AllocType.isVolatileQualified()); + else + EmitAggExpr(Init, NewPtr, AllocType.isVolatileQualified()); + } + } else { + // Call the constructor. + CXXConstructorDecl *Ctor = E->getConstructor(); + + EmitCXXConstructorCall(Ctor, Ctor_Complete, NewPtr, + E->constructor_arg_begin(), + E->constructor_arg_end()); + } + + if (NullCheckResult) { + Builder.CreateBr(NewEnd); + EmitBlock(NewNull); + Builder.CreateBr(NewEnd); + EmitBlock(NewEnd); + + llvm::PHINode *PHI = Builder.CreatePHI(NewPtr->getType()); + PHI->reserveOperandSpace(2); + PHI->addIncoming(NewPtr, NewNotNull); + PHI->addIncoming(llvm::Constant::getNullValue(NewPtr->getType()), NewNull); + + NewPtr = PHI; + } + + return NewPtr; +} + +static bool canGenerateCXXstructor(const CXXRecordDecl *RD, + ASTContext &Context) { + // The class has base classes - we don't support that right now. + if (RD->getNumBases() > 0) + return false; + + for (CXXRecordDecl::field_iterator I = RD->field_begin(Context), + E = RD->field_end(Context); I != E; ++I) { + // We don't support ctors for fields that aren't POD. 
+ if (!I->getType()->isPODType()) + return false; + } + + return true; +} + +void CodeGenModule::EmitCXXConstructors(const CXXConstructorDecl *D) { + if (!canGenerateCXXstructor(D->getParent(), getContext())) { + ErrorUnsupported(D, "C++ constructor", true); + return; + } + + EmitGlobal(GlobalDecl(D, Ctor_Complete)); + EmitGlobal(GlobalDecl(D, Ctor_Base)); +} + +void CodeGenModule::EmitCXXConstructor(const CXXConstructorDecl *D, + CXXCtorType Type) { + + llvm::Function *Fn = GetAddrOfCXXConstructor(D, Type); + + CodeGenFunction(*this).GenerateCode(D, Fn); + + SetFunctionDefinitionAttributes(D, Fn); + SetLLVMFunctionAttributesForDefinition(D, Fn); +} + +llvm::Function * +CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *D, + CXXCtorType Type) { + const llvm::FunctionType *FTy = + getTypes().GetFunctionType(getTypes().getFunctionInfo(D), false); + + const char *Name = getMangledCXXCtorName(D, Type); + return cast<llvm::Function>( + GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(D, Type))); +} + +const char *CodeGenModule::getMangledCXXCtorName(const CXXConstructorDecl *D, + CXXCtorType Type) { + llvm::SmallString<256> Name; + llvm::raw_svector_ostream Out(Name); + mangleCXXCtor(D, Type, Context, Out); + + Name += '\0'; + return UniqueMangledName(Name.begin(), Name.end()); +} + +void CodeGenModule::EmitCXXDestructors(const CXXDestructorDecl *D) { + if (!canGenerateCXXstructor(D->getParent(), getContext())) { + ErrorUnsupported(D, "C++ destructor", true); + return; + } + + EmitCXXDestructor(D, Dtor_Complete); + EmitCXXDestructor(D, Dtor_Base); +} + +void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *D, + CXXDtorType Type) { + llvm::Function *Fn = GetAddrOfCXXDestructor(D, Type); + + CodeGenFunction(*this).GenerateCode(D, Fn); + + SetFunctionDefinitionAttributes(D, Fn); + SetLLVMFunctionAttributesForDefinition(D, Fn); +} + +llvm::Function * +CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *D, + CXXDtorType Type) { + const 
llvm::FunctionType *FTy = + getTypes().GetFunctionType(getTypes().getFunctionInfo(D), false); + + const char *Name = getMangledCXXDtorName(D, Type); + return cast<llvm::Function>( + GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(D, Type))); +} + +const char *CodeGenModule::getMangledCXXDtorName(const CXXDestructorDecl *D, + CXXDtorType Type) { + llvm::SmallString<256> Name; + llvm::raw_svector_ostream Out(Name); + mangleCXXDtor(D, Type, Context, Out); + + Name += '\0'; + return UniqueMangledName(Name.begin(), Name.end()); +} diff --git a/lib/CodeGen/CGCXX.h b/lib/CodeGen/CGCXX.h new file mode 100644 index 000000000000..6051d9133c02 --- /dev/null +++ b/lib/CodeGen/CGCXX.h @@ -0,0 +1,36 @@ +//===----- CGCXX.h - C++ related code CodeGen declarations ------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// These classes wrap the information about a call or function +// definition used to handle ABI compliancy. 
+// +//===----------------------------------------------------------------------===// + +#ifndef CLANG_CODEGEN_CGCXX_H +#define CLANG_CODEGEN_CGCXX_H + +namespace clang { + +/// CXXCtorType - C++ constructor types +enum CXXCtorType { + Ctor_Complete, // Complete object ctor + Ctor_Base, // Base object ctor + Ctor_CompleteAllocating // Complete object allocating ctor +}; + +/// CXXDtorType - C++ destructor types +enum CXXDtorType { + Dtor_Deleting, // Deleting dtor + Dtor_Complete, // Complete object dtor + Dtor_Base // Base object dtor +}; + +} // end namespace clang + +#endif // CLANG_CODEGEN_CGCXX_H diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp new file mode 100644 index 000000000000..ea0b887c64c6 --- /dev/null +++ b/lib/CodeGen/CGCall.cpp @@ -0,0 +1,2196 @@ +//===----- CGCall.h - Encapsulate calling convention details ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// These classes wrap the information about a call or function +// definition used to handle ABI compliancy. +// +//===----------------------------------------------------------------------===// + +#include "CGCall.h" +#include "CodeGenFunction.h" +#include "CodeGenModule.h" +#include "clang/Basic/TargetInfo.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/RecordLayout.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/Attributes.h" +#include "llvm/Support/CallSite.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Target/TargetData.h" + +#include "ABIInfo.h" + +using namespace clang; +using namespace CodeGen; + +/***/ + +// FIXME: Use iterator and sidestep silly type array creation. 
+ +const +CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionNoProtoType *FTNP) { + return getFunctionInfo(FTNP->getResultType(), + llvm::SmallVector<QualType, 16>()); +} + +const +CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionProtoType *FTP) { + llvm::SmallVector<QualType, 16> ArgTys; + // FIXME: Kill copy. + for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i) + ArgTys.push_back(FTP->getArgType(i)); + return getFunctionInfo(FTP->getResultType(), ArgTys); +} + +const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) { + llvm::SmallVector<QualType, 16> ArgTys; + // Add the 'this' pointer unless this is a static method. + if (MD->isInstance()) + ArgTys.push_back(MD->getThisType(Context)); + + const FunctionProtoType *FTP = MD->getType()->getAsFunctionProtoType(); + for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i) + ArgTys.push_back(FTP->getArgType(i)); + return getFunctionInfo(FTP->getResultType(), ArgTys); +} + +const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) { + if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) + if (MD->isInstance()) + return getFunctionInfo(MD); + + const FunctionType *FTy = FD->getType()->getAsFunctionType(); + if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FTy)) + return getFunctionInfo(FTP); + return getFunctionInfo(cast<FunctionNoProtoType>(FTy)); +} + +const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) { + llvm::SmallVector<QualType, 16> ArgTys; + ArgTys.push_back(MD->getSelfDecl()->getType()); + ArgTys.push_back(Context.getObjCSelType()); + // FIXME: Kill copy? + for (ObjCMethodDecl::param_iterator i = MD->param_begin(), + e = MD->param_end(); i != e; ++i) + ArgTys.push_back((*i)->getType()); + return getFunctionInfo(MD->getResultType(), ArgTys); +} + +const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy, + const CallArgList &Args) { + // FIXME: Kill copy. 
+ llvm::SmallVector<QualType, 16> ArgTys; + for (CallArgList::const_iterator i = Args.begin(), e = Args.end(); + i != e; ++i) + ArgTys.push_back(i->second); + return getFunctionInfo(ResTy, ArgTys); +} + +const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy, + const FunctionArgList &Args) { + // FIXME: Kill copy. + llvm::SmallVector<QualType, 16> ArgTys; + for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); + i != e; ++i) + ArgTys.push_back(i->second); + return getFunctionInfo(ResTy, ArgTys); +} + +const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy, + const llvm::SmallVector<QualType, 16> &ArgTys) { + // Lookup or create unique function info. + llvm::FoldingSetNodeID ID; + CGFunctionInfo::Profile(ID, ResTy, ArgTys.begin(), ArgTys.end()); + + void *InsertPos = 0; + CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos); + if (FI) + return *FI; + + // Construct the function info. + FI = new CGFunctionInfo(ResTy, ArgTys); + FunctionInfos.InsertNode(FI, InsertPos); + + // Compute ABI information. + getABIInfo().computeInfo(*FI, getContext()); + + return *FI; +} + +/***/ + +ABIInfo::~ABIInfo() {} + +void ABIArgInfo::dump() const { + fprintf(stderr, "(ABIArgInfo Kind="); + switch (TheKind) { + case Direct: + fprintf(stderr, "Direct"); + break; + case Ignore: + fprintf(stderr, "Ignore"); + break; + case Coerce: + fprintf(stderr, "Coerce Type="); + getCoerceToType()->print(llvm::errs()); + break; + case Indirect: + fprintf(stderr, "Indirect Align=%d", getIndirectAlign()); + break; + case Expand: + fprintf(stderr, "Expand"); + break; + } + fprintf(stderr, ")\n"); +} + +/***/ + +static bool isEmptyRecord(ASTContext &Context, QualType T); + +/// isEmptyField - Return true iff a the field is "empty", that is it +/// is an unnamed bit-field or an (array of) empty record(s). 
+static bool isEmptyField(ASTContext &Context, const FieldDecl *FD) { + if (FD->isUnnamedBitfield()) + return true; + + QualType FT = FD->getType(); + // Constant arrays of empty records count as empty, strip them off. + while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) + FT = AT->getElementType(); + + return isEmptyRecord(Context, FT); +} + +/// isEmptyRecord - Return true iff a structure contains only empty +/// fields. Note that a structure with a flexible array member is not +/// considered empty. +static bool isEmptyRecord(ASTContext &Context, QualType T) { + const RecordType *RT = T->getAsRecordType(); + if (!RT) + return 0; + const RecordDecl *RD = RT->getDecl(); + if (RD->hasFlexibleArrayMember()) + return false; + for (RecordDecl::field_iterator i = RD->field_begin(Context), + e = RD->field_end(Context); i != e; ++i) + if (!isEmptyField(Context, *i)) + return false; + return true; +} + +/// isSingleElementStruct - Determine if a structure is a "single +/// element struct", i.e. it has exactly one non-empty field or +/// exactly one field which is itself a single element +/// struct. Structures with flexible array members are never +/// considered single element structs. +/// +/// \return The field declaration for the single non-empty field, if +/// it exists. +static const Type *isSingleElementStruct(QualType T, ASTContext &Context) { + const RecordType *RT = T->getAsStructureType(); + if (!RT) + return 0; + + const RecordDecl *RD = RT->getDecl(); + if (RD->hasFlexibleArrayMember()) + return 0; + + const Type *Found = 0; + for (RecordDecl::field_iterator i = RD->field_begin(Context), + e = RD->field_end(Context); i != e; ++i) { + const FieldDecl *FD = *i; + QualType FT = FD->getType(); + + // Ignore empty fields. + if (isEmptyField(Context, FD)) + continue; + + // If we already found an element then this isn't a single-element + // struct. + if (Found) + return 0; + + // Treat single element arrays as the element. 
+ while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) { + if (AT->getSize().getZExtValue() != 1) + break; + FT = AT->getElementType(); + } + + if (!CodeGenFunction::hasAggregateLLVMType(FT)) { + Found = FT.getTypePtr(); + } else { + Found = isSingleElementStruct(FT, Context); + if (!Found) + return 0; + } + } + + return Found; +} + +static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) { + if (!Ty->getAsBuiltinType() && !Ty->isPointerType()) + return false; + + uint64_t Size = Context.getTypeSize(Ty); + return Size == 32 || Size == 64; +} + +static bool areAllFields32Or64BitBasicType(const RecordDecl *RD, + ASTContext &Context) { + for (RecordDecl::field_iterator i = RD->field_begin(Context), + e = RD->field_end(Context); i != e; ++i) { + const FieldDecl *FD = *i; + + if (!is32Or64BitBasicType(FD->getType(), Context)) + return false; + + // FIXME: Reject bit-fields wholesale; there are two problems, we don't know + // how to expand them yet, and the predicate for telling if a bitfield still + // counts as "basic" is more complicated than what we were doing previously. + if (FD->isBitField()) + return false; + } + + return true; +} + +namespace { +/// DefaultABIInfo - The default implementation for ABI specific +/// details. This implementation provides information which results in +/// self-consistent and sensible LLVM IR generation, but does not +/// conform to any particular ABI. 
+class DefaultABIInfo : public ABIInfo { + ABIArgInfo classifyReturnType(QualType RetTy, + ASTContext &Context) const; + + ABIArgInfo classifyArgumentType(QualType RetTy, + ASTContext &Context) const; + + virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const { + FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context); + for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); + it != ie; ++it) + it->info = classifyArgumentType(it->type, Context); + } + + virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, + CodeGenFunction &CGF) const; +}; + +/// X86_32ABIInfo - The X86-32 ABI information. +class X86_32ABIInfo : public ABIInfo { + ASTContext &Context; + bool IsDarwin; + + static bool isRegisterSize(unsigned Size) { + return (Size == 8 || Size == 16 || Size == 32 || Size == 64); + } + + static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context); + +public: + ABIArgInfo classifyReturnType(QualType RetTy, + ASTContext &Context) const; + + ABIArgInfo classifyArgumentType(QualType RetTy, + ASTContext &Context) const; + + virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const { + FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context); + for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); + it != ie; ++it) + it->info = classifyArgumentType(it->type, Context); + } + + virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, + CodeGenFunction &CGF) const; + + X86_32ABIInfo(ASTContext &Context, bool d) + : ABIInfo(), Context(Context), IsDarwin(d) {} +}; +} + + +/// shouldReturnTypeInRegister - Determine if the given type should be +/// passed in a register (for the Darwin ABI). +bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty, + ASTContext &Context) { + uint64_t Size = Context.getTypeSize(Ty); + + // Type must be register sized. 
+ if (!isRegisterSize(Size)) + return false; + + if (Ty->isVectorType()) { + // 64- and 128- bit vectors inside structures are not returned in + // registers. + if (Size == 64 || Size == 128) + return false; + + return true; + } + + // If this is a builtin, pointer, or complex type, it is ok. + if (Ty->getAsBuiltinType() || Ty->isPointerType() || Ty->isAnyComplexType()) + return true; + + // Arrays are treated like records. + if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) + return shouldReturnTypeInRegister(AT->getElementType(), Context); + + // Otherwise, it must be a record type. + const RecordType *RT = Ty->getAsRecordType(); + if (!RT) return false; + + // Structure types are passed in register if all fields would be + // passed in a register. + for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(Context), + e = RT->getDecl()->field_end(Context); i != e; ++i) { + const FieldDecl *FD = *i; + + // Empty fields are ignored. + if (isEmptyField(Context, FD)) + continue; + + // Check fields recursively. + if (!shouldReturnTypeInRegister(FD->getType(), Context)) + return false; + } + + return true; +} + +ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy, + ASTContext &Context) const { + if (RetTy->isVoidType()) { + return ABIArgInfo::getIgnore(); + } else if (const VectorType *VT = RetTy->getAsVectorType()) { + // On Darwin, some vectors are returned in registers. + if (IsDarwin) { + uint64_t Size = Context.getTypeSize(RetTy); + + // 128-bit vectors are a special case; they are returned in + // registers and we need to make sure to pick a type the LLVM + // backend will like. + if (Size == 128) + return ABIArgInfo::getCoerce(llvm::VectorType::get(llvm::Type::Int64Ty, + 2)); + + // Always return in register if it fits in a general purpose + // register, or if it is 64 bits and has a single element. 
+ if ((Size == 8 || Size == 16 || Size == 32) || + (Size == 64 && VT->getNumElements() == 1)) + return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size)); + + return ABIArgInfo::getIndirect(0); + } + + return ABIArgInfo::getDirect(); + } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) { + // Structures with flexible arrays are always indirect. + if (const RecordType *RT = RetTy->getAsStructureType()) + if (RT->getDecl()->hasFlexibleArrayMember()) + return ABIArgInfo::getIndirect(0); + + // Outside of Darwin, structs and unions are always indirect. + if (!IsDarwin && !RetTy->isAnyComplexType()) + return ABIArgInfo::getIndirect(0); + + // Classify "single element" structs as their element type. + if (const Type *SeltTy = isSingleElementStruct(RetTy, Context)) { + if (const BuiltinType *BT = SeltTy->getAsBuiltinType()) { + if (BT->isIntegerType()) { + // We need to use the size of the structure, padding + // bit-fields can adjust that to be larger than the single + // element type. + uint64_t Size = Context.getTypeSize(RetTy); + return ABIArgInfo::getCoerce(llvm::IntegerType::get((unsigned) Size)); + } else if (BT->getKind() == BuiltinType::Float) { + assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) && + "Unexpect single element structure size!"); + return ABIArgInfo::getCoerce(llvm::Type::FloatTy); + } else if (BT->getKind() == BuiltinType::Double) { + assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) && + "Unexpect single element structure size!"); + return ABIArgInfo::getCoerce(llvm::Type::DoubleTy); + } + } else if (SeltTy->isPointerType()) { + // FIXME: It would be really nice if this could come out as the proper + // pointer type. + llvm::Type *PtrTy = + llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + return ABIArgInfo::getCoerce(PtrTy); + } else if (SeltTy->isVectorType()) { + // 64- and 128-bit vectors are never returned in a + // register when inside a structure. 
+ uint64_t Size = Context.getTypeSize(RetTy); + if (Size == 64 || Size == 128) + return ABIArgInfo::getIndirect(0); + + return classifyReturnType(QualType(SeltTy, 0), Context); + } + } + + // Small structures which are register sized are generally returned + // in a register. + if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, Context)) { + uint64_t Size = Context.getTypeSize(RetTy); + return ABIArgInfo::getCoerce(llvm::IntegerType::get(Size)); + } + + return ABIArgInfo::getIndirect(0); + } else { + return ABIArgInfo::getDirect(); + } +} + +ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, + ASTContext &Context) const { + // FIXME: Set alignment on indirect arguments. + if (CodeGenFunction::hasAggregateLLVMType(Ty)) { + // Structures with flexible arrays are always indirect. + if (const RecordType *RT = Ty->getAsStructureType()) + if (RT->getDecl()->hasFlexibleArrayMember()) + return ABIArgInfo::getIndirect(0); + + // Ignore empty structs. + uint64_t Size = Context.getTypeSize(Ty); + if (Ty->isStructureType() && Size == 0) + return ABIArgInfo::getIgnore(); + + // Expand structs with size <= 128-bits which consist only of + // basic types (int, long long, float, double, xxx*). This is + // non-recursive and does not ignore empty fields. 
+ if (const RecordType *RT = Ty->getAsStructureType()) { + if (Context.getTypeSize(Ty) <= 4*32 && + areAllFields32Or64BitBasicType(RT->getDecl(), Context)) + return ABIArgInfo::getExpand(); + } + + return ABIArgInfo::getIndirect(0); + } else { + return ABIArgInfo::getDirect(); + } +} + +llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, + CodeGenFunction &CGF) const { + const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + const llvm::Type *BPP = llvm::PointerType::getUnqual(BP); + + CGBuilderTy &Builder = CGF.Builder; + llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, + "ap"); + llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); + llvm::Type *PTy = + llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); + llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); + + uint64_t Offset = + llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4); + llvm::Value *NextAddr = + Builder.CreateGEP(Addr, + llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset), + "ap.next"); + Builder.CreateStore(NextAddr, VAListAddrAsBPP); + + return AddrTyped; +} + +namespace { +/// X86_64ABIInfo - The X86_64 ABI information. +class X86_64ABIInfo : public ABIInfo { + enum Class { + Integer = 0, + SSE, + SSEUp, + X87, + X87Up, + ComplexX87, + NoClass, + Memory + }; + + /// merge - Implement the X86_64 ABI merging algorithm. + /// + /// Merge an accumulating classification \arg Accum with a field + /// classification \arg Field. + /// + /// \param Accum - The accumulating classification. This should + /// always be either NoClass or the result of a previous merge + /// call. In addition, this should never be Memory (the caller + /// should just return Memory for the aggregate). + Class merge(Class Accum, Class Field) const; + + /// classify - Determine the x86_64 register classes in which the + /// given type T should be passed. 
+ /// + /// \param Lo - The classification for the parts of the type + /// residing in the low word of the containing object. + /// + /// \param Hi - The classification for the parts of the type + /// residing in the high word of the containing object. + /// + /// \param OffsetBase - The bit offset of this type in the + /// containing object. Some parameters are classified different + /// depending on whether they straddle an eightbyte boundary. + /// + /// If a word is unused its result will be NoClass; if a type should + /// be passed in Memory then at least the classification of \arg Lo + /// will be Memory. + /// + /// The \arg Lo class will be NoClass iff the argument is ignored. + /// + /// If the \arg Lo class is ComplexX87, then the \arg Hi class will + /// also be ComplexX87. + void classify(QualType T, ASTContext &Context, uint64_t OffsetBase, + Class &Lo, Class &Hi) const; + + /// getCoerceResult - Given a source type \arg Ty and an LLVM type + /// to coerce to, chose the best way to pass Ty in the same place + /// that \arg CoerceTo would be passed, but while keeping the + /// emitted code as simple as possible. + /// + /// FIXME: Note, this should be cleaned up to just take an enumeration of all + /// the ways we might want to pass things, instead of constructing an LLVM + /// type. This makes this code more explicit, and it makes it clearer that we + /// are also doing this for correctness in the case of passing scalar types. + ABIArgInfo getCoerceResult(QualType Ty, + const llvm::Type *CoerceTo, + ASTContext &Context) const; + + /// getIndirectResult - Give a source type \arg Ty, return a suitable result + /// such that the argument will be passed in memory. 
+ ABIArgInfo getIndirectResult(QualType Ty, + ASTContext &Context) const; + + ABIArgInfo classifyReturnType(QualType RetTy, + ASTContext &Context) const; + + ABIArgInfo classifyArgumentType(QualType Ty, + ASTContext &Context, + unsigned &neededInt, + unsigned &neededSSE) const; + +public: + virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const; + + virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, + CodeGenFunction &CGF) const; +}; +} + +X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, + Class Field) const { + // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is + // classified recursively so that always two fields are + // considered. The resulting class is calculated according to + // the classes of the fields in the eightbyte: + // + // (a) If both classes are equal, this is the resulting class. + // + // (b) If one of the classes is NO_CLASS, the resulting class is + // the other class. + // + // (c) If one of the classes is MEMORY, the result is the MEMORY + // class. + // + // (d) If one of the classes is INTEGER, the result is the + // INTEGER. + // + // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class, + // MEMORY is used as class. + // + // (f) Otherwise class SSE is used. + + // Accum should never be memory (we should have returned) or + // ComplexX87 (because this cannot be passed in a structure). 
+ assert((Accum != Memory && Accum != ComplexX87) && + "Invalid accumulated classification during merge."); + if (Accum == Field || Field == NoClass) + return Accum; + else if (Field == Memory) + return Memory; + else if (Accum == NoClass) + return Field; + else if (Accum == Integer || Field == Integer) + return Integer; + else if (Field == X87 || Field == X87Up || Field == ComplexX87 || + Accum == X87 || Accum == X87Up) + return Memory; + else + return SSE; +} + +void X86_64ABIInfo::classify(QualType Ty, + ASTContext &Context, + uint64_t OffsetBase, + Class &Lo, Class &Hi) const { + // FIXME: This code can be simplified by introducing a simple value class for + // Class pairs with appropriate constructor methods for the various + // situations. + + // FIXME: Some of the split computations are wrong; unaligned vectors + // shouldn't be passed in registers for example, so there is no chance they + // can straddle an eightbyte. Verify & simplify. + + Lo = Hi = NoClass; + + Class &Current = OffsetBase < 64 ? Lo : Hi; + Current = Memory; + + if (const BuiltinType *BT = Ty->getAsBuiltinType()) { + BuiltinType::Kind k = BT->getKind(); + + if (k == BuiltinType::Void) { + Current = NoClass; + } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) { + Lo = Integer; + Hi = Integer; + } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) { + Current = Integer; + } else if (k == BuiltinType::Float || k == BuiltinType::Double) { + Current = SSE; + } else if (k == BuiltinType::LongDouble) { + Lo = X87; + Hi = X87Up; + } + // FIXME: _Decimal32 and _Decimal64 are SSE. + // FIXME: _float128 and _Decimal128 are (SSE, SSEUp). + } else if (const EnumType *ET = Ty->getAsEnumType()) { + // Classify the underlying integer type. 
+ classify(ET->getDecl()->getIntegerType(), Context, OffsetBase, Lo, Hi); + } else if (Ty->hasPointerRepresentation()) { + Current = Integer; + } else if (const VectorType *VT = Ty->getAsVectorType()) { + uint64_t Size = Context.getTypeSize(VT); + if (Size == 32) { + // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x + // float> as integer. + Current = Integer; + + // If this type crosses an eightbyte boundary, it should be + // split. + uint64_t EB_Real = (OffsetBase) / 64; + uint64_t EB_Imag = (OffsetBase + Size - 1) / 64; + if (EB_Real != EB_Imag) + Hi = Lo; + } else if (Size == 64) { + // gcc passes <1 x double> in memory. :( + if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) + return; + + // gcc passes <1 x long long> as INTEGER. + if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong)) + Current = Integer; + else + Current = SSE; + + // If this type crosses an eightbyte boundary, it should be + // split. + if (OffsetBase && OffsetBase != 64) + Hi = Lo; + } else if (Size == 128) { + Lo = SSE; + Hi = SSEUp; + } + } else if (const ComplexType *CT = Ty->getAsComplexType()) { + QualType ET = Context.getCanonicalType(CT->getElementType()); + + uint64_t Size = Context.getTypeSize(Ty); + if (ET->isIntegralType()) { + if (Size <= 64) + Current = Integer; + else if (Size <= 128) + Lo = Hi = Integer; + } else if (ET == Context.FloatTy) + Current = SSE; + else if (ET == Context.DoubleTy) + Lo = Hi = SSE; + else if (ET == Context.LongDoubleTy) + Current = ComplexX87; + + // If this complex type crosses an eightbyte boundary then it + // should be split. + uint64_t EB_Real = (OffsetBase) / 64; + uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64; + if (Hi == NoClass && EB_Real != EB_Imag) + Hi = Lo; + } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { + // Arrays are treated like structures. + + uint64_t Size = Context.getTypeSize(Ty); + + // AMD64-ABI 3.2.3p2: Rule 1. 
If the size of an object is larger + // than two eightbytes, ..., it has class MEMORY. + if (Size > 128) + return; + + // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned + // fields, it has class MEMORY. + // + // Only need to check alignment of array base. + if (OffsetBase % Context.getTypeAlign(AT->getElementType())) + return; + + // Otherwise implement simplified merge. We could be smarter about + // this, but it isn't worth it and would be harder to verify. + Current = NoClass; + uint64_t EltSize = Context.getTypeSize(AT->getElementType()); + uint64_t ArraySize = AT->getSize().getZExtValue(); + for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) { + Class FieldLo, FieldHi; + classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi); + Lo = merge(Lo, FieldLo); + Hi = merge(Hi, FieldHi); + if (Lo == Memory || Hi == Memory) + break; + } + + // Do post merger cleanup (see below). Only case we worry about is Memory. + if (Hi == Memory) + Lo = Memory; + assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification."); + } else if (const RecordType *RT = Ty->getAsRecordType()) { + uint64_t Size = Context.getTypeSize(Ty); + + // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger + // than two eightbytes, ..., it has class MEMORY. + if (Size > 128) + return; + + const RecordDecl *RD = RT->getDecl(); + + // Assume variable sized types are passed in memory. + if (RD->hasFlexibleArrayMember()) + return; + + const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); + + // Reset Lo class, this will be recomputed. + Current = NoClass; + unsigned idx = 0; + for (RecordDecl::field_iterator i = RD->field_begin(Context), + e = RD->field_end(Context); i != e; ++i, ++idx) { + uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); + bool BitField = i->isBitField(); + + // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned + // fields, it has class MEMORY. 
+ // + // Note, skip this test for bit-fields, see below. + if (!BitField && Offset % Context.getTypeAlign(i->getType())) { + Lo = Memory; + return; + } + + // Classify this field. + // + // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate + // exceeds a single eightbyte, each is classified + // separately. Each eightbyte gets initialized to class + // NO_CLASS. + Class FieldLo, FieldHi; + + // Bit-fields require special handling, they do not force the + // structure to be passed in memory even if unaligned, and + // therefore they can straddle an eightbyte. + if (BitField) { + // Ignore padding bit-fields. + if (i->isUnnamedBitfield()) + continue; + + uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx); + uint64_t Size = i->getBitWidth()->EvaluateAsInt(Context).getZExtValue(); + + uint64_t EB_Lo = Offset / 64; + uint64_t EB_Hi = (Offset + Size - 1) / 64; + FieldLo = FieldHi = NoClass; + if (EB_Lo) { + assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes."); + FieldLo = NoClass; + FieldHi = Integer; + } else { + FieldLo = Integer; + FieldHi = EB_Hi ? Integer : NoClass; + } + } else + classify(i->getType(), Context, Offset, FieldLo, FieldHi); + Lo = merge(Lo, FieldLo); + Hi = merge(Hi, FieldHi); + if (Lo == Memory || Hi == Memory) + break; + } + + // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done: + // + // (a) If one of the classes is MEMORY, the whole argument is + // passed in memory. + // + // (b) If SSEUP is not preceeded by SSE, it is converted to SSE. + + // The first of these conditions is guaranteed by how we implement + // the merge (just bail). + // + // The second condition occurs in the case of unions; for example + // union { _Complex double; unsigned; }. 
+ if (Hi == Memory) + Lo = Memory; + if (Hi == SSEUp && Lo != SSE) + Hi = SSE; + } +} + +ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty, + const llvm::Type *CoerceTo, + ASTContext &Context) const { + if (CoerceTo == llvm::Type::Int64Ty) { + // Integer and pointer types will end up in a general purpose + // register. + if (Ty->isIntegralType() || Ty->isPointerType()) + return ABIArgInfo::getDirect(); + + } else if (CoerceTo == llvm::Type::DoubleTy) { + // FIXME: It would probably be better to make CGFunctionInfo only map using + // canonical types than to canonize here. + QualType CTy = Context.getCanonicalType(Ty); + + // Float and double end up in a single SSE reg. + if (CTy == Context.FloatTy || CTy == Context.DoubleTy) + return ABIArgInfo::getDirect(); + + } + + return ABIArgInfo::getCoerce(CoerceTo); +} + +ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty, + ASTContext &Context) const { + // If this is a scalar LLVM value then assume LLVM will pass it in the right + // place naturally. + if (!CodeGenFunction::hasAggregateLLVMType(Ty)) + return ABIArgInfo::getDirect(); + + // FIXME: Set alignment correctly. + return ABIArgInfo::getIndirect(0); +} + +ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy, + ASTContext &Context) const { + // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the + // classification algorithm. + X86_64ABIInfo::Class Lo, Hi; + classify(RetTy, Context, 0, Lo, Hi); + + // Check some invariants. + assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); + assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification."); + assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); + + const llvm::Type *ResType = 0; + switch (Lo) { + case NoClass: + return ABIArgInfo::getIgnore(); + + case SSEUp: + case X87Up: + assert(0 && "Invalid classification for lo word."); + + // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via + // hidden argument. 
+ case Memory: + return getIndirectResult(RetTy, Context); + + // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next + // available register of the sequence %rax, %rdx is used. + case Integer: + ResType = llvm::Type::Int64Ty; break; + + // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next + // available SSE register of the sequence %xmm0, %xmm1 is used. + case SSE: + ResType = llvm::Type::DoubleTy; break; + + // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is + // returned on the X87 stack in %st0 as 80-bit x87 number. + case X87: + ResType = llvm::Type::X86_FP80Ty; break; + + // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real + // part of the value is returned in %st0 and the imaginary part in + // %st1. + case ComplexX87: + assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification."); + ResType = llvm::StructType::get(llvm::Type::X86_FP80Ty, + llvm::Type::X86_FP80Ty, + NULL); + break; + } + + switch (Hi) { + // Memory was handled previously and X87 should + // never occur as a hi class. + case Memory: + case X87: + assert(0 && "Invalid classification for hi word."); + + case ComplexX87: // Previously handled. + case NoClass: break; + + case Integer: + ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL); + break; + case SSE: + ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL); + break; + + // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte + // is passed in the upper half of the last used SSE register. + // + // SSEUP should always be preceeded by SSE, just widen. + case SSEUp: + assert(Lo == SSE && "Unexpected SSEUp classification."); + ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2); + break; + + // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is + // returned together with the previous X87 value in %st0. + case X87Up: + // If X87Up is preceeded by X87, we don't need to do + // anything. 
However, in some cases with unions it may not be + // preceeded by X87. In such situations we follow gcc and pass the + // extra bits in an SSE reg. + if (Lo != X87) + ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL); + break; + } + + return getCoerceResult(RetTy, ResType, Context); +} + +ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context, + unsigned &neededInt, + unsigned &neededSSE) const { + X86_64ABIInfo::Class Lo, Hi; + classify(Ty, Context, 0, Lo, Hi); + + // Check some invariants. + // FIXME: Enforce these by construction. + assert((Hi != Memory || Lo == Memory) && "Invalid memory classification."); + assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification."); + assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification."); + + neededInt = 0; + neededSSE = 0; + const llvm::Type *ResType = 0; + switch (Lo) { + case NoClass: + return ABIArgInfo::getIgnore(); + + // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument + // on the stack. + case Memory: + + // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or + // COMPLEX_X87, it is passed in memory. + case X87: + case ComplexX87: + return getIndirectResult(Ty, Context); + + case SSEUp: + case X87Up: + assert(0 && "Invalid classification for lo word."); + + // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next + // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8 + // and %r9 is used. + case Integer: + ++neededInt; + ResType = llvm::Type::Int64Ty; + break; + + // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next + // available SSE register is used, the registers are taken in the + // order from %xmm0 to %xmm7. + case SSE: + ++neededSSE; + ResType = llvm::Type::DoubleTy; + break; + } + + switch (Hi) { + // Memory was handled previously, ComplexX87 and X87 should + // never occur as hi classes, and X87Up must be preceed by X87, + // which is passed in memory. 
+ case Memory: + case X87: + case ComplexX87: + assert(0 && "Invalid classification for hi word."); + break; + + case NoClass: break; + case Integer: + ResType = llvm::StructType::get(ResType, llvm::Type::Int64Ty, NULL); + ++neededInt; + break; + + // X87Up generally doesn't occur here (long double is passed in + // memory), except in situations involving unions. + case X87Up: + case SSE: + ResType = llvm::StructType::get(ResType, llvm::Type::DoubleTy, NULL); + ++neededSSE; + break; + + // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the + // eightbyte is passed in the upper half of the last used SSE + // register. + case SSEUp: + assert(Lo == SSE && "Unexpected SSEUp classification."); + ResType = llvm::VectorType::get(llvm::Type::DoubleTy, 2); + break; + } + + return getCoerceResult(Ty, ResType, Context); +} + +void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const { + FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context); + + // Keep track of the number of assigned registers. + unsigned freeIntRegs = 6, freeSSERegs = 8; + + // If the return value is indirect, then the hidden argument is consuming one + // integer register. + if (FI.getReturnInfo().isIndirect()) + --freeIntRegs; + + // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers + // get assigned (in left-to-right order) for passing as follows... + for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); + it != ie; ++it) { + unsigned neededInt, neededSSE; + it->info = classifyArgumentType(it->type, Context, neededInt, neededSSE); + + // AMD64-ABI 3.2.3p3: If there are no registers available for any + // eightbyte of an argument, the whole argument is passed on the + // stack. If registers have already been assigned for some + // eightbytes of such an argument, the assignments get reverted. 
+ if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) { + freeIntRegs -= neededInt; + freeSSERegs -= neededSSE; + } else { + it->info = getIndirectResult(it->type, Context); + } + } +} + +static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr, + QualType Ty, + CodeGenFunction &CGF) { + llvm::Value *overflow_arg_area_p = + CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p"); + llvm::Value *overflow_arg_area = + CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area"); + + // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16 + // byte boundary if alignment needed by type exceeds 8 byte boundary. + uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8; + if (Align > 8) { + // Note that we follow the ABI & gcc here, even though the type + // could in theory have an alignment greater than 16. This case + // shouldn't ever matter in practice. + + // overflow_arg_area = (overflow_arg_area + 15) & ~15; + llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty, 15); + overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset); + llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area, + llvm::Type::Int64Ty); + llvm::Value *Mask = llvm::ConstantInt::get(llvm::Type::Int64Ty, ~15LL); + overflow_arg_area = + CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask), + overflow_arg_area->getType(), + "overflow_arg_area.align"); + } + + // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area. + const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); + llvm::Value *Res = + CGF.Builder.CreateBitCast(overflow_arg_area, + llvm::PointerType::getUnqual(LTy)); + + // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to: + // l->overflow_arg_area + sizeof(type). + // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to + // an 8 byte boundary. 
+ + uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8; + llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty, + (SizeInBytes + 7) & ~7); + overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset, + "overflow_arg_area.next"); + CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p); + + // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type. + return Res; +} + +llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, + CodeGenFunction &CGF) const { + // Assume that va_list type is correct; should be pointer to LLVM type: + // struct { + // i32 gp_offset; + // i32 fp_offset; + // i8* overflow_arg_area; + // i8* reg_save_area; + // }; + unsigned neededInt, neededSSE; + ABIArgInfo AI = classifyArgumentType(Ty, CGF.getContext(), + neededInt, neededSSE); + + // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed + // in the registers. If not go to step 7. + if (!neededInt && !neededSSE) + return EmitVAArgFromMemory(VAListAddr, Ty, CGF); + + // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of + // general purpose registers needed to pass type and num_fp to hold + // the number of floating point registers needed. + + // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into + // registers. In the case: l->gp_offset > 48 - num_gp * 8 or + // l->fp_offset > 304 - num_fp * 16 go to step 7. + // + // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of + // register save space). 
+ + llvm::Value *InRegs = 0; + llvm::Value *gp_offset_p = 0, *gp_offset = 0; + llvm::Value *fp_offset_p = 0, *fp_offset = 0; + if (neededInt) { + gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p"); + gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset"); + InRegs = + CGF.Builder.CreateICmpULE(gp_offset, + llvm::ConstantInt::get(llvm::Type::Int32Ty, + 48 - neededInt * 8), + "fits_in_gp"); + } + + if (neededSSE) { + fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p"); + fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset"); + llvm::Value *FitsInFP = + CGF.Builder.CreateICmpULE(fp_offset, + llvm::ConstantInt::get(llvm::Type::Int32Ty, + 176 - neededSSE * 16), + "fits_in_fp"); + InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP; + } + + llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg"); + llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem"); + llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end"); + CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock); + + // Emit code to load the value if it was passed in registers. + + CGF.EmitBlock(InRegBlock); + + // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with + // an offset of l->gp_offset and/or l->fp_offset. This may require + // copying to a temporary location in case the parameter is passed + // in different register classes or requires an alignment greater + // than 8 for general purpose registers and 16 for XMM registers. + // + // FIXME: This really results in shameful code when we end up needing to + // collect arguments from different places; often what should result in a + // simple assembling of a structure from scattered addresses has many more + // loads than necessary. Can we clean this up? 
+ const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty); + llvm::Value *RegAddr = + CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3), + "reg_save_area"); + if (neededInt && neededSSE) { + // FIXME: Cleanup. + assert(AI.isCoerce() && "Unexpected ABI info for mixed regs"); + const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType()); + llvm::Value *Tmp = CGF.CreateTempAlloca(ST); + assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs"); + const llvm::Type *TyLo = ST->getElementType(0); + const llvm::Type *TyHi = ST->getElementType(1); + assert((TyLo->isFloatingPoint() ^ TyHi->isFloatingPoint()) && + "Unexpected ABI info for mixed regs"); + const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo); + const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi); + llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); + llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); + llvm::Value *RegLoAddr = TyLo->isFloatingPoint() ? FPAddr : GPAddr; + llvm::Value *RegHiAddr = TyLo->isFloatingPoint() ? 
GPAddr : FPAddr; + llvm::Value *V = + CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo)); + CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); + V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi)); + CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); + + RegAddr = CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(LTy)); + } else if (neededInt) { + RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset); + RegAddr = CGF.Builder.CreateBitCast(RegAddr, + llvm::PointerType::getUnqual(LTy)); + } else { + if (neededSSE == 1) { + RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset); + RegAddr = CGF.Builder.CreateBitCast(RegAddr, + llvm::PointerType::getUnqual(LTy)); + } else { + assert(neededSSE == 2 && "Invalid number of needed registers!"); + // SSE registers are spaced 16 bytes apart in the register save + // area, we need to collect the two eightbytes together. + llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset); + llvm::Value *RegAddrHi = + CGF.Builder.CreateGEP(RegAddrLo, + llvm::ConstantInt::get(llvm::Type::Int32Ty, 16)); + const llvm::Type *DblPtrTy = + llvm::PointerType::getUnqual(llvm::Type::DoubleTy); + const llvm::StructType *ST = llvm::StructType::get(llvm::Type::DoubleTy, + llvm::Type::DoubleTy, + NULL); + llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST); + V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo, + DblPtrTy)); + CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0)); + V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi, + DblPtrTy)); + CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1)); + RegAddr = CGF.Builder.CreateBitCast(Tmp, + llvm::PointerType::getUnqual(LTy)); + } + } + + // AMD64-ABI 3.5.7p5: Step 5. Set: + // l->gp_offset = l->gp_offset + num_gp * 8 + // l->fp_offset = l->fp_offset + num_fp * 16. 
+ if (neededInt) { + llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty, + neededInt * 8); + CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset), + gp_offset_p); + } + if (neededSSE) { + llvm::Value *Offset = llvm::ConstantInt::get(llvm::Type::Int32Ty, + neededSSE * 16); + CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset), + fp_offset_p); + } + CGF.EmitBranch(ContBlock); + + // Emit code to load the value if it was passed in memory. + + CGF.EmitBlock(InMemBlock); + llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF); + + // Return the appropriate result. + + CGF.EmitBlock(ContBlock); + llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), + "vaarg.addr"); + ResAddr->reserveOperandSpace(2); + ResAddr->addIncoming(RegAddr, InRegBlock); + ResAddr->addIncoming(MemAddr, InMemBlock); + + return ResAddr; +} + +// ABI Info for PIC16 +class PIC16ABIInfo : public ABIInfo { + ABIArgInfo classifyReturnType(QualType RetTy, + ASTContext &Context) const; + + ABIArgInfo classifyArgumentType(QualType RetTy, + ASTContext &Context) const; + + virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const { + FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context); + for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); + it != ie; ++it) + it->info = classifyArgumentType(it->type, Context); + } + + virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, + CodeGenFunction &CGF) const; + +}; + +ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy, + ASTContext &Context) const { + if (RetTy->isVoidType()) { + return ABIArgInfo::getIgnore(); + } else { + return ABIArgInfo::getDirect(); + } +} + +ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty, + ASTContext &Context) const { + return ABIArgInfo::getDirect(); +} + +llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, + CodeGenFunction &CGF) const { + return 0; +} + +class ARMABIInfo 
: public ABIInfo { + ABIArgInfo classifyReturnType(QualType RetTy, + ASTContext &Context) const; + + ABIArgInfo classifyArgumentType(QualType RetTy, + ASTContext &Context) const; + + virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context) const; + + virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty, + CodeGenFunction &CGF) const; +}; + +void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context) const { + FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context); + for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end(); + it != ie; ++it) { + it->info = classifyArgumentType(it->type, Context); + } +} + +ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, + ASTContext &Context) const { + if (!CodeGenFunction::hasAggregateLLVMType(Ty)) { + return ABIArgInfo::getDirect(); + } + // FIXME: This is kind of nasty... but there isn't much choice because the ARM + // backend doesn't support byval. + // FIXME: This doesn't handle alignment > 64 bits. + const llvm::Type* ElemTy; + unsigned SizeRegs; + if (Context.getTypeAlign(Ty) > 32) { + ElemTy = llvm::Type::Int64Ty; + SizeRegs = (Context.getTypeSize(Ty) + 63) / 64; + } else { + ElemTy = llvm::Type::Int32Ty; + SizeRegs = (Context.getTypeSize(Ty) + 31) / 32; + } + std::vector<const llvm::Type*> LLVMFields; + LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs)); + const llvm::Type* STy = llvm::StructType::get(LLVMFields, true); + return ABIArgInfo::getCoerce(STy); +} + +ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, + ASTContext &Context) const { + if (RetTy->isVoidType()) { + return ABIArgInfo::getIgnore(); + } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) { + // Aggregates <= 4 bytes are returned in r0; other aggregates + // are returned indirectly. 
+ uint64_t Size = Context.getTypeSize(RetTy); + if (Size <= 32) + return ABIArgInfo::getCoerce(llvm::Type::Int32Ty); + return ABIArgInfo::getIndirect(0); + } else { + return ABIArgInfo::getDirect(); + } +} + +llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, + CodeGenFunction &CGF) const { + // FIXME: Need to handle alignment + const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + const llvm::Type *BPP = llvm::PointerType::getUnqual(BP); + + CGBuilderTy &Builder = CGF.Builder; + llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, + "ap"); + llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur"); + llvm::Type *PTy = + llvm::PointerType::getUnqual(CGF.ConvertType(Ty)); + llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy); + + uint64_t Offset = + llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4); + llvm::Value *NextAddr = + Builder.CreateGEP(Addr, + llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset), + "ap.next"); + Builder.CreateStore(NextAddr, VAListAddrAsBPP); + + return AddrTyped; +} + +ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy, + ASTContext &Context) const { + if (RetTy->isVoidType()) { + return ABIArgInfo::getIgnore(); + } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) { + return ABIArgInfo::getIndirect(0); + } else { + return ABIArgInfo::getDirect(); + } +} + +ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty, + ASTContext &Context) const { + if (CodeGenFunction::hasAggregateLLVMType(Ty)) { + return ABIArgInfo::getIndirect(0); + } else { + return ABIArgInfo::getDirect(); + } +} + +llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty, + CodeGenFunction &CGF) const { + return 0; +} + +const ABIInfo &CodeGenTypes::getABIInfo() const { + if (TheABIInfo) + return *TheABIInfo; + + // For now we just cache this in the CodeGenTypes and don't bother + // to free it. 
+ const char *TargetPrefix = getContext().Target.getTargetPrefix(); + if (strcmp(TargetPrefix, "x86") == 0) { + bool IsDarwin = strstr(getContext().Target.getTargetTriple(), "darwin"); + switch (getContext().Target.getPointerWidth(0)) { + case 32: + return *(TheABIInfo = new X86_32ABIInfo(Context, IsDarwin)); + case 64: + return *(TheABIInfo = new X86_64ABIInfo()); + } + } else if (strcmp(TargetPrefix, "arm") == 0) { + // FIXME: Support for OABI? + return *(TheABIInfo = new ARMABIInfo()); + } else if (strcmp(TargetPrefix, "pic16") == 0) { + return *(TheABIInfo = new PIC16ABIInfo()); + } + + return *(TheABIInfo = new DefaultABIInfo); +} + +/***/ + +CGFunctionInfo::CGFunctionInfo(QualType ResTy, + const llvm::SmallVector<QualType, 16> &ArgTys) { + NumArgs = ArgTys.size(); + Args = new ArgInfo[1 + NumArgs]; + Args[0].type = ResTy; + for (unsigned i = 0; i < NumArgs; ++i) + Args[1 + i].type = ArgTys[i]; +} + +/***/ + +void CodeGenTypes::GetExpandedTypes(QualType Ty, + std::vector<const llvm::Type*> &ArgTys) { + const RecordType *RT = Ty->getAsStructureType(); + assert(RT && "Can only expand structure types."); + const RecordDecl *RD = RT->getDecl(); + assert(!RD->hasFlexibleArrayMember() && + "Cannot expand structure with flexible array."); + + for (RecordDecl::field_iterator i = RD->field_begin(Context), + e = RD->field_end(Context); i != e; ++i) { + const FieldDecl *FD = *i; + assert(!FD->isBitField() && + "Cannot expand structure with bit-field members."); + + QualType FT = FD->getType(); + if (CodeGenFunction::hasAggregateLLVMType(FT)) { + GetExpandedTypes(FT, ArgTys); + } else { + ArgTys.push_back(ConvertType(FT)); + } + } +} + +llvm::Function::arg_iterator +CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV, + llvm::Function::arg_iterator AI) { + const RecordType *RT = Ty->getAsStructureType(); + assert(RT && "Can only expand structure types."); + + RecordDecl *RD = RT->getDecl(); + assert(LV.isSimple() && + "Unexpected non-simple lvalue during struct 
expansion."); + llvm::Value *Addr = LV.getAddress(); + for (RecordDecl::field_iterator i = RD->field_begin(getContext()), + e = RD->field_end(getContext()); i != e; ++i) { + FieldDecl *FD = *i; + QualType FT = FD->getType(); + + // FIXME: What are the right qualifiers here? + LValue LV = EmitLValueForField(Addr, FD, false, 0); + if (CodeGenFunction::hasAggregateLLVMType(FT)) { + AI = ExpandTypeFromArgs(FT, LV, AI); + } else { + EmitStoreThroughLValue(RValue::get(AI), LV, FT); + ++AI; + } + } + + return AI; +} + +void +CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV, + llvm::SmallVector<llvm::Value*, 16> &Args) { + const RecordType *RT = Ty->getAsStructureType(); + assert(RT && "Can only expand structure types."); + + RecordDecl *RD = RT->getDecl(); + assert(RV.isAggregate() && "Unexpected rvalue during struct expansion"); + llvm::Value *Addr = RV.getAggregateAddr(); + for (RecordDecl::field_iterator i = RD->field_begin(getContext()), + e = RD->field_end(getContext()); i != e; ++i) { + FieldDecl *FD = *i; + QualType FT = FD->getType(); + + // FIXME: What are the right qualifiers here? + LValue LV = EmitLValueForField(Addr, FD, false, 0); + if (CodeGenFunction::hasAggregateLLVMType(FT)) { + ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args); + } else { + RValue RV = EmitLoadOfLValue(LV, FT); + assert(RV.isScalar() && + "Unexpected non-scalar rvalue during struct expansion."); + Args.push_back(RV.getScalarVal()); + } + } +} + +/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as +/// a pointer to an object of type \arg Ty. +/// +/// This safely handles the case when the src type is smaller than the +/// destination type; in this situation the values of bits which not +/// present in the src are undefined. 
+static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr, + const llvm::Type *Ty, + CodeGenFunction &CGF) { + const llvm::Type *SrcTy = + cast<llvm::PointerType>(SrcPtr->getType())->getElementType(); + uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy); + uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty); + + // If load is legal, just bitcast the src pointer. + if (SrcSize >= DstSize) { + // Generally SrcSize is never greater than DstSize, since this means we are + // losing bits. However, this can happen in cases where the structure has + // additional padding, for example due to a user specified alignment. + // + // FIXME: Assert that we aren't truncating non-padding bits when have access + // to that information. + llvm::Value *Casted = + CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty)); + llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted); + // FIXME: Use better alignment / avoid requiring aligned load. + Load->setAlignment(1); + return Load; + } else { + // Otherwise do coercion through memory. This is stupid, but + // simple. + llvm::Value *Tmp = CGF.CreateTempAlloca(Ty); + llvm::Value *Casted = + CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy)); + llvm::StoreInst *Store = + CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted); + // FIXME: Use better alignment / avoid requiring aligned store. + Store->setAlignment(1); + return CGF.Builder.CreateLoad(Tmp); + } +} + +/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src, +/// where the source and destination may have different types. +/// +/// This safely handles the case when the src type is larger than the +/// destination type; the upper bits of the src will be lost. 
+static void CreateCoercedStore(llvm::Value *Src, + llvm::Value *DstPtr, + CodeGenFunction &CGF) { + const llvm::Type *SrcTy = Src->getType(); + const llvm::Type *DstTy = + cast<llvm::PointerType>(DstPtr->getType())->getElementType(); + + uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy); + uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy); + + // If store is legal, just bitcast the src pointer. + if (SrcSize >= DstSize) { + // Generally SrcSize is never greater than DstSize, since this means we are + // losing bits. However, this can happen in cases where the structure has + // additional padding, for example due to a user specified alignment. + // + // FIXME: Assert that we aren't truncating non-padding bits when have access + // to that information. + llvm::Value *Casted = + CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy)); + // FIXME: Use better alignment / avoid requiring aligned store. + CGF.Builder.CreateStore(Src, Casted)->setAlignment(1); + } else { + // Otherwise do coercion through memory. This is stupid, but + // simple. + llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy); + CGF.Builder.CreateStore(Src, Tmp); + llvm::Value *Casted = + CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy)); + llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted); + // FIXME: Use better alignment / avoid requiring aligned load. 
+ Load->setAlignment(1); + CGF.Builder.CreateStore(Load, DstPtr); + } +} + +/***/ + +bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) { + return FI.getReturnInfo().isIndirect(); +} + +const llvm::FunctionType * +CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) { + std::vector<const llvm::Type*> ArgTys; + + const llvm::Type *ResultType = 0; + + QualType RetTy = FI.getReturnType(); + const ABIArgInfo &RetAI = FI.getReturnInfo(); + switch (RetAI.getKind()) { + case ABIArgInfo::Expand: + assert(0 && "Invalid ABI kind for return argument"); + + case ABIArgInfo::Direct: + ResultType = ConvertType(RetTy); + break; + + case ABIArgInfo::Indirect: { + assert(!RetAI.getIndirectAlign() && "Align unused on indirect return."); + ResultType = llvm::Type::VoidTy; + const llvm::Type *STy = ConvertType(RetTy); + ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace())); + break; + } + + case ABIArgInfo::Ignore: + ResultType = llvm::Type::VoidTy; + break; + + case ABIArgInfo::Coerce: + ResultType = RetAI.getCoerceToType(); + break; + } + + for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), + ie = FI.arg_end(); it != ie; ++it) { + const ABIArgInfo &AI = it->info; + + switch (AI.getKind()) { + case ABIArgInfo::Ignore: + break; + + case ABIArgInfo::Coerce: + ArgTys.push_back(AI.getCoerceToType()); + break; + + case ABIArgInfo::Indirect: { + // indirect arguments are always on the stack, which is addr space #0. 
+ const llvm::Type *LTy = ConvertTypeForMem(it->type); + ArgTys.push_back(llvm::PointerType::getUnqual(LTy)); + break; + } + + case ABIArgInfo::Direct: + ArgTys.push_back(ConvertType(it->type)); + break; + + case ABIArgInfo::Expand: + GetExpandedTypes(it->type, ArgTys); + break; + } + } + + return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic); +} + +void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI, + const Decl *TargetDecl, + AttributeListType &PAL) { + unsigned FuncAttrs = 0; + unsigned RetAttrs = 0; + + // FIXME: handle sseregparm someday... + if (TargetDecl) { + if (TargetDecl->hasAttr<NoThrowAttr>()) + FuncAttrs |= llvm::Attribute::NoUnwind; + if (TargetDecl->hasAttr<NoReturnAttr>()) + FuncAttrs |= llvm::Attribute::NoReturn; + if (TargetDecl->hasAttr<ConstAttr>()) + FuncAttrs |= llvm::Attribute::ReadNone; + else if (TargetDecl->hasAttr<PureAttr>()) + FuncAttrs |= llvm::Attribute::ReadOnly; + } + + QualType RetTy = FI.getReturnType(); + unsigned Index = 1; + const ABIArgInfo &RetAI = FI.getReturnInfo(); + switch (RetAI.getKind()) { + case ABIArgInfo::Direct: + if (RetTy->isPromotableIntegerType()) { + if (RetTy->isSignedIntegerType()) { + RetAttrs |= llvm::Attribute::SExt; + } else if (RetTy->isUnsignedIntegerType()) { + RetAttrs |= llvm::Attribute::ZExt; + } + } + break; + + case ABIArgInfo::Indirect: + PAL.push_back(llvm::AttributeWithIndex::get(Index, + llvm::Attribute::StructRet | + llvm::Attribute::NoAlias)); + ++Index; + // sret disables readnone and readonly + FuncAttrs &= ~(llvm::Attribute::ReadOnly | + llvm::Attribute::ReadNone); + break; + + case ABIArgInfo::Ignore: + case ABIArgInfo::Coerce: + break; + + case ABIArgInfo::Expand: + assert(0 && "Invalid ABI kind for return argument"); + } + + if (RetAttrs) + PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs)); + + // FIXME: we need to honour command line settings also... 
+ // FIXME: RegParm should be reduced in case of nested functions and/or global + // register variable. + signed RegParm = 0; + if (TargetDecl) + if (const RegparmAttr *RegParmAttr = TargetDecl->getAttr<RegparmAttr>()) + RegParm = RegParmAttr->getNumParams(); + + unsigned PointerWidth = getContext().Target.getPointerWidth(0); + for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), + ie = FI.arg_end(); it != ie; ++it) { + QualType ParamType = it->type; + const ABIArgInfo &AI = it->info; + unsigned Attributes = 0; + + switch (AI.getKind()) { + case ABIArgInfo::Coerce: + break; + + case ABIArgInfo::Indirect: + Attributes |= llvm::Attribute::ByVal; + Attributes |= + llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign()); + // byval disables readnone and readonly. + FuncAttrs &= ~(llvm::Attribute::ReadOnly | + llvm::Attribute::ReadNone); + break; + + case ABIArgInfo::Direct: + if (ParamType->isPromotableIntegerType()) { + if (ParamType->isSignedIntegerType()) { + Attributes |= llvm::Attribute::SExt; + } else if (ParamType->isUnsignedIntegerType()) { + Attributes |= llvm::Attribute::ZExt; + } + } + if (RegParm > 0 && + (ParamType->isIntegerType() || ParamType->isPointerType())) { + RegParm -= + (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth; + if (RegParm >= 0) + Attributes |= llvm::Attribute::InReg; + } + // FIXME: handle sseregparm someday... + break; + + case ABIArgInfo::Ignore: + // Skip increment, no matching LLVM parameter. + continue; + + case ABIArgInfo::Expand: { + std::vector<const llvm::Type*> Tys; + // FIXME: This is rather inefficient. Do we ever actually need to do + // anything here? The result should be just reconstructed on the other + // side, so extension should be a non-issue. 
      // (review note: this span begins mid-function — tail of
      // CodeGenModule::ConstructAttributeList, which accumulates per-argument
      // LLVM attributes into PAL. Expanded aggregates consume one attribute
      // index per expanded field.)
      getTypes().GetExpandedTypes(ParamType, Tys);
      Index += Tys.size();
      continue;
    }
    }

    // Record the accumulated attributes for this argument index, then advance
    // to the next LLVM argument slot.
    if (Attributes)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
    ++Index;
  }
  // Index ~0 is LLVM's convention for function-level (not per-arg) attributes.
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}

/// EmitFunctionProlog - Emit the IR prologue for a function with signature
/// info FI: walk the LLVM arguments of Fn in parallel with the semantic
/// arguments in Args, materializing each parameter according to its
/// ABIArgInfo kind and declaring it via EmitParmDecl.
void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls. Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument. When sret is in use the first LLVM
  // argument is the hidden return slot, not a user parameter.
  if (CGM.ReturnTypeUsesSret(FI)) {
    AI->setName("agg.result");
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it) {
    const VarDecl *Arg = i->first;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      // Argument was passed by hidden pointer; AI is that pointer.
      llvm::Value* V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Do nothing, aggregates and complex variables are accessed by
        // reference.
      } else {
        // Load scalar value from indirect argument.
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Direct: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      llvm::Value* V = AI;
      if (hasAggregateLLVMType(Ty)) {
        // Create a temporary alloca to hold the argument; the rest of
        // codegen expects to access aggregates & complex values by
        // reference.
        V = CreateTempAlloca(ConvertTypeForMem(Ty));
        Builder.CreateStore(AI, V);
      } else {
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      std::string Name = Arg->getNameAsString();
      llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(Ty),
                                           (Name + ".addr").c_str());
      // FIXME: What are the right qualifiers here?
      llvm::Function::arg_iterator End =
        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp,0), AI);
      EmitParmDecl(*Arg, Temp);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Name + "." + llvm::utostr(Index));
      // Expansion already advanced AI past all consumed LLVM args, so skip
      // the shared ++AI at the bottom of the loop.
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty)) {
        EmitParmDecl(*Arg, CreateTempAlloca(ConvertTypeForMem(Ty)));
      } else {
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
      }

      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Coerce: {
      assert(AI != Fn->arg_end() && "Argument mismatch!");
      // FIXME: This is very wasteful; EmitParmDecl is just going to drop the
      // result in a new alloca anyway, so we could just store into that
      // directly if we broke the abstraction down more.
      llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(Ty), "coerce");
      CreateCoercedStore(AI, V, *this);
      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, Ty);
        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
          // This must be a promotion, for something like
          // "void a(x) short x; {..."
          V = EmitScalarConversion(V, Ty, Arg->getType());
        }
      }
      EmitParmDecl(*Arg, V);
      break;
    }
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}

/// EmitFunctionEpilog - Emit the return sequence: fetch the value out of the
/// ReturnValue slot (if any) and emit the appropriate ret, honoring the
/// return ABIArgInfo. (Function continues past this span.)
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
                                         llvm::Value *ReturnValue) {
  llvm::Value *RV = 0;

  // Functions with no result always return void.
  if (ReturnValue) {
    QualType RetTy = FI.getReturnType();
    const ABIArgInfo &RetAI = FI.getReturnInfo();

    switch (RetAI.getKind()) {
    case ABIArgInfo::Indirect:
      // Copy the local return slot into the hidden sret argument, which is
      // the function's first LLVM argument.
      if (RetTy->isAnyComplexType()) {
        ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
        StoreComplexToAddr(RT, CurFn->arg_begin(), false);
      } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        EmitAggregateCopy(CurFn->arg_begin(), ReturnValue, RetTy);
      } else {
        EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                          false, RetTy);
      }
      break;

    case ABIArgInfo::Direct:
      // The internal return value temp always will have
      // pointer-to-return-type type.
      // (review note: span begins mid-function — continuation of
      // CodeGenFunction::EmitFunctionEpilog, Direct return case.)
      RV = Builder.CreateLoad(ReturnValue);
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce:
      RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
      break;

    case ABIArgInfo::Expand:
      assert(0 && "Invalid ABI kind for return argument");
    }
  }

  if (RV) {
    Builder.CreateRet(RV);
  } else {
    Builder.CreateRetVoid();
  }
}

/// EmitCallArg - Emit a single call argument expression. Reference parameters
/// bind directly; everything else is evaluated to a temporary RValue.
RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
  if (ArgType->isReferenceType())
    return EmitReferenceBindingToExpr(E, ArgType);

  return EmitAnyExprToTemp(E);
}

/// EmitCall - Emit a call (or invoke, when inside an EH scope) to Callee with
/// the given already-evaluated arguments, lowering each argument and the
/// return value according to the ABIArgInfo recorded in CallInfo.
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
  if (CGM.ReturnTypeUsesSret(CallInfo)) {
    // Create a temporary alloca to hold the result of the call. :(
    Args.push_back(CreateTempAlloca(ConvertTypeForMem(RetTy)));
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->first;

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect:
      // Pass the argument through memory.
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Args.push_back(CreateTempAlloca(ConvertTypeForMem(I->second)));
        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else {
        Args.push_back(RV.getAggregateAddr());
      }
      break;

    case ABIArgInfo::Direct:
      if (RV.isScalar()) {
        Args.push_back(RV.getScalarVal());
      } else if (RV.isComplex()) {
        // Pass complex values as a first-class {real, imag} aggregate.
        llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
        Args.push_back(Tmp);
      } else {
        Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
      }
      break;

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Coerce: {
      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
      } else if (RV.isComplex()) {
        SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();
      Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                       *this));
      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->second, RV, Args);
      break;
    }
  }

  llvm::BasicBlock *InvokeDest = getInvokeDest();
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  // Use an invoke only when there is an enclosing EH landing pad and the
  // callee may actually unwind.
  llvm::CallSite CS;
  if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
    CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
                              Args.data(), Args.data()+Args.size());
    EmitBlock(Cont);
  }

  CS.setAttributes(Attrs);
  // Direct calls to known functions inherit that function's calling
  // convention; stripPointerCasts looks through bitcasts of the callee.
  if (const llvm::Function *F = dyn_cast<llvm::Function>(Callee->stripPointerCasts()))
    CS.setCallingConv(F->getCallingConv());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // generally are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && CI->getType() != llvm::Type::VoidTy)
    CI->setName("call");

  // Unpack the call result according to the return-value ABI.
  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect:
    // Args[0] is the sret slot allocated above.
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));

  case ABIArgInfo::Direct:
    if (RetTy->isAnyComplexType()) {
      llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
      llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
      return RValue::getComplex(std::make_pair(Real, Imag));
    }
    if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "agg.tmp");
      Builder.CreateStore(CI, V);
      return RValue::getAggregate(V);
    }
    return RValue::get(CI);

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Coerce: {
    // FIXME: Avoid the conversion through memory if possible.
    llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "coerce");
    CreateCoercedStore(CI, V, *this);
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(V, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(V);
    return RValue::get(EmitLoadOfScalar(V, false, RetTy));
  }

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}

/* VarArg handling */

/// EmitVAArg - Delegate va_arg lowering to the target's ABIInfo.
llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}

// ===== (scrape boundary: start of new file lib/CodeGen/CGCall.h) =====
//===----- CGCall.h - Encapsulate calling convention details ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#ifndef CLANG_CODEGEN_CGCALL_H
#define CLANG_CODEGEN_CGCALL_H

#include <llvm/ADT/FoldingSet.h>
#include "clang/AST/Type.h"

#include "CGValue.h"

// FIXME: Restructure so we don't have to expose so much stuff.
#include "ABIInfo.h"

namespace llvm {
  struct AttributeWithIndex;
  class Function;
  class Type;
  class Value;

  template<typename T, unsigned> class SmallVector;
}

namespace clang {
  class ASTContext;
  class Decl;
  class FunctionDecl;
  class ObjCMethodDecl;
  class VarDecl;

namespace CodeGen {
  typedef llvm::SmallVector<llvm::AttributeWithIndex, 8> AttributeListType;

  /// CallArgList - Type for representing both the value and type of
  /// arguments in a call.
  typedef llvm::SmallVector<std::pair<RValue, QualType>, 16> CallArgList;

  /// FunctionArgList - Type for representing both the decl and type
  /// of parameters to a function. The decl must be either a
  /// ParmVarDecl or ImplicitParamDecl.
  typedef llvm::SmallVector<std::pair<const VarDecl*, QualType>,
                            16> FunctionArgList;

  /// CGFunctionInfo - Class to encapsulate the information about a
  /// function definition.
  ///
  /// Layout note: Args is a single heap array where slot 0 holds the return
  /// type/info and slots 1..NumArgs hold the parameters; arg_begin/arg_end
  /// therefore skip slot 0. Instances are uniqued via FoldingSet using the
  /// Profile methods below.
  class CGFunctionInfo : public llvm::FoldingSetNode {
    struct ArgInfo {
      QualType type;   // semantic Clang type of the return value or argument
      ABIArgInfo info; // how the ABI says it is passed/returned
    };

    unsigned NumArgs;  // number of parameters (excludes the return slot)
    ArgInfo *Args;     // owned array of NumArgs + 1 entries; [0] is the return

  public:
    typedef const ArgInfo *const_arg_iterator;
    typedef ArgInfo *arg_iterator;

    CGFunctionInfo(QualType ResTy,
                   const llvm::SmallVector<QualType, 16> &ArgTys);
    ~CGFunctionInfo() { delete[] Args; }

    // Iteration covers only the parameters; Args[0] (the return) is skipped.
    const_arg_iterator arg_begin() const { return Args + 1; }
    const_arg_iterator arg_end() const { return Args + 1 + NumArgs; }
    arg_iterator arg_begin() { return Args + 1; }
    arg_iterator arg_end() { return Args + 1 + NumArgs; }

    unsigned arg_size() const { return NumArgs; }

    QualType getReturnType() const { return Args[0].type; }

    ABIArgInfo &getReturnInfo() { return Args[0].info; }
    const ABIArgInfo &getReturnInfo() const { return Args[0].info; }

    /// Profile - FoldingSet identity: the return type plus each argument type.
    /// (ABIArgInfo is derived from the types, so it is not part of the key.)
    void Profile(llvm::FoldingSetNodeID &ID) {
      getReturnType().Profile(ID);
      for (arg_iterator it = arg_begin(), ie = arg_end(); it != ie; ++it)
        it->type.Profile(ID);
    }
    /// Profile - Static variant used to look up an entry before a
    /// CGFunctionInfo has been constructed; must hash identically to the
    /// member Profile above.
    template<class Iterator>
    static void Profile(llvm::FoldingSetNodeID &ID,
                        QualType ResTy,
                        Iterator begin,
                        Iterator end) {
      ResTy.Profile(ID);
      for (; begin != end; ++begin)
        begin->Profile(ID);
    }
  };
}  // end namespace CodeGen
}  // end namespace clang

#endif

// ===== (scrape boundary: start of new file lib/CodeGen/CGDebugInfo.cpp) =====
//===--- CGDebugInfo.cpp - Emit Debug Information for a Module ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the debug information generation while generating code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/FileManager.h"
#include "clang/Frontend/CompileOptions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Module.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/System/Path.h"
#include "llvm/Target/TargetMachine.h"
using namespace clang;
using namespace clang::CodeGen;

CGDebugInfo::CGDebugInfo(CodeGenModule *m)
  : M(m), isMainCompileUnitCreated(false), DebugFactory(M->getModule()),
    BlockLiteralGenericSet(false) {
}

CGDebugInfo::~CGDebugInfo() {
  assert(RegionStack.empty() && "Region stack mismatch, stack not empty!");
}

/// setLocation - Record Loc as the current source position for subsequently
/// emitted debug info; invalid locations are ignored. (Continues past this
/// span.)
void CGDebugInfo::setLocation(SourceLocation Loc) {
  if (Loc.isValid())
    CurLoc =
      // (review note: span begins mid-function — completion of
      // CGDebugInfo::setLocation.)
      M->getContext().getSourceManager().getInstantiationLoc(Loc);
}

/// getOrCreateCompileUnit - Get the compile unit from the cache or create a new
/// one if necessary. This returns null for invalid source locations.
llvm::DICompileUnit CGDebugInfo::getOrCreateCompileUnit(SourceLocation Loc) {
  // Get source file information.
  const char *FileName = "<unknown>";
  SourceManager &SM = M->getContext().getSourceManager();
  unsigned FID = 0;
  if (Loc.isValid()) {
    PresumedLoc PLoc = SM.getPresumedLoc(Loc);
    FileName = PLoc.getFilename();
    // Cache key is the raw include-location encoding, not a real file ID.
    FID = PLoc.getIncludeLoc().getRawEncoding();
  }

  // See if this compile unit has been used before.
  llvm::DICompileUnit &Unit = CompileUnitCache[FID];
  if (!Unit.isNull()) return Unit;

  // Get absolute path name.
  llvm::sys::Path AbsFileName(FileName);
  if (!AbsFileName.isAbsolute()) {
    llvm::sys::Path tmp = llvm::sys::Path::GetCurrentDirectory();
    tmp.appendComponent(FileName);
    AbsFileName = tmp;
  }

  // See if this compile unit is representing main source file. Each source
  // file has corresponding compile unit. There is only one main source
  // file at a time.
  bool isMain = false;
  const LangOptions &LO = M->getLangOptions();
  const char *MainFileName = LO.getMainFileName();
  if (isMainCompileUnitCreated == false) {
    if (MainFileName) {
      if (!strcmp(AbsFileName.getLast().c_str(), MainFileName))
        isMain = true;
    } else {
      if (Loc.isValid() && SM.isFromMainFile(Loc))
        isMain = true;
    }
    if (isMain)
      isMainCompileUnitCreated = true;
  }

  // Map the language options onto a DWARF source-language code.
  unsigned LangTag;
  if (LO.CPlusPlus) {
    if (LO.ObjC1)
      LangTag = llvm::dwarf::DW_LANG_ObjC_plus_plus;
    else
      LangTag = llvm::dwarf::DW_LANG_C_plus_plus;
  } else if (LO.ObjC1) {
    LangTag = llvm::dwarf::DW_LANG_ObjC;
  } else if (LO.C99) {
    LangTag = llvm::dwarf::DW_LANG_C99;
  } else {
    LangTag = llvm::dwarf::DW_LANG_C89;
  }

  std::string Producer = "clang 1.0";// FIXME: clang version.
  bool isOptimized = LO.Optimize;
  const char *Flags = "";   // FIXME: Encode command line options.

  // Figure out which version of the ObjC runtime we have.
  unsigned RuntimeVers = 0;
  if (LO.ObjC1)
    RuntimeVers = LO.ObjCNonFragileABI ? 2 : 1;

  // Create new compile unit.
  return Unit = DebugFactory.CreateCompileUnit(LangTag, AbsFileName.getLast(),
                                               AbsFileName.getDirname(),
                                               Producer, isMain, isOptimized,
                                               Flags, RuntimeVers);
}

/// CreateType - Get the Basic type from the cache or create a new
/// one if necessary.
llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT,
                                     llvm::DICompileUnit Unit) {
  unsigned Encoding = 0;
  switch (BT->getKind()) {
  default:
  case BuiltinType::Void:
    // No debug type for void (and, currently, any unlisted builtin kind).
    return llvm::DIType();
  case BuiltinType::UChar:
  case BuiltinType::Char_U: Encoding = llvm::dwarf::DW_ATE_unsigned_char; break;
  case BuiltinType::Char_S:
  case BuiltinType::SChar: Encoding = llvm::dwarf::DW_ATE_signed_char; break;
  case BuiltinType::UShort:
  case BuiltinType::UInt:
  case BuiltinType::ULong:
  case BuiltinType::ULongLong: Encoding = llvm::dwarf::DW_ATE_unsigned; break;
  case BuiltinType::Short:
  case BuiltinType::Int:
  case BuiltinType::Long:
  case BuiltinType::LongLong: Encoding = llvm::dwarf::DW_ATE_signed; break;
  case BuiltinType::Bool: Encoding = llvm::dwarf::DW_ATE_boolean; break;
  case BuiltinType::Float:
  case BuiltinType::Double: Encoding = llvm::dwarf::DW_ATE_float; break;
  }
  // Bit size, align and offset of the type.
  uint64_t Size = M->getContext().getTypeSize(BT);
  uint64_t Align = M->getContext().getTypeAlign(BT);
  uint64_t Offset = 0;

  return DebugFactory.CreateBasicType(Unit,
                                      BT->getName(M->getContext().getLangOptions().CPlusPlus),
                                      Unit, 0, Size, Align,
                                      Offset, /*flags*/ 0, Encoding);
}

llvm::DIType CGDebugInfo::CreateType(const ComplexType *Ty,
                                     llvm::DICompileUnit Unit) {
  // Bit size, align and offset of the type.
  // DWARF has no encoding for complex integers, so fall back to lo_user.
  unsigned Encoding = llvm::dwarf::DW_ATE_complex_float;
  if (Ty->isComplexIntegerType())
    Encoding = llvm::dwarf::DW_ATE_lo_user;

  uint64_t Size = M->getContext().getTypeSize(Ty);
  uint64_t Align = M->getContext().getTypeAlign(Ty);
  uint64_t Offset = 0;

  return DebugFactory.CreateBasicType(Unit, "complex",
                                      Unit, 0, Size, Align,
                                      Offset, /*flags*/ 0, Encoding);
}

/// getOrCreateCVRType - Get the CVR qualified type from the cache or create
/// a new one if necessary.
llvm::DIType CGDebugInfo::CreateCVRType(QualType Ty, llvm::DICompileUnit Unit) {
  // We will create one Derived type for one qualifier and recurse to handle any
  // additional ones. Peeling one qualifier per call yields a chain of
  // const/volatile/restrict DWARF nodes (the recursion happens through
  // getOrCreateType, which routes back here while qualifiers remain).
  llvm::DIType FromTy;
  unsigned Tag;
  if (Ty.isConstQualified()) {
    Tag = llvm::dwarf::DW_TAG_const_type;
    Ty.removeConst();
    FromTy = getOrCreateType(Ty, Unit);
  } else if (Ty.isVolatileQualified()) {
    Tag = llvm::dwarf::DW_TAG_volatile_type;
    Ty.removeVolatile();
    FromTy = getOrCreateType(Ty, Unit);
  } else {
    assert(Ty.isRestrictQualified() && "Unknown type qualifier for debug info");
    Tag = llvm::dwarf::DW_TAG_restrict_type;
    Ty.removeRestrict();
    FromTy = getOrCreateType(Ty, Unit);
  }

  // No need to fill in the Name, Line, Size, Alignment, Offset in case of
  // CVR derived types.
  return DebugFactory.CreateDerivedType(Tag, Unit, "", llvm::DICompileUnit(),
                                        0, 0, 0, 0, 0, FromTy);
}

llvm::DIType CGDebugInfo::CreateType(const PointerType *Ty,
                                     llvm::DICompileUnit Unit) {
  llvm::DIType EltTy = getOrCreateType(Ty->getPointeeType(), Unit);

  // Bit size, align and offset of the type.
  // (review note: span begins mid-function — completion of
  // CGDebugInfo::CreateType(const PointerType*).)
  uint64_t Size = M->getContext().getTypeSize(Ty);
  uint64_t Align = M->getContext().getTypeAlign(Ty);

  return DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type, Unit,
                                        "", llvm::DICompileUnit(),
                                        0, Size, Align, 0, 0, EltTy);
}

/// CreateType - Build (and cache, via BlockLiteralGeneric) the debug type for
/// a block pointer: a pointer to a synthesized __block_literal_generic struct
/// whose __descriptor field points to a synthesized __block_descriptor struct.
llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
                                     llvm::DICompileUnit Unit) {
  // All block pointers share one generic debug type; build it only once.
  if (BlockLiteralGenericSet)
    return BlockLiteralGeneric;

  llvm::DICompileUnit DefUnit;
  unsigned Tag = llvm::dwarf::DW_TAG_structure_type;

  llvm::SmallVector<llvm::DIDescriptor, 5> EltTys;

  llvm::DIType FieldTy;

  QualType FType;
  uint64_t FieldSize, FieldOffset;
  unsigned FieldAlign;

  llvm::DIArray Elements;
  llvm::DIType EltTy, DescTy;

  // First synthesize the __block_descriptor struct: { reserved, Size }.
  FieldOffset = 0;
  FType = M->getContext().UnsignedLongTy;
  FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
  FieldSize = M->getContext().getTypeSize(FType);
  FieldAlign = M->getContext().getTypeAlign(FType);
  FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
                                           "reserved", DefUnit,
                                           0, FieldSize, FieldAlign,
                                           FieldOffset, 0, FieldTy);
  EltTys.push_back(FieldTy);

  FieldOffset += FieldSize;
  FType = M->getContext().UnsignedLongTy;
  FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
  FieldSize = M->getContext().getTypeSize(FType);
  FieldAlign = M->getContext().getTypeAlign(FType);
  FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
                                           "Size", DefUnit,
                                           0, FieldSize, FieldAlign,
                                           FieldOffset, 0, FieldTy);
  EltTys.push_back(FieldTy);

  FieldOffset += FieldSize;
  Elements = DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
  EltTys.clear();

  EltTy = DebugFactory.CreateCompositeType(Tag, Unit, "__block_descriptor",
                                           DefUnit, 0, FieldOffset, 0, 0, 0,
                                           llvm::DIType(), Elements);

  // Bit size, align and offset of the type.
  uint64_t Size = M->getContext().getTypeSize(Ty);
  uint64_t Align = M->getContext().getTypeAlign(Ty);

  DescTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type,
                                          Unit, "", llvm::DICompileUnit(),
                                          0, Size, Align, 0, 0, EltTy);

  // Now synthesize __block_literal_generic:
  // { __isa, __flags, __reserved, __FuncPtr, __descriptor }.
  FieldOffset = 0;
  FType = M->getContext().getPointerType(M->getContext().VoidTy);
  FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
  FieldSize = M->getContext().getTypeSize(FType);
  FieldAlign = M->getContext().getTypeAlign(FType);
  FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
                                           "__isa", DefUnit,
                                           0, FieldSize, FieldAlign,
                                           FieldOffset, 0, FieldTy);
  EltTys.push_back(FieldTy);

  FieldOffset += FieldSize;
  FType = M->getContext().IntTy;
  FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
  FieldSize = M->getContext().getTypeSize(FType);
  FieldAlign = M->getContext().getTypeAlign(FType);
  FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
                                           "__flags", DefUnit,
                                           0, FieldSize, FieldAlign,
                                           FieldOffset, 0, FieldTy);
  EltTys.push_back(FieldTy);

  FieldOffset += FieldSize;
  FType = M->getContext().IntTy;
  FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
  FieldSize = M->getContext().getTypeSize(FType);
  FieldAlign = M->getContext().getTypeAlign(FType);
  FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
                                           "__reserved", DefUnit,
                                           0, FieldSize, FieldAlign,
                                           FieldOffset, 0, FieldTy);
  EltTys.push_back(FieldTy);

  FieldOffset += FieldSize;
  FType = M->getContext().getPointerType(M->getContext().VoidTy);
  FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
  FieldSize = M->getContext().getTypeSize(FType);
  FieldAlign = M->getContext().getTypeAlign(FType);
  FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
                                           "__FuncPtr", DefUnit,
                                           0, FieldSize, FieldAlign,
                                           FieldOffset, 0, FieldTy);
  EltTys.push_back(FieldTy);

  FieldOffset += FieldSize;
  FType = M->getContext().getPointerType(M->getContext().VoidTy);
  FieldTy = DescTy;
  // NOTE(review): size/align here are taken from Ty (the block pointer type),
  // not FType — presumably equivalent since both are pointers; confirm.
  FieldSize = M->getContext().getTypeSize(Ty);
  FieldAlign = M->getContext().getTypeAlign(Ty);
  FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
                                           "__descriptor", DefUnit,
                                           0, FieldSize, FieldAlign,
                                           FieldOffset, 0, FieldTy);
  EltTys.push_back(FieldTy);

  FieldOffset += FieldSize;
  Elements = DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());

  EltTy = DebugFactory.CreateCompositeType(Tag, Unit, "__block_literal_generic",
                                           DefUnit, 0, FieldOffset, 0, 0, 0,
                                           llvm::DIType(), Elements);

  BlockLiteralGenericSet = true;
  BlockLiteralGeneric
    = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type, Unit,
                                     "", llvm::DICompileUnit(),
                                     0, Size, Align, 0, 0, EltTy);
  return BlockLiteralGeneric;
}

llvm::DIType CGDebugInfo::CreateType(const TypedefType *Ty,
                                     llvm::DICompileUnit Unit) {
  // Typedefs are derived from some other type. If we have a typedef of a
  // typedef, make sure to emit the whole chain.
  llvm::DIType Src = getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit);

  // We don't set size information, but do specify where the typedef was
  // declared.
  std::string TyName = Ty->getDecl()->getNameAsString();
  SourceLocation DefLoc = Ty->getDecl()->getLocation();
  llvm::DICompileUnit DefUnit = getOrCreateCompileUnit(DefLoc);

  SourceManager &SM = M->getContext().getSourceManager();
  PresumedLoc PLoc = SM.getPresumedLoc(DefLoc);
  unsigned Line = PLoc.isInvalid() ? 0 : PLoc.getLine();

  return DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_typedef, Unit,
                                        TyName, DefUnit, Line, 0, 0, 0, 0, Src);
}

/// CreateType - Build a DW_TAG_subroutine_type; element 0 is the result type,
/// followed by the parameter types when a prototype exists. (Continues past
/// this span.)
llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
                                     llvm::DICompileUnit Unit) {
  llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;

  // Add the result type at least.
  EltTys.push_back(getOrCreateType(Ty->getResultType(), Unit));

  // Set up remainder of arguments if there is a prototype.
  // (review note: span begins mid-function — completion of
  // CGDebugInfo::CreateType(const FunctionType*).)
  // FIXME: IF NOT, HOW IS THIS REPRESENTED? llvm-gcc doesn't represent '...'!
  if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(Ty)) {
    for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
      EltTys.push_back(getOrCreateType(FTP->getArgType(i), Unit));
  } else {
    // FIXME: Handle () case in C. llvm-gcc doesn't do it either.
  }

  llvm::DIArray EltTypeArray =
    DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());

  return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_subroutine_type,
                                          Unit, "", llvm::DICompileUnit(),
                                          0, 0, 0, 0, 0,
                                          llvm::DIType(), EltTypeArray);
}

/// CreateType - get structure or union type.
llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
                                     llvm::DICompileUnit Unit) {
  RecordDecl *Decl = Ty->getDecl();

  // Pick the DWARF tag matching the record kind.
  unsigned Tag;
  if (Decl->isStruct())
    Tag = llvm::dwarf::DW_TAG_structure_type;
  else if (Decl->isUnion())
    Tag = llvm::dwarf::DW_TAG_union_type;
  else {
    assert(Decl->isClass() && "Unknown RecordType!");
    Tag = llvm::dwarf::DW_TAG_class_type;
  }

  SourceManager &SM = M->getContext().getSourceManager();

  // Get overall information about the record type for the debug info.
  std::string Name = Decl->getNameAsString();

  PresumedLoc PLoc = SM.getPresumedLoc(Decl->getLocation());
  llvm::DICompileUnit DefUnit;
  unsigned Line = 0;
  if (!PLoc.isInvalid()) {
    DefUnit = getOrCreateCompileUnit(Decl->getLocation());
    Line = PLoc.getLine();
  }

  // Records and classes and unions can all be recursive. To handle them, we
  // first generate a debug descriptor for the struct as a forward declaration.
  // Then (if it is a definition) we go through and get debug info for all of
  // its members. Finally, we create a descriptor for the complete type (which
  // may refer to the forward decl if the struct is recursive) and replace all
  // uses of the forward declaration with the final definition.
  llvm::DIType FwdDecl =
    DebugFactory.CreateCompositeType(Tag, Unit, Name, DefUnit, Line, 0, 0, 0, 0,
                                     llvm::DIType(), llvm::DIArray());

  // If this is just a forward declaration, return it.
  if (!Decl->getDefinition(M->getContext()))
    return FwdDecl;

  // Otherwise, insert it into the TypeCache so that recursive uses will find
  // it.
  TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl;

  // Convert all the elements.
  llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;

  const ASTRecordLayout &RL = M->getContext().getASTRecordLayout(Decl);

  unsigned FieldNo = 0;
  for (RecordDecl::field_iterator I = Decl->field_begin(M->getContext()),
                                  E = Decl->field_end(M->getContext());
       I != E; ++I, ++FieldNo) {
    FieldDecl *Field = *I;
    llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);

    std::string FieldName = Field->getNameAsString();

    // Ignore unnamed fields.
    if (FieldName.empty())
      continue;

    // Get the location for the field.
    SourceLocation FieldDefLoc = Field->getLocation();
    PresumedLoc PLoc = SM.getPresumedLoc(FieldDefLoc);
    llvm::DICompileUnit FieldDefUnit;
    unsigned FieldLine = 0;

    if (!PLoc.isInvalid()) {
      FieldDefUnit = getOrCreateCompileUnit(FieldDefLoc);
      FieldLine = PLoc.getLine();
    }

    QualType FType = Field->getType();
    uint64_t FieldSize = 0;
    unsigned FieldAlign = 0;
    if (!FType->isIncompleteArrayType()) {

      // Bit size, align and offset of the type. A bit-field's width
      // overrides the type's natural size.
      FieldSize = M->getContext().getTypeSize(FType);
      Expr *BitWidth = Field->getBitWidth();
      if (BitWidth)
        FieldSize = BitWidth->EvaluateAsInt(M->getContext()).getZExtValue();

      FieldAlign = M->getContext().getTypeAlign(FType);
    }

    uint64_t FieldOffset = RL.getFieldOffset(FieldNo);

    // Create a DW_TAG_member node to remember the offset of this field in the
    // struct. FIXME: This is an absolutely insane way to capture this
    // information. When we gut debug info, this should be fixed.
    FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
                                             FieldName, FieldDefUnit,
                                             FieldLine, FieldSize, FieldAlign,
                                             FieldOffset, 0, FieldTy);
    EltTys.push_back(FieldTy);
  }

  llvm::DIArray Elements =
    DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());

  // Bit size, align and offset of the type.
  uint64_t Size = M->getContext().getTypeSize(Ty);
  uint64_t Align = M->getContext().getTypeAlign(Ty);

  llvm::DIType RealDecl =
    DebugFactory.CreateCompositeType(Tag, Unit, Name, DefUnit, Line, Size,
                                     Align, 0, 0, llvm::DIType(), Elements);

  // Now that we have a real decl for the struct, replace anything using the
  // old decl with the new one. This will recursively update the debug info.
  FwdDecl.getGV()->replaceAllUsesWith(RealDecl.getGV());
  FwdDecl.getGV()->eraseFromParent();

  return RealDecl;
}

/// CreateType - get objective-c interface type. (Continues past this span.)
llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
                                     llvm::DICompileUnit Unit) {
  ObjCInterfaceDecl *Decl = Ty->getDecl();

  unsigned Tag = llvm::dwarf::DW_TAG_structure_type;
  SourceManager &SM = M->getContext().getSourceManager();

  // Get overall information about the record type for the debug info.
  std::string Name = Decl->getNameAsString();

  llvm::DICompileUnit DefUnit = getOrCreateCompileUnit(Decl->getLocation());
  PresumedLoc PLoc = SM.getPresumedLoc(Decl->getLocation());
  unsigned Line = PLoc.isInvalid() ? 0 : PLoc.getLine();


  unsigned RuntimeLang = DefUnit.getLanguage();

  // To handle recursive interface, we
  // first generate a debug descriptor for the struct as a forward declaration.
  // Then (if it is a definition) we go through and get debug info for all of
  // its members. Finally, we create a descriptor for the complete type (which
  // may refer to the forward decl if the struct is recursive) and replace all
  // uses of the forward declaration with the final definition.
  // (review note: span begins mid-function — continuation of
  // CGDebugInfo::CreateType(const ObjCInterfaceType*).)
  llvm::DIType FwdDecl =
    DebugFactory.CreateCompositeType(Tag, Unit, Name, DefUnit, Line, 0, 0, 0, 0,
                                     llvm::DIType(), llvm::DIArray(),
                                     RuntimeLang);

  // If this is just a forward declaration, return it.
  if (Decl->isForwardDecl())
    return FwdDecl;

  // Otherwise, insert it into the TypeCache so that recursive uses will find
  // it.
  TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl;

  // Convert all the elements.
  llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;

  // Represent the superclass, if any, as a DW_TAG_inheritance member.
  ObjCInterfaceDecl *SClass = Decl->getSuperClass();
  if (SClass) {
    llvm::DIType SClassTy =
      getOrCreateType(M->getContext().getObjCInterfaceType(SClass), Unit);
    llvm::DIType InhTag =
      DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_inheritance,
                                     Unit, "", llvm::DICompileUnit(), 0, 0, 0,
                                     0 /* offset */, 0, SClassTy);
    EltTys.push_back(InhTag);
  }

  const ASTRecordLayout &RL = M->getContext().getASTObjCInterfaceLayout(Decl);

  unsigned FieldNo = 0;
  for (ObjCInterfaceDecl::ivar_iterator I = Decl->ivar_begin(),
                                        E = Decl->ivar_end();
       I != E; ++I, ++FieldNo) {
    ObjCIvarDecl *Field = *I;
    llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);

    std::string FieldName = Field->getNameAsString();

    // Ignore unnamed fields.
    if (FieldName.empty())
      continue;

    // Get the location for the field.
    SourceLocation FieldDefLoc = Field->getLocation();
    llvm::DICompileUnit FieldDefUnit = getOrCreateCompileUnit(FieldDefLoc);
    PresumedLoc PLoc = SM.getPresumedLoc(FieldDefLoc);
    unsigned FieldLine = PLoc.isInvalid() ? 0 : PLoc.getLine();


    QualType FType = Field->getType();
    uint64_t FieldSize = 0;
    unsigned FieldAlign = 0;

    if (!FType->isIncompleteArrayType()) {

      // Bit size, align and offset of the type. A bit-field ivar's width
      // overrides the type's natural size.
      FieldSize = M->getContext().getTypeSize(FType);
      Expr *BitWidth = Field->getBitWidth();
      if (BitWidth)
        FieldSize = BitWidth->EvaluateAsInt(M->getContext()).getZExtValue();

      FieldAlign = M->getContext().getTypeAlign(FType);
    }

    uint64_t FieldOffset = RL.getFieldOffset(FieldNo);

    // Translate the ivar's access control into DWARF accessibility flags.
    unsigned Flags = 0;
    if (Field->getAccessControl() == ObjCIvarDecl::Protected)
      Flags = llvm::DIType::FlagProtected;
    else if (Field->getAccessControl() == ObjCIvarDecl::Private)
      Flags = llvm::DIType::FlagPrivate;

    // Create a DW_TAG_member node to remember the offset of this field in the
    // struct. FIXME: This is an absolutely insane way to capture this
    // information. When we gut debug info, this should be fixed.
    FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
                                             FieldName, FieldDefUnit,
                                             FieldLine, FieldSize, FieldAlign,
                                             FieldOffset, Flags, FieldTy);
    EltTys.push_back(FieldTy);
  }

  llvm::DIArray Elements =
    DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());

  // Bit size, align and offset of the type.
  uint64_t Size = M->getContext().getTypeSize(Ty);
  uint64_t Align = M->getContext().getTypeAlign(Ty);

  llvm::DIType RealDecl =
    DebugFactory.CreateCompositeType(Tag, Unit, Name, DefUnit, Line, Size,
                                     Align, 0, 0, llvm::DIType(), Elements,
                                     RuntimeLang);

  // Now that we have a real decl for the struct, replace anything using the
  // old decl with the new one. This will recursively update the debug info.
  FwdDecl.getGV()->replaceAllUsesWith(RealDecl.getGV());
  FwdDecl.getGV()->eraseFromParent();

  return RealDecl;
}

/// CreateType - Build a DW_TAG_enumeration_type from an EnumType. (Continues
/// past this span.)
llvm::DIType CGDebugInfo::CreateType(const EnumType *Ty,
                                     llvm::DICompileUnit Unit) {
  EnumDecl *Decl = Ty->getDecl();

  llvm::SmallVector<llvm::DIDescriptor, 32> Enumerators;

  // Create DIEnumerator elements for each enumerator.
+ for (EnumDecl::enumerator_iterator + Enum = Decl->enumerator_begin(M->getContext()), + EnumEnd = Decl->enumerator_end(M->getContext()); + Enum != EnumEnd; ++Enum) { + Enumerators.push_back(DebugFactory.CreateEnumerator(Enum->getNameAsString(), + Enum->getInitVal().getZExtValue())); + } + + // Return a CompositeType for the enum itself. + llvm::DIArray EltArray = + DebugFactory.GetOrCreateArray(Enumerators.data(), Enumerators.size()); + + std::string EnumName = Decl->getNameAsString(); + SourceLocation DefLoc = Decl->getLocation(); + llvm::DICompileUnit DefUnit = getOrCreateCompileUnit(DefLoc); + SourceManager &SM = M->getContext().getSourceManager(); + PresumedLoc PLoc = SM.getPresumedLoc(DefLoc); + unsigned Line = PLoc.isInvalid() ? 0 : PLoc.getLine(); + + + // Size and align of the type. + uint64_t Size = 0; + unsigned Align = 0; + if (!Ty->isIncompleteType()) { + Size = M->getContext().getTypeSize(Ty); + Align = M->getContext().getTypeAlign(Ty); + } + + return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_enumeration_type, + Unit, EnumName, DefUnit, Line, + Size, Align, 0, 0, + llvm::DIType(), EltArray); +} + +llvm::DIType CGDebugInfo::CreateType(const TagType *Ty, + llvm::DICompileUnit Unit) { + if (const RecordType *RT = dyn_cast<RecordType>(Ty)) + return CreateType(RT, Unit); + else if (const EnumType *ET = dyn_cast<EnumType>(Ty)) + return CreateType(ET, Unit); + + return llvm::DIType(); +} + +llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty, + llvm::DICompileUnit Unit) { + uint64_t Size; + uint64_t Align; + + + // FIXME: make getTypeAlign() aware of VLAs and incomplete array types + if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(Ty)) { + Size = 0; + Align = + M->getContext().getTypeAlign(M->getContext().getBaseElementType(VAT)); + } else if (Ty->isIncompleteArrayType()) { + Size = 0; + Align = M->getContext().getTypeAlign(Ty->getElementType()); + } else { + // Size and align of the whole array, not the element type. 
+ Size = M->getContext().getTypeSize(Ty); + Align = M->getContext().getTypeAlign(Ty); + } + + // Add the dimensions of the array. FIXME: This loses CV qualifiers from + // interior arrays, do we care? Why aren't nested arrays represented the + // obvious/recursive way? + llvm::SmallVector<llvm::DIDescriptor, 8> Subscripts; + QualType EltTy(Ty, 0); + while ((Ty = dyn_cast<ArrayType>(EltTy))) { + uint64_t Upper = 0; + if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty)) + Upper = CAT->getSize().getZExtValue() - 1; + // FIXME: Verify this is right for VLAs. + Subscripts.push_back(DebugFactory.GetOrCreateSubrange(0, Upper)); + EltTy = Ty->getElementType(); + } + + llvm::DIArray SubscriptArray = + DebugFactory.GetOrCreateArray(Subscripts.data(), Subscripts.size()); + + return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_array_type, + Unit, "", llvm::DICompileUnit(), + 0, Size, Align, 0, 0, + getOrCreateType(EltTy, Unit), + SubscriptArray); +} + + +/// getOrCreateType - Get the type from the cache or create a new +/// one if necessary. +llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty, + llvm::DICompileUnit Unit) { + if (Ty.isNull()) + return llvm::DIType(); + + // Check to see if the compile unit already has created this type. + llvm::DIType &Slot = TypeCache[Ty.getAsOpaquePtr()]; + if (!Slot.isNull()) return Slot; + + // Handle CVR qualifiers, which recursively handles what they refer to. + if (Ty.getCVRQualifiers()) + return Slot = CreateCVRType(Ty, Unit); + + // Work out details of type. 
+ switch (Ty->getTypeClass()) { +#define TYPE(Class, Base) +#define ABSTRACT_TYPE(Class, Base) +#define NON_CANONICAL_TYPE(Class, Base) +#define DEPENDENT_TYPE(Class, Base) case Type::Class: +#include "clang/AST/TypeNodes.def" + assert(false && "Dependent types cannot show up in debug information"); + + case Type::LValueReference: + case Type::RValueReference: + case Type::Vector: + case Type::ExtVector: + case Type::ExtQual: + case Type::FixedWidthInt: + case Type::MemberPointer: + case Type::TemplateSpecialization: + case Type::QualifiedName: + // Unsupported types + return llvm::DIType(); + case Type::ObjCQualifiedId: // Encode id<p> in debug info just like id. + return Slot = getOrCreateType(M->getContext().getObjCIdType(), Unit); + + case Type::ObjCQualifiedInterface: // Drop protocols from interface. + case Type::ObjCInterface: + return Slot = CreateType(cast<ObjCInterfaceType>(Ty), Unit); + case Type::Builtin: return Slot = CreateType(cast<BuiltinType>(Ty), Unit); + case Type::Complex: return Slot = CreateType(cast<ComplexType>(Ty), Unit); + case Type::Pointer: return Slot = CreateType(cast<PointerType>(Ty), Unit); + case Type::BlockPointer: + return Slot = CreateType(cast<BlockPointerType>(Ty), Unit); + case Type::Typedef: return Slot = CreateType(cast<TypedefType>(Ty), Unit); + case Type::Record: + case Type::Enum: + return Slot = CreateType(cast<TagType>(Ty), Unit); + case Type::FunctionProto: + case Type::FunctionNoProto: + return Slot = CreateType(cast<FunctionType>(Ty), Unit); + + case Type::ConstantArray: + case Type::VariableArray: + case Type::IncompleteArray: + return Slot = CreateType(cast<ArrayType>(Ty), Unit); + case Type::TypeOfExpr: + return Slot = getOrCreateType(cast<TypeOfExprType>(Ty)->getUnderlyingExpr() + ->getType(), Unit); + case Type::TypeOf: + return Slot = getOrCreateType(cast<TypeOfType>(Ty)->getUnderlyingType(), + Unit); + } + + return Slot; +} + +/// EmitFunctionStart - Constructs the debug code for entering a function - +/// 
"llvm.dbg.func.start.". +void CGDebugInfo::EmitFunctionStart(const char *Name, QualType ReturnType, + llvm::Function *Fn, + CGBuilderTy &Builder) { + const char *LinkageName = Name; + + // Skip the asm prefix if it exists. + // + // FIXME: This should probably be the unmangled name? + if (Name[0] == '\01') + ++Name; + + // FIXME: Why is this using CurLoc??? + llvm::DICompileUnit Unit = getOrCreateCompileUnit(CurLoc); + SourceManager &SM = M->getContext().getSourceManager(); + unsigned LineNo = SM.getPresumedLoc(CurLoc).getLine(); + + llvm::DISubprogram SP = + DebugFactory.CreateSubprogram(Unit, Name, Name, LinkageName, Unit, LineNo, + getOrCreateType(ReturnType, Unit), + Fn->hasInternalLinkage(), true/*definition*/); + + DebugFactory.InsertSubprogramStart(SP, Builder.GetInsertBlock()); + + // Push function on region stack. + RegionStack.push_back(SP); +} + + +void CGDebugInfo::EmitStopPoint(llvm::Function *Fn, CGBuilderTy &Builder) { + if (CurLoc.isInvalid() || CurLoc.isMacroID()) return; + + // Don't bother if things are the same as last time. + SourceManager &SM = M->getContext().getSourceManager(); + if (CurLoc == PrevLoc + || (SM.getInstantiationLineNumber(CurLoc) == + SM.getInstantiationLineNumber(PrevLoc) + && SM.isFromSameFile(CurLoc, PrevLoc))) + return; + + // Update last state. + PrevLoc = CurLoc; + + // Get the appropriate compile unit. + llvm::DICompileUnit Unit = getOrCreateCompileUnit(CurLoc); + PresumedLoc PLoc = SM.getPresumedLoc(CurLoc); + DebugFactory.InsertStopPoint(Unit, PLoc.getLine(), PLoc.getColumn(), + Builder.GetInsertBlock()); +} + +/// EmitRegionStart- Constructs the debug code for entering a declarative +/// region - "llvm.dbg.region.start.". 
+void CGDebugInfo::EmitRegionStart(llvm::Function *Fn, CGBuilderTy &Builder) { + llvm::DIDescriptor D; + if (!RegionStack.empty()) + D = RegionStack.back(); + D = DebugFactory.CreateBlock(D); + RegionStack.push_back(D); + DebugFactory.InsertRegionStart(D, Builder.GetInsertBlock()); +} + +/// EmitRegionEnd - Constructs the debug code for exiting a declarative +/// region - "llvm.dbg.region.end." +void CGDebugInfo::EmitRegionEnd(llvm::Function *Fn, CGBuilderTy &Builder) { + assert(!RegionStack.empty() && "Region stack mismatch, stack empty!"); + + // Provide an region stop point. + EmitStopPoint(Fn, Builder); + + DebugFactory.InsertRegionEnd(RegionStack.back(), Builder.GetInsertBlock()); + RegionStack.pop_back(); +} + +/// EmitDeclare - Emit local variable declaration debug info. +void CGDebugInfo::EmitDeclare(const VarDecl *Decl, unsigned Tag, + llvm::Value *Storage, CGBuilderTy &Builder) { + assert(!RegionStack.empty() && "Region stack mismatch, stack empty!"); + + // Do not emit variable debug information while generating optimized code. + // The llvm optimizer and code generator are not yet ready to support + // optimized code debugging. + const CompileOptions &CO = M->getCompileOpts(); + if (CO.OptimizationLevel) + return; + + llvm::DICompileUnit Unit = getOrCreateCompileUnit(Decl->getLocation()); + llvm::DIType Ty = getOrCreateType(Decl->getType(), Unit); + + // Get location information. + SourceManager &SM = M->getContext().getSourceManager(); + PresumedLoc PLoc = SM.getPresumedLoc(Decl->getLocation()); + unsigned Line = 0; + if (!PLoc.isInvalid()) + Line = PLoc.getLine(); + else + Unit = llvm::DICompileUnit(); + + + // Create the descriptor for the variable. + llvm::DIVariable D = + DebugFactory.CreateVariable(Tag, RegionStack.back(),Decl->getNameAsString(), + Unit, Line, Ty); + // Insert an llvm.dbg.declare into the current block. 
+ DebugFactory.InsertDeclare(Storage, D, Builder.GetInsertBlock()); +} + +void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *Decl, + llvm::Value *Storage, + CGBuilderTy &Builder) { + EmitDeclare(Decl, llvm::dwarf::DW_TAG_auto_variable, Storage, Builder); +} + +/// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument +/// variable declaration. +void CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *Decl, llvm::Value *AI, + CGBuilderTy &Builder) { + EmitDeclare(Decl, llvm::dwarf::DW_TAG_arg_variable, AI, Builder); +} + + + +/// EmitGlobalVariable - Emit information about a global variable. +void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var, + const VarDecl *Decl) { + + // Do not emit variable debug information while generating optimized code. + // The llvm optimizer and code generator are not yet ready to support + // optimized code debugging. + const CompileOptions &CO = M->getCompileOpts(); + if (CO.OptimizationLevel) + return; + + // Create global variable debug descriptor. + llvm::DICompileUnit Unit = getOrCreateCompileUnit(Decl->getLocation()); + SourceManager &SM = M->getContext().getSourceManager(); + PresumedLoc PLoc = SM.getPresumedLoc(Decl->getLocation()); + unsigned LineNo = PLoc.isInvalid() ? 0 : PLoc.getLine(); + + std::string Name = Decl->getNameAsString(); + + QualType T = Decl->getType(); + if (T->isIncompleteArrayType()) { + + // CodeGen turns int[] into int[1] so we'll do the same here. + llvm::APSInt ConstVal(32); + + ConstVal = 1; + QualType ET = M->getContext().getAsArrayType(T)->getElementType(); + + T = M->getContext().getConstantArrayType(ET, ConstVal, + ArrayType::Normal, 0); + } + + DebugFactory.CreateGlobalVariable(Unit, Name, Name, "", Unit, LineNo, + getOrCreateType(T, Unit), + Var->hasInternalLinkage(), + true/*definition*/, Var); +} + +/// EmitGlobalVariable - Emit information about an objective-c interface. 
+void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var, + ObjCInterfaceDecl *Decl) { + // Create global variable debug descriptor. + llvm::DICompileUnit Unit = getOrCreateCompileUnit(Decl->getLocation()); + SourceManager &SM = M->getContext().getSourceManager(); + PresumedLoc PLoc = SM.getPresumedLoc(Decl->getLocation()); + unsigned LineNo = PLoc.isInvalid() ? 0 : PLoc.getLine(); + + std::string Name = Decl->getNameAsString(); + + QualType T = M->getContext().getObjCInterfaceType(Decl); + if (T->isIncompleteArrayType()) { + + // CodeGen turns int[] into int[1] so we'll do the same here. + llvm::APSInt ConstVal(32); + + ConstVal = 1; + QualType ET = M->getContext().getAsArrayType(T)->getElementType(); + + T = M->getContext().getConstantArrayType(ET, ConstVal, + ArrayType::Normal, 0); + } + + DebugFactory.CreateGlobalVariable(Unit, Name, Name, "", Unit, LineNo, + getOrCreateType(T, Unit), + Var->hasInternalLinkage(), + true/*definition*/, Var); +} + diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h new file mode 100644 index 000000000000..de655800fa08 --- /dev/null +++ b/lib/CodeGen/CGDebugInfo.h @@ -0,0 +1,126 @@ +//===--- CGDebugInfo.h - DebugInfo for LLVM CodeGen -------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This is the source level debug info generator for llvm translation. 
+// +//===----------------------------------------------------------------------===// + +#ifndef CLANG_CODEGEN_CGDEBUGINFO_H +#define CLANG_CODEGEN_CGDEBUGINFO_H + +#include "clang/AST/Type.h" +#include "clang/Basic/SourceLocation.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/Analysis/DebugInfo.h" +#include <map> + +#include "CGBuilder.h" + +namespace clang { + class VarDecl; + class ObjCInterfaceDecl; + +namespace CodeGen { + class CodeGenModule; + +/// CGDebugInfo - This class gathers all debug information during compilation +/// and is responsible for emitting to llvm globals or pass directly to +/// the backend. +class CGDebugInfo { + CodeGenModule *M; + bool isMainCompileUnitCreated; + llvm::DIFactory DebugFactory; + + SourceLocation CurLoc, PrevLoc; + + /// CompileUnitCache - Cache of previously constructed CompileUnits. + llvm::DenseMap<unsigned, llvm::DICompileUnit> CompileUnitCache; + + /// TypeCache - Cache of previously constructed Types. + // FIXME: Eliminate this map. Be careful of iterator invalidation. + std::map<void *, llvm::DIType> TypeCache; + + bool BlockLiteralGenericSet; + llvm::DIType BlockLiteralGeneric; + + std::vector<llvm::DIDescriptor> RegionStack; + + /// Helper functions for getOrCreateType. 
+ llvm::DIType CreateType(const BuiltinType *Ty, llvm::DICompileUnit U); + llvm::DIType CreateType(const ComplexType *Ty, llvm::DICompileUnit U); + llvm::DIType CreateCVRType(QualType Ty, llvm::DICompileUnit U); + llvm::DIType CreateType(const TypedefType *Ty, llvm::DICompileUnit U); + llvm::DIType CreateType(const PointerType *Ty, llvm::DICompileUnit U); + llvm::DIType CreateType(const BlockPointerType *Ty, llvm::DICompileUnit U); + llvm::DIType CreateType(const FunctionType *Ty, llvm::DICompileUnit U); + llvm::DIType CreateType(const TagType *Ty, llvm::DICompileUnit U); + llvm::DIType CreateType(const RecordType *Ty, llvm::DICompileUnit U); + llvm::DIType CreateType(const ObjCInterfaceType *Ty, llvm::DICompileUnit U); + llvm::DIType CreateType(const EnumType *Ty, llvm::DICompileUnit U); + llvm::DIType CreateType(const ArrayType *Ty, llvm::DICompileUnit U); + +public: + CGDebugInfo(CodeGenModule *m); + ~CGDebugInfo(); + + /// setLocation - Update the current source location. If \arg loc is + /// invalid it is ignored. + void setLocation(SourceLocation Loc); + + /// EmitStopPoint - Emit a call to llvm.dbg.stoppoint to indicate a change of + /// source line. + void EmitStopPoint(llvm::Function *Fn, CGBuilderTy &Builder); + + /// EmitFunctionStart - Emit a call to llvm.dbg.function.start to indicate + /// start of a new function. + void EmitFunctionStart(const char *Name, QualType ReturnType, + llvm::Function *Fn, CGBuilderTy &Builder); + + /// EmitRegionStart - Emit a call to llvm.dbg.region.start to indicate start + /// of a new block. + void EmitRegionStart(llvm::Function *Fn, CGBuilderTy &Builder); + + /// EmitRegionEnd - Emit call to llvm.dbg.region.end to indicate end of a + /// block. + void EmitRegionEnd(llvm::Function *Fn, CGBuilderTy &Builder); + + /// EmitDeclareOfAutoVariable - Emit call to llvm.dbg.declare for an automatic + /// variable declaration. 
+ void EmitDeclareOfAutoVariable(const VarDecl *Decl, llvm::Value *AI, + CGBuilderTy &Builder); + + /// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument + /// variable declaration. + void EmitDeclareOfArgVariable(const VarDecl *Decl, llvm::Value *AI, + CGBuilderTy &Builder); + + /// EmitGlobalVariable - Emit information about a global variable. + void EmitGlobalVariable(llvm::GlobalVariable *GV, const VarDecl *Decl); + + /// EmitGlobalVariable - Emit information about an objective-c interface. + void EmitGlobalVariable(llvm::GlobalVariable *GV, ObjCInterfaceDecl *Decl); + +private: + /// EmitDeclare - Emit call to llvm.dbg.declare for a variable declaration. + void EmitDeclare(const VarDecl *decl, unsigned Tag, llvm::Value *AI, + CGBuilderTy &Builder); + + + /// getOrCreateCompileUnit - Get the compile unit from the cache or create a + /// new one if necessary. + llvm::DICompileUnit getOrCreateCompileUnit(SourceLocation Loc); + + /// getOrCreateType - Get the type from the cache or create a new type if + /// necessary. + llvm::DIType getOrCreateType(QualType Ty, llvm::DICompileUnit Unit); +}; +} // namespace CodeGen +} // namespace clang + +#endif diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp new file mode 100644 index 000000000000..bcad77be51c2 --- /dev/null +++ b/lib/CodeGen/CGDecl.cpp @@ -0,0 +1,489 @@ +//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This contains code to emit Decl nodes as LLVM code. 
+// +//===----------------------------------------------------------------------===// + +#include "CGDebugInfo.h" +#include "CodeGenFunction.h" +#include "CodeGenModule.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclObjC.h" +#include "clang/Basic/SourceManager.h" +#include "clang/Basic/TargetInfo.h" +#include "llvm/GlobalVariable.h" +#include "llvm/Intrinsics.h" +#include "llvm/Target/TargetData.h" +#include "llvm/Type.h" +using namespace clang; +using namespace CodeGen; + + +void CodeGenFunction::EmitDecl(const Decl &D) { + switch (D.getKind()) { + default: assert(0 && "Unknown decl kind!"); + case Decl::ParmVar: + assert(0 && "Parmdecls should not be in declstmts!"); + case Decl::Function: // void X(); + case Decl::Record: // struct/union/class X; + case Decl::Enum: // enum X; + case Decl::EnumConstant: // enum ? { X = ? } + case Decl::CXXRecord: // struct/union/class X; [C++] + // None of these decls require codegen support. + return; + + case Decl::Var: { + const VarDecl &VD = cast<VarDecl>(D); + assert(VD.isBlockVarDecl() && + "Should not see file-scope variables inside a function!"); + return EmitBlockVarDecl(VD); + } + + case Decl::Typedef: { // typedef int X; + const TypedefDecl &TD = cast<TypedefDecl>(D); + QualType Ty = TD.getUnderlyingType(); + + if (Ty->isVariablyModifiedType()) + EmitVLASize(Ty); + } + } +} + +/// EmitBlockVarDecl - This method handles emission of any variable declaration +/// inside a function, including static vars etc. +void CodeGenFunction::EmitBlockVarDecl(const VarDecl &D) { + if (D.hasAttr<AsmLabelAttr>()) + CGM.ErrorUnsupported(&D, "__asm__"); + + switch (D.getStorageClass()) { + case VarDecl::None: + case VarDecl::Auto: + case VarDecl::Register: + return EmitLocalBlockVarDecl(D); + case VarDecl::Static: + return EmitStaticBlockVarDecl(D); + case VarDecl::Extern: + case VarDecl::PrivateExtern: + // Don't emit it now, allow it to be emitted lazily on its first use. 
+ return; + } + + assert(0 && "Unknown storage class"); +} + +llvm::GlobalVariable * +CodeGenFunction::CreateStaticBlockVarDecl(const VarDecl &D, + const char *Separator, + llvm::GlobalValue::LinkageTypes + Linkage) { + QualType Ty = D.getType(); + assert(Ty->isConstantSizeType() && "VLAs can't be static"); + + std::string Name; + if (getContext().getLangOptions().CPlusPlus) { + Name = CGM.getMangledName(&D); + } else { + std::string ContextName; + if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CurFuncDecl)) + ContextName = CGM.getMangledName(FD); + else if (isa<ObjCMethodDecl>(CurFuncDecl)) + ContextName = std::string(CurFn->getNameStart(), + CurFn->getNameStart() + CurFn->getNameLen()); + else + assert(0 && "Unknown context for block var decl"); + + Name = ContextName + Separator + D.getNameAsString(); + } + + const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(Ty); + return new llvm::GlobalVariable(LTy, Ty.isConstant(getContext()), Linkage, + llvm::Constant::getNullValue(LTy), Name, + &CGM.getModule(), D.isThreadSpecified(), + Ty.getAddressSpace()); +} + +void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D) { + + llvm::Value *&DMEntry = LocalDeclMap[&D]; + assert(DMEntry == 0 && "Decl already exists in localdeclmap!"); + + llvm::GlobalVariable *GV = + CreateStaticBlockVarDecl(D, ".", llvm::GlobalValue::InternalLinkage); + + // Store into LocalDeclMap before generating initializer to handle + // circular references. + DMEntry = GV; + + // Make sure to evaluate VLA bounds now so that we have them for later. + if (D.getType()->isVariablyModifiedType()) + EmitVLASize(D.getType()); + + if (D.getType()->isReferenceType()) { + CGM.ErrorUnsupported(&D, "static declaration with reference type"); + return; + } + + if (D.getInit()) { + llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), D.getType(), this); + + // If constant emission failed, then this should be a C++ static + // initializer. 
+ if (!Init) { + if (!getContext().getLangOptions().CPlusPlus) + CGM.ErrorUnsupported(D.getInit(), "constant l-value expression"); + else + GenerateStaticCXXBlockVarDeclInit(D, GV); + } else { + // The initializer may differ in type from the global. Rewrite + // the global to match the initializer. (We have to do this + // because some types, like unions, can't be completely represented + // in the LLVM type system.) + if (GV->getType() != Init->getType()) { + llvm::GlobalVariable *OldGV = GV; + + GV = new llvm::GlobalVariable(Init->getType(), OldGV->isConstant(), + OldGV->getLinkage(), Init, "", + &CGM.getModule(), D.isThreadSpecified(), + D.getType().getAddressSpace()); + + // Steal the name of the old global + GV->takeName(OldGV); + + // Replace all uses of the old global with the new global + llvm::Constant *NewPtrForOldDecl = + llvm::ConstantExpr::getBitCast(GV, OldGV->getType()); + OldGV->replaceAllUsesWith(NewPtrForOldDecl); + + // Erase the old global, since it is no longer used. + OldGV->eraseFromParent(); + } + + GV->setInitializer(Init); + } + } + + // FIXME: Merge attribute handling. + if (const AnnotateAttr *AA = D.getAttr<AnnotateAttr>()) { + SourceManager &SM = CGM.getContext().getSourceManager(); + llvm::Constant *Ann = + CGM.EmitAnnotateAttr(GV, AA, + SM.getInstantiationLineNumber(D.getLocation())); + CGM.AddAnnotation(Ann); + } + + if (const SectionAttr *SA = D.getAttr<SectionAttr>()) + GV->setSection(SA->getName()); + + if (D.hasAttr<UsedAttr>()) + CGM.AddUsedGlobal(GV); + + // We may have to cast the constant because of the initializer + // mismatch above. + // + // FIXME: It is really dangerous to store this in the map; if anyone + // RAUW's the GV uses of this constant will be invalid. 
+ const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(D.getType()); + const llvm::Type *LPtrTy = + llvm::PointerType::get(LTy, D.getType().getAddressSpace()); + DMEntry = llvm::ConstantExpr::getBitCast(GV, LPtrTy); + + // Emit global variable debug descriptor for static vars. + CGDebugInfo *DI = getDebugInfo(); + if (DI) { + DI->setLocation(D.getLocation()); + DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(GV), &D); + } +} + +/// BuildByRefType - This routine changes a __block variable declared as T x +/// into: +/// +/// struct { +/// void *__isa; +/// void *__forwarding; +/// int32_t __flags; +/// int32_t __size; +/// void *__copy_helper; +/// void *__destroy_helper; +/// T x; +/// } x +/// +/// Align is the alignment needed in bytes for x. +const llvm::Type *CodeGenFunction::BuildByRefType(QualType Ty, + uint64_t Align) { + const llvm::Type *LTy = ConvertType(Ty); + bool needsCopyDispose = BlockRequiresCopying(Ty); + std::vector<const llvm::Type *> Types(needsCopyDispose*2+5); + const llvm::PointerType *PtrToInt8Ty + = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + Types[0] = PtrToInt8Ty; + Types[1] = PtrToInt8Ty; + Types[2] = llvm::Type::Int32Ty; + Types[3] = llvm::Type::Int32Ty; + if (needsCopyDispose) { + Types[4] = PtrToInt8Ty; + Types[5] = PtrToInt8Ty; + } + // FIXME: Align this on at least an Align boundary. + Types[needsCopyDispose*2 + 4] = LTy; + return llvm::StructType::get(Types, false); +} + +/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a +/// variable declaration with auto, register, or no storage class specifier. +/// These turn into simple stack objects, or GlobalValues depending on target. 
+void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) { + QualType Ty = D.getType(); + bool isByRef = D.hasAttr<BlocksAttr>(); + bool needsDispose = false; + + llvm::Value *DeclPtr; + if (Ty->isConstantSizeType()) { + if (!Target.useGlobalsForAutomaticVariables()) { + // A normal fixed sized variable becomes an alloca in the entry block. + const llvm::Type *LTy = ConvertTypeForMem(Ty); + if (isByRef) + LTy = BuildByRefType(Ty, getContext().getDeclAlignInBytes(&D)); + llvm::AllocaInst *Alloc = CreateTempAlloca(LTy); + Alloc->setName(D.getNameAsString().c_str()); + + if (isByRef) + Alloc->setAlignment(std::max(getContext().getDeclAlignInBytes(&D), + unsigned(Target.getPointerAlign(0) / 8))); + else + Alloc->setAlignment(getContext().getDeclAlignInBytes(&D)); + DeclPtr = Alloc; + } else { + // Targets that don't support recursion emit locals as globals. + const char *Class = + D.getStorageClass() == VarDecl::Register ? ".reg." : ".auto."; + DeclPtr = CreateStaticBlockVarDecl(D, Class, + llvm::GlobalValue + ::InternalLinkage); + } + + if (Ty->isVariablyModifiedType()) + EmitVLASize(Ty); + } else { + if (!DidCallStackSave) { + // Save the stack. + const llvm::Type *LTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack"); + + llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave); + llvm::Value *V = Builder.CreateCall(F); + + Builder.CreateStore(V, Stack); + + DidCallStackSave = true; + + { + // Push a cleanup block and restore the stack there. + CleanupScope scope(*this); + + V = Builder.CreateLoad(Stack, "tmp"); + llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stackrestore); + Builder.CreateCall(F, V); + } + } + + // Get the element type. 
+ const llvm::Type *LElemTy = ConvertTypeForMem(Ty); + const llvm::Type *LElemPtrTy = + llvm::PointerType::get(LElemTy, D.getType().getAddressSpace()); + + llvm::Value *VLASize = EmitVLASize(Ty); + + // Downcast the VLA size expression + VLASize = Builder.CreateIntCast(VLASize, llvm::Type::Int32Ty, false, "tmp"); + + // Allocate memory for the array. + llvm::Value *VLA = Builder.CreateAlloca(llvm::Type::Int8Ty, VLASize, "vla"); + DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp"); + } + + llvm::Value *&DMEntry = LocalDeclMap[&D]; + assert(DMEntry == 0 && "Decl already exists in localdeclmap!"); + DMEntry = DeclPtr; + + // Emit debug info for local var declaration. + if (CGDebugInfo *DI = getDebugInfo()) { + DI->setLocation(D.getLocation()); + if (Target.useGlobalsForAutomaticVariables()) { + DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D); + } + else if (isByRef) { + llvm::Value *Loc; + bool needsCopyDispose = BlockRequiresCopying(Ty); + Loc = Builder.CreateStructGEP(DeclPtr, 1, "forwarding"); + Loc = Builder.CreateLoad(Loc, false); + Loc = Builder.CreateBitCast(Loc, DeclPtr->getType()); + Loc = Builder.CreateStructGEP(Loc, needsCopyDispose*2+4, "x"); + DI->EmitDeclareOfAutoVariable(&D, Loc, Builder); + } else + DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder); + } + + // If this local has an initializer, emit it now. 
+ if (const Expr *Init = D.getInit()) { + llvm::Value *Loc = DeclPtr; + if (isByRef) { + bool needsCopyDispose = BlockRequiresCopying(Ty); + Loc = Builder.CreateStructGEP(DeclPtr, needsCopyDispose*2+4, "x"); + } + if (Ty->isReferenceType()) { + llvm::Value *V = EmitReferenceBindingToExpr(Init, Ty).getScalarVal(); + EmitStoreOfScalar(V, Loc, false, Ty); + } else if (!hasAggregateLLVMType(Init->getType())) { + llvm::Value *V = EmitScalarExpr(Init); + EmitStoreOfScalar(V, Loc, D.getType().isVolatileQualified(), + D.getType()); + } else if (Init->getType()->isAnyComplexType()) { + EmitComplexExprIntoAddr(Init, Loc, D.getType().isVolatileQualified()); + } else { + EmitAggExpr(Init, Loc, D.getType().isVolatileQualified()); + } + } + if (isByRef) { + const llvm::PointerType *PtrToInt8Ty + = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + + llvm::Value *isa_field = Builder.CreateStructGEP(DeclPtr, 0); + llvm::Value *forwarding_field = Builder.CreateStructGEP(DeclPtr, 1); + llvm::Value *flags_field = Builder.CreateStructGEP(DeclPtr, 2); + llvm::Value *size_field = Builder.CreateStructGEP(DeclPtr, 3); + llvm::Value *V; + int flag = 0; + int flags = 0; + + needsDispose = true; + + if (Ty->isBlockPointerType()) { + flag |= BLOCK_FIELD_IS_BLOCK; + flags |= BLOCK_HAS_COPY_DISPOSE; + } else if (BlockRequiresCopying(Ty)) { + flag |= BLOCK_FIELD_IS_OBJECT; + flags |= BLOCK_HAS_COPY_DISPOSE; + } + + // FIXME: Someone double check this. 
+ if (Ty.isObjCGCWeak()) + flag |= BLOCK_FIELD_IS_WEAK; + + int isa = 0; + if (flag&BLOCK_FIELD_IS_WEAK) + isa = 1; + V = llvm::ConstantInt::get(llvm::Type::Int32Ty, isa); + V = Builder.CreateIntToPtr(V, PtrToInt8Ty, "isa"); + Builder.CreateStore(V, isa_field); + + V = Builder.CreateBitCast(DeclPtr, PtrToInt8Ty, "forwarding"); + Builder.CreateStore(V, forwarding_field); + + V = llvm::ConstantInt::get(llvm::Type::Int32Ty, flags); + Builder.CreateStore(V, flags_field); + + const llvm::Type *V1; + V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType(); + V = llvm::ConstantInt::get(llvm::Type::Int32Ty, + (CGM.getTargetData().getTypeStoreSizeInBits(V1) + / 8)); + Builder.CreateStore(V, size_field); + + if (flags & BLOCK_HAS_COPY_DISPOSE) { + BlockHasCopyDispose = true; + llvm::Value *copy_helper = Builder.CreateStructGEP(DeclPtr, 4); + Builder.CreateStore(BuildbyrefCopyHelper(DeclPtr->getType(), flag), + copy_helper); + + llvm::Value *destroy_helper = Builder.CreateStructGEP(DeclPtr, 5); + Builder.CreateStore(BuildbyrefDestroyHelper(DeclPtr->getType(), flag), + destroy_helper); + } + } + + // Handle the cleanup attribute + if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) { + const FunctionDecl *FD = CA->getFunctionDecl(); + + llvm::Constant* F = CGM.GetAddrOfFunction(GlobalDecl(FD)); + assert(F && "Could not find function!"); + + CleanupScope scope(*this); + + const CGFunctionInfo &Info = CGM.getTypes().getFunctionInfo(FD); + + // In some cases, the type of the function argument will be different from + // the type of the pointer. An example of this is + // void f(void* arg); + // __attribute__((cleanup(f))) void *g; + // + // To fix this we insert a bitcast here. 
+ QualType ArgTy = Info.arg_begin()->type; + DeclPtr = Builder.CreateBitCast(DeclPtr, ConvertType(ArgTy)); + + CallArgList Args; + Args.push_back(std::make_pair(RValue::get(DeclPtr), + getContext().getPointerType(D.getType()))); + + EmitCall(Info, F, Args); + } + + if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly) { + CleanupScope scope(*this); + llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding"); + V = Builder.CreateLoad(V, false); + BuildBlockRelease(V); + } +} + +/// Emit an alloca (or GlobalValue depending on target) +/// for the specified parameter and set up LocalDeclMap. +void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg) { + // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl? + assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) && + "Invalid argument to EmitParmDecl"); + QualType Ty = D.getType(); + + llvm::Value *DeclPtr; + if (!Ty->isConstantSizeType()) { + // Variable sized values always are passed by-reference. + DeclPtr = Arg; + } else { + // A fixed sized single-value variable becomes an alloca in the entry block. + const llvm::Type *LTy = ConvertTypeForMem(Ty); + if (LTy->isSingleValueType()) { + // TODO: Alignment + std::string Name = D.getNameAsString(); + Name += ".addr"; + DeclPtr = CreateTempAlloca(LTy); + DeclPtr->setName(Name.c_str()); + + // Store the initial value into the alloca. + EmitStoreOfScalar(Arg, DeclPtr, Ty.isVolatileQualified(), Ty); + } else { + // Otherwise, if this is an aggregate, just use the input pointer. + DeclPtr = Arg; + } + Arg->setName(D.getNameAsString()); + } + + llvm::Value *&DMEntry = LocalDeclMap[&D]; + assert(DMEntry == 0 && "Decl already exists in localdeclmap!"); + DMEntry = DeclPtr; + + // Emit debug info for param declaration. 
+ if (CGDebugInfo *DI = getDebugInfo()) { + DI->setLocation(D.getLocation()); + DI->EmitDeclareOfArgVariable(&D, DeclPtr, Builder); + } +} + diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp new file mode 100644 index 000000000000..c5f23879d1c3 --- /dev/null +++ b/lib/CodeGen/CGExpr.cpp @@ -0,0 +1,1324 @@ +//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This contains code to emit Expr nodes as LLVM code. +// +//===----------------------------------------------------------------------===// + +#include "CodeGenFunction.h" +#include "CodeGenModule.h" +#include "CGCall.h" +#include "CGObjCRuntime.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/DeclObjC.h" +#include "llvm/Target/TargetData.h" +using namespace clang; +using namespace CodeGen; + +//===--------------------------------------------------------------------===// +// Miscellaneous Helper Methods +//===--------------------------------------------------------------------===// + +/// CreateTempAlloca - This creates a alloca and inserts it into the entry +/// block. +llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(const llvm::Type *Ty, + const char *Name) { + if (!Builder.isNamePreserving()) + Name = ""; + return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt); +} + +/// EvaluateExprAsBool - Perform the usual unary conversions on the specified +/// expression and compare the result against zero, returning an Int1Ty value. 
+llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) { + QualType BoolTy = getContext().BoolTy; + if (!E->getType()->isAnyComplexType()) + return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy); + + return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy); +} + +/// EmitAnyExpr - Emit code to compute the specified expression which can have +/// any type. The result is returned as an RValue struct. If this is an +/// aggregate expression, the aggloc/agglocvolatile arguments indicate where +/// the result should be returned. +RValue CodeGenFunction::EmitAnyExpr(const Expr *E, llvm::Value *AggLoc, + bool isAggLocVolatile, bool IgnoreResult) { + if (!hasAggregateLLVMType(E->getType())) + return RValue::get(EmitScalarExpr(E, IgnoreResult)); + else if (E->getType()->isAnyComplexType()) + return RValue::getComplex(EmitComplexExpr(E, false, false, + IgnoreResult, IgnoreResult)); + + EmitAggExpr(E, AggLoc, isAggLocVolatile, IgnoreResult); + return RValue::getAggregate(AggLoc, isAggLocVolatile); +} + +/// EmitAnyExprToTemp - Similary to EmitAnyExpr(), however, the result +/// will always be accessible even if no aggregate location is +/// provided. +RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E, llvm::Value *AggLoc, + bool isAggLocVolatile) { + if (!AggLoc && hasAggregateLLVMType(E->getType()) && + !E->getType()->isAnyComplexType()) + AggLoc = CreateTempAlloca(ConvertType(E->getType()), "agg.tmp"); + return EmitAnyExpr(E, AggLoc, isAggLocVolatile); +} + +RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E, + QualType DestType) { + RValue Val; + if (E->isLvalue(getContext()) == Expr::LV_Valid) { + // Emit the expr as an lvalue. 
+ LValue LV = EmitLValue(E); + if (LV.isSimple()) + return RValue::get(LV.getAddress()); + Val = EmitLoadOfLValue(LV, E->getType()); + } else { + Val = EmitAnyExprToTemp(E); + } + + if (Val.isAggregate()) { + Val = RValue::get(Val.getAggregateAddr()); + } else { + // Create a temporary variable that we can bind the reference to. + llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(E->getType()), + "reftmp"); + if (Val.isScalar()) + EmitStoreOfScalar(Val.getScalarVal(), Temp, false, E->getType()); + else + StoreComplexToAddr(Val.getComplexVal(), Temp, false); + Val = RValue::get(Temp); + } + + return Val; +} + + +/// getAccessedFieldNo - Given an encoded value and a result number, return +/// the input field number being accessed. +unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx, + const llvm::Constant *Elts) { + if (isa<llvm::ConstantAggregateZero>(Elts)) + return 0; + + return cast<llvm::ConstantInt>(Elts->getOperand(Idx))->getZExtValue(); +} + + +//===----------------------------------------------------------------------===// +// LValue Expression Emission +//===----------------------------------------------------------------------===// + +RValue CodeGenFunction::GetUndefRValue(QualType Ty) { + if (Ty->isVoidType()) { + return RValue::get(0); + } else if (const ComplexType *CTy = Ty->getAsComplexType()) { + const llvm::Type *EltTy = ConvertType(CTy->getElementType()); + llvm::Value *U = llvm::UndefValue::get(EltTy); + return RValue::getComplex(std::make_pair(U, U)); + } else if (hasAggregateLLVMType(Ty)) { + const llvm::Type *LTy = llvm::PointerType::getUnqual(ConvertType(Ty)); + return RValue::getAggregate(llvm::UndefValue::get(LTy)); + } else { + return RValue::get(llvm::UndefValue::get(ConvertType(Ty))); + } +} + +RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E, + const char *Name) { + ErrorUnsupported(E, Name); + return GetUndefRValue(E->getType()); +} + +LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E, + const char 
*Name) { + ErrorUnsupported(E, Name); + llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType())); + return LValue::MakeAddr(llvm::UndefValue::get(Ty), + E->getType().getCVRQualifiers(), + getContext().getObjCGCAttrKind(E->getType())); +} + +/// EmitLValue - Emit code to compute a designator that specifies the location +/// of the expression. +/// +/// This can return one of two things: a simple address or a bitfield +/// reference. In either case, the LLVM Value* in the LValue structure is +/// guaranteed to be an LLVM pointer type. +/// +/// If this returns a bitfield reference, nothing about the pointee type of +/// the LLVM value is known: For example, it may not be a pointer to an +/// integer. +/// +/// If this returns a normal address, and if the lvalue's C type is fixed +/// size, this method guarantees that the returned pointer type will point to +/// an LLVM type of the same size of the lvalue's type. If the lvalue has a +/// variable length type, this is not possible. 
+/// +LValue CodeGenFunction::EmitLValue(const Expr *E) { + switch (E->getStmtClass()) { + default: return EmitUnsupportedLValue(E, "l-value expression"); + + case Expr::BinaryOperatorClass: + return EmitBinaryOperatorLValue(cast<BinaryOperator>(E)); + case Expr::CallExprClass: + case Expr::CXXOperatorCallExprClass: + return EmitCallExprLValue(cast<CallExpr>(E)); + case Expr::VAArgExprClass: + return EmitVAArgExprLValue(cast<VAArgExpr>(E)); + case Expr::DeclRefExprClass: + case Expr::QualifiedDeclRefExprClass: + return EmitDeclRefLValue(cast<DeclRefExpr>(E)); + case Expr::ParenExprClass:return EmitLValue(cast<ParenExpr>(E)->getSubExpr()); + case Expr::PredefinedExprClass: + return EmitPredefinedLValue(cast<PredefinedExpr>(E)); + case Expr::StringLiteralClass: + return EmitStringLiteralLValue(cast<StringLiteral>(E)); + case Expr::ObjCEncodeExprClass: + return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E)); + + case Expr::BlockDeclRefExprClass: + return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E)); + + case Expr::CXXConditionDeclExprClass: + return EmitCXXConditionDeclLValue(cast<CXXConditionDeclExpr>(E)); + case Expr::CXXTemporaryObjectExprClass: + case Expr::CXXConstructExprClass: + return EmitCXXConstructLValue(cast<CXXConstructExpr>(E)); + case Expr::CXXBindTemporaryExprClass: + return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E)); + + case Expr::ObjCMessageExprClass: + return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E)); + case Expr::ObjCIvarRefExprClass: + return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E)); + case Expr::ObjCPropertyRefExprClass: + return EmitObjCPropertyRefLValue(cast<ObjCPropertyRefExpr>(E)); + case Expr::ObjCKVCRefExprClass: + return EmitObjCKVCRefLValue(cast<ObjCKVCRefExpr>(E)); + case Expr::ObjCSuperExprClass: + return EmitObjCSuperExprLValue(cast<ObjCSuperExpr>(E)); + + case Expr::StmtExprClass: + return EmitStmtExprLValue(cast<StmtExpr>(E)); + case Expr::UnaryOperatorClass: + return 
EmitUnaryOpLValue(cast<UnaryOperator>(E)); + case Expr::ArraySubscriptExprClass: + return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E)); + case Expr::ExtVectorElementExprClass: + return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E)); + case Expr::MemberExprClass: return EmitMemberExpr(cast<MemberExpr>(E)); + case Expr::CompoundLiteralExprClass: + return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E)); + case Expr::ConditionalOperatorClass: + return EmitConditionalOperator(cast<ConditionalOperator>(E)); + case Expr::ChooseExprClass: + return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext())); + case Expr::ImplicitCastExprClass: + case Expr::CStyleCastExprClass: + case Expr::CXXFunctionalCastExprClass: + case Expr::CXXStaticCastExprClass: + case Expr::CXXDynamicCastExprClass: + case Expr::CXXReinterpretCastExprClass: + case Expr::CXXConstCastExprClass: + return EmitCastLValue(cast<CastExpr>(E)); + } +} + +llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile, + QualType Ty) { + llvm::Value *V = Builder.CreateLoad(Addr, Volatile, "tmp"); + + // Bool can have different representation in memory than in registers. + if (Ty->isBooleanType()) + if (V->getType() != llvm::Type::Int1Ty) + V = Builder.CreateTrunc(V, llvm::Type::Int1Ty, "tobool"); + + return V; +} + +void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr, + bool Volatile, QualType Ty) { + + if (Ty->isBooleanType()) { + // Bool can have different representation in memory than in registers. 
+ const llvm::Type *SrcTy = Value->getType(); + const llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType()); + if (DstPtr->getElementType() != SrcTy) { + const llvm::Type *MemTy = + llvm::PointerType::get(SrcTy, DstPtr->getAddressSpace()); + Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp"); + } + } + + Builder.CreateStore(Value, Addr, Volatile); +} + +/// EmitLoadOfLValue - Given an expression that represents a value lvalue, +/// this method emits the address of the lvalue, then loads the result as an +/// rvalue, returning the rvalue. +RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) { + if (LV.isObjCWeak()) { + // load of a __weak object. + llvm::Value *AddrWeakObj = LV.getAddress(); + llvm::Value *read_weak = CGM.getObjCRuntime().EmitObjCWeakRead(*this, + AddrWeakObj); + return RValue::get(read_weak); + } + + if (LV.isSimple()) { + llvm::Value *Ptr = LV.getAddress(); + const llvm::Type *EltTy = + cast<llvm::PointerType>(Ptr->getType())->getElementType(); + + // Simple scalar l-value. + if (EltTy->isSingleValueType()) + return RValue::get(EmitLoadOfScalar(Ptr, LV.isVolatileQualified(), + ExprType)); + + assert(ExprType->isFunctionType() && "Unknown scalar value"); + return RValue::get(Ptr); + } + + if (LV.isVectorElt()) { + llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(), + LV.isVolatileQualified(), "tmp"); + return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(), + "vecext")); + } + + // If this is a reference to a subset of the elements of a vector, either + // shuffle the input or extract/insert them as appropriate. 
+ if (LV.isExtVectorElt()) + return EmitLoadOfExtVectorElementLValue(LV, ExprType); + + if (LV.isBitfield()) + return EmitLoadOfBitfieldLValue(LV, ExprType); + + if (LV.isPropertyRef()) + return EmitLoadOfPropertyRefLValue(LV, ExprType); + + assert(LV.isKVCRef() && "Unknown LValue type!"); + return EmitLoadOfKVCRefLValue(LV, ExprType); +} + +RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV, + QualType ExprType) { + unsigned StartBit = LV.getBitfieldStartBit(); + unsigned BitfieldSize = LV.getBitfieldSize(); + llvm::Value *Ptr = LV.getBitfieldAddr(); + + const llvm::Type *EltTy = + cast<llvm::PointerType>(Ptr->getType())->getElementType(); + unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy); + + // In some cases the bitfield may straddle two memory locations. + // Currently we load the entire bitfield, then do the magic to + // sign-extend it if necessary. This results in somewhat more code + // than necessary for the common case (one load), since two shifts + // accomplish both the masking and sign extension. + unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit); + llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "tmp"); + + // Shift to proper location. + if (StartBit) + Val = Builder.CreateLShr(Val, llvm::ConstantInt::get(EltTy, StartBit), + "bf.lo"); + + // Mask off unused bits. + llvm::Constant *LowMask = + llvm::ConstantInt::get(llvm::APInt::getLowBitsSet(EltTySize, LowBits)); + Val = Builder.CreateAnd(Val, LowMask, "bf.lo.cleared"); + + // Fetch the high bits if necessary. + if (LowBits < BitfieldSize) { + unsigned HighBits = BitfieldSize - LowBits; + llvm::Value *HighPtr = + Builder.CreateGEP(Ptr, llvm::ConstantInt::get(llvm::Type::Int32Ty, 1), + "bf.ptr.hi"); + llvm::Value *HighVal = Builder.CreateLoad(HighPtr, + LV.isVolatileQualified(), + "tmp"); + + // Mask off unused bits. 
+ llvm::Constant *HighMask = + llvm::ConstantInt::get(llvm::APInt::getLowBitsSet(EltTySize, HighBits)); + HighVal = Builder.CreateAnd(HighVal, HighMask, "bf.lo.cleared"); + + // Shift to proper location and or in to bitfield value. + HighVal = Builder.CreateShl(HighVal, + llvm::ConstantInt::get(EltTy, LowBits)); + Val = Builder.CreateOr(Val, HighVal, "bf.val"); + } + + // Sign extend if necessary. + if (LV.isBitfieldSigned()) { + llvm::Value *ExtraBits = llvm::ConstantInt::get(EltTy, + EltTySize - BitfieldSize); + Val = Builder.CreateAShr(Builder.CreateShl(Val, ExtraBits), + ExtraBits, "bf.val.sext"); + } + + // The bitfield type and the normal type differ when the storage sizes + // differ (currently just _Bool). + Val = Builder.CreateIntCast(Val, ConvertType(ExprType), false, "tmp"); + + return RValue::get(Val); +} + +RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV, + QualType ExprType) { + return EmitObjCPropertyGet(LV.getPropertyRefExpr()); +} + +RValue CodeGenFunction::EmitLoadOfKVCRefLValue(LValue LV, + QualType ExprType) { + return EmitObjCPropertyGet(LV.getKVCRefExpr()); +} + +// If this is a reference to a subset of the elements of a vector, create an +// appropriate shufflevector. +RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV, + QualType ExprType) { + llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(), + LV.isVolatileQualified(), "tmp"); + + const llvm::Constant *Elts = LV.getExtVectorElts(); + + // If the result of the expression is a non-vector type, we must be + // extracting a single element. Just codegen as an extractelement. 
+ const VectorType *ExprVT = ExprType->getAsVectorType(); + if (!ExprVT) { + unsigned InIdx = getAccessedFieldNo(0, Elts); + llvm::Value *Elt = llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx); + return RValue::get(Builder.CreateExtractElement(Vec, Elt, "tmp")); + } + + // Always use shuffle vector to try to retain the original program structure + unsigned NumResultElts = ExprVT->getNumElements(); + + llvm::SmallVector<llvm::Constant*, 4> Mask; + for (unsigned i = 0; i != NumResultElts; ++i) { + unsigned InIdx = getAccessedFieldNo(i, Elts); + Mask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx)); + } + + llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size()); + Vec = Builder.CreateShuffleVector(Vec, + llvm::UndefValue::get(Vec->getType()), + MaskV, "tmp"); + return RValue::get(Vec); +} + + + +/// EmitStoreThroughLValue - Store the specified rvalue into the specified +/// lvalue, where both are guaranteed to the have the same type, and that type +/// is 'Ty'. +void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, + QualType Ty) { + if (!Dst.isSimple()) { + if (Dst.isVectorElt()) { + // Read/modify/write the vector, inserting the new element. + llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(), + Dst.isVolatileQualified(), "tmp"); + Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(), + Dst.getVectorIdx(), "vecins"); + Builder.CreateStore(Vec, Dst.getVectorAddr(),Dst.isVolatileQualified()); + return; + } + + // If this is an update of extended vector elements, insert them as + // appropriate. 
+ if (Dst.isExtVectorElt()) + return EmitStoreThroughExtVectorComponentLValue(Src, Dst, Ty); + + if (Dst.isBitfield()) + return EmitStoreThroughBitfieldLValue(Src, Dst, Ty); + + if (Dst.isPropertyRef()) + return EmitStoreThroughPropertyRefLValue(Src, Dst, Ty); + + if (Dst.isKVCRef()) + return EmitStoreThroughKVCRefLValue(Src, Dst, Ty); + + assert(0 && "Unknown LValue type"); + } + + if (Dst.isObjCWeak() && !Dst.isNonGC()) { + // load of a __weak object. + llvm::Value *LvalueDst = Dst.getAddress(); + llvm::Value *src = Src.getScalarVal(); + CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst); + return; + } + + if (Dst.isObjCStrong() && !Dst.isNonGC()) { + // load of a __strong object. + llvm::Value *LvalueDst = Dst.getAddress(); + llvm::Value *src = Src.getScalarVal(); +#if 0 + // FIXME. We cannot positively determine if we have an 'ivar' assignment, + // object assignment or an unknown assignment. For now, generate call to + // objc_assign_strongCast assignment which is a safe, but consevative + // assumption. 
+ if (Dst.isObjCIvar()) + CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, LvalueDst); + else + CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst); +#endif + if (Dst.isGlobalObjCRef()) + CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst); + else + CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst); + return; + } + + assert(Src.isScalar() && "Can't emit an agg store with this method"); + EmitStoreOfScalar(Src.getScalarVal(), Dst.getAddress(), + Dst.isVolatileQualified(), Ty); +} + +void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, + QualType Ty, + llvm::Value **Result) { + unsigned StartBit = Dst.getBitfieldStartBit(); + unsigned BitfieldSize = Dst.getBitfieldSize(); + llvm::Value *Ptr = Dst.getBitfieldAddr(); + + const llvm::Type *EltTy = + cast<llvm::PointerType>(Ptr->getType())->getElementType(); + unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy); + + // Get the new value, cast to the appropriate type and masked to + // exactly the size of the bit-field. + llvm::Value *SrcVal = Src.getScalarVal(); + llvm::Value *NewVal = Builder.CreateIntCast(SrcVal, EltTy, false, "tmp"); + llvm::Constant *Mask = + llvm::ConstantInt::get(llvm::APInt::getLowBitsSet(EltTySize, BitfieldSize)); + NewVal = Builder.CreateAnd(NewVal, Mask, "bf.value"); + + // Return the new value of the bit-field, if requested. + if (Result) { + // Cast back to the proper type for result. + const llvm::Type *SrcTy = SrcVal->getType(); + llvm::Value *SrcTrunc = Builder.CreateIntCast(NewVal, SrcTy, false, + "bf.reload.val"); + + // Sign extend if necessary. 
+ if (Dst.isBitfieldSigned()) { + unsigned SrcTySize = CGM.getTargetData().getTypeSizeInBits(SrcTy); + llvm::Value *ExtraBits = llvm::ConstantInt::get(SrcTy, + SrcTySize - BitfieldSize); + SrcTrunc = Builder.CreateAShr(Builder.CreateShl(SrcTrunc, ExtraBits), + ExtraBits, "bf.reload.sext"); + } + + *Result = SrcTrunc; + } + + // In some cases the bitfield may straddle two memory locations. + // Emit the low part first and check to see if the high needs to be + // done. + unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit); + llvm::Value *LowVal = Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), + "bf.prev.low"); + + // Compute the mask for zero-ing the low part of this bitfield. + llvm::Constant *InvMask = + llvm::ConstantInt::get(~llvm::APInt::getBitsSet(EltTySize, StartBit, + StartBit + LowBits)); + + // Compute the new low part as + // LowVal = (LowVal & InvMask) | (NewVal << StartBit), + // with the shift of NewVal implicitly stripping the high bits. + llvm::Value *NewLowVal = + Builder.CreateShl(NewVal, llvm::ConstantInt::get(EltTy, StartBit), + "bf.value.lo"); + LowVal = Builder.CreateAnd(LowVal, InvMask, "bf.prev.lo.cleared"); + LowVal = Builder.CreateOr(LowVal, NewLowVal, "bf.new.lo"); + + // Write back. + Builder.CreateStore(LowVal, Ptr, Dst.isVolatileQualified()); + + // If the low part doesn't cover the bitfield emit a high part. + if (LowBits < BitfieldSize) { + unsigned HighBits = BitfieldSize - LowBits; + llvm::Value *HighPtr = + Builder.CreateGEP(Ptr, llvm::ConstantInt::get(llvm::Type::Int32Ty, 1), + "bf.ptr.hi"); + llvm::Value *HighVal = Builder.CreateLoad(HighPtr, + Dst.isVolatileQualified(), + "bf.prev.hi"); + + // Compute the mask for zero-ing the high part of this bitfield. 
+ llvm::Constant *InvMask = + llvm::ConstantInt::get(~llvm::APInt::getLowBitsSet(EltTySize, HighBits)); + + // Compute the new high part as + // HighVal = (HighVal & InvMask) | (NewVal lshr LowBits), + // where the high bits of NewVal have already been cleared and the + // shift stripping the low bits. + llvm::Value *NewHighVal = + Builder.CreateLShr(NewVal, llvm::ConstantInt::get(EltTy, LowBits), + "bf.value.high"); + HighVal = Builder.CreateAnd(HighVal, InvMask, "bf.prev.hi.cleared"); + HighVal = Builder.CreateOr(HighVal, NewHighVal, "bf.new.hi"); + + // Write back. + Builder.CreateStore(HighVal, HighPtr, Dst.isVolatileQualified()); + } +} + +void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src, + LValue Dst, + QualType Ty) { + EmitObjCPropertySet(Dst.getPropertyRefExpr(), Src); +} + +void CodeGenFunction::EmitStoreThroughKVCRefLValue(RValue Src, + LValue Dst, + QualType Ty) { + EmitObjCPropertySet(Dst.getKVCRefExpr(), Src); +} + +void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src, + LValue Dst, + QualType Ty) { + // This access turns into a read/modify/write of the vector. Load the input + // value now. 
+ llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(), + Dst.isVolatileQualified(), "tmp"); + const llvm::Constant *Elts = Dst.getExtVectorElts(); + + llvm::Value *SrcVal = Src.getScalarVal(); + + if (const VectorType *VTy = Ty->getAsVectorType()) { + unsigned NumSrcElts = VTy->getNumElements(); + unsigned NumDstElts = + cast<llvm::VectorType>(Vec->getType())->getNumElements(); + if (NumDstElts == NumSrcElts) { + // Use shuffle vector is the src and destination are the same number + // of elements + llvm::SmallVector<llvm::Constant*, 4> Mask; + for (unsigned i = 0; i != NumSrcElts; ++i) { + unsigned InIdx = getAccessedFieldNo(i, Elts); + Mask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx)); + } + + llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size()); + Vec = Builder.CreateShuffleVector(SrcVal, + llvm::UndefValue::get(Vec->getType()), + MaskV, "tmp"); + } + else if (NumDstElts > NumSrcElts) { + // Extended the source vector to the same length and then shuffle it + // into the destination. + // FIXME: since we're shuffling with undef, can we just use the indices + // into that? This could be simpler. 
+ llvm::SmallVector<llvm::Constant*, 4> ExtMask; + unsigned i; + for (i = 0; i != NumSrcElts; ++i) + ExtMask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, i)); + for (; i != NumDstElts; ++i) + ExtMask.push_back(llvm::UndefValue::get(llvm::Type::Int32Ty)); + llvm::Value *ExtMaskV = llvm::ConstantVector::get(&ExtMask[0], + ExtMask.size()); + llvm::Value *ExtSrcVal = + Builder.CreateShuffleVector(SrcVal, + llvm::UndefValue::get(SrcVal->getType()), + ExtMaskV, "tmp"); + // build identity + llvm::SmallVector<llvm::Constant*, 4> Mask; + for (unsigned i = 0; i != NumDstElts; ++i) { + Mask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, i)); + } + // modify when what gets shuffled in + for (unsigned i = 0; i != NumSrcElts; ++i) { + unsigned Idx = getAccessedFieldNo(i, Elts); + Mask[Idx] =llvm::ConstantInt::get(llvm::Type::Int32Ty, i+NumDstElts); + } + llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size()); + Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV, "tmp"); + } + else { + // We should never shorten the vector + assert(0 && "unexpected shorten vector length"); + } + } else { + // If the Src is a scalar (not a vector) it must be updating one element. 
+ unsigned InIdx = getAccessedFieldNo(0, Elts); + llvm::Value *Elt = llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx); + Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp"); + } + + Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified()); +} + +LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) { + const VarDecl *VD = dyn_cast<VarDecl>(E->getDecl()); + + if (VD && (VD->isBlockVarDecl() || isa<ParmVarDecl>(VD) || + isa<ImplicitParamDecl>(VD))) { + LValue LV; + bool NonGCable = VD->hasLocalStorage() && !VD->hasAttr<BlocksAttr>(); + if (VD->hasExternalStorage()) { + llvm::Value *V = CGM.GetAddrOfGlobalVar(VD); + if (VD->getType()->isReferenceType()) + V = Builder.CreateLoad(V, "tmp"); + LV = LValue::MakeAddr(V, E->getType().getCVRQualifiers(), + getContext().getObjCGCAttrKind(E->getType())); + } + else { + llvm::Value *V = LocalDeclMap[VD]; + assert(V && "DeclRefExpr not entered in LocalDeclMap?"); + // local variables do not get their gc attribute set. + QualType::GCAttrTypes attr = QualType::GCNone; + // local static? 
+ if (!NonGCable) + attr = getContext().getObjCGCAttrKind(E->getType()); + if (VD->hasAttr<BlocksAttr>()) { + bool needsCopyDispose = BlockRequiresCopying(VD->getType()); + const llvm::Type *PtrStructTy = V->getType(); + const llvm::Type *Ty = PtrStructTy; + Ty = llvm::PointerType::get(Ty, 0); + V = Builder.CreateStructGEP(V, 1, "forwarding"); + V = Builder.CreateBitCast(V, Ty); + V = Builder.CreateLoad(V, false); + V = Builder.CreateBitCast(V, PtrStructTy); + V = Builder.CreateStructGEP(V, needsCopyDispose*2 + 4, "x"); + } + if (VD->getType()->isReferenceType()) + V = Builder.CreateLoad(V, "tmp"); + LV = LValue::MakeAddr(V, E->getType().getCVRQualifiers(), attr); + } + LValue::SetObjCNonGC(LV, NonGCable); + return LV; + } else if (VD && VD->isFileVarDecl()) { + llvm::Value *V = CGM.GetAddrOfGlobalVar(VD); + if (VD->getType()->isReferenceType()) + V = Builder.CreateLoad(V, "tmp"); + LValue LV = LValue::MakeAddr(V, E->getType().getCVRQualifiers(), + getContext().getObjCGCAttrKind(E->getType())); + if (LV.isObjCStrong()) + LV.SetGlobalObjCRef(LV, true); + return LV; + } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(E->getDecl())) { + llvm::Value* V = CGM.GetAddrOfFunction(GlobalDecl(FD)); + if (!FD->hasPrototype()) { + if (const FunctionProtoType *Proto = + FD->getType()->getAsFunctionProtoType()) { + // Ugly case: for a K&R-style definition, the type of the definition + // isn't the same as the type of a use. Correct for this with a + // bitcast. 
+ QualType NoProtoType = + getContext().getFunctionNoProtoType(Proto->getResultType()); + NoProtoType = getContext().getPointerType(NoProtoType); + V = Builder.CreateBitCast(V, ConvertType(NoProtoType), "tmp"); + } + } + return LValue::MakeAddr(V, E->getType().getCVRQualifiers(), + getContext().getObjCGCAttrKind(E->getType())); + } + else if (const ImplicitParamDecl *IPD = + dyn_cast<ImplicitParamDecl>(E->getDecl())) { + llvm::Value *V = LocalDeclMap[IPD]; + assert(V && "BlockVarDecl not entered in LocalDeclMap?"); + return LValue::MakeAddr(V, E->getType().getCVRQualifiers(), + getContext().getObjCGCAttrKind(E->getType())); + } + assert(0 && "Unimp declref"); + //an invalid LValue, but the assert will + //ensure that this point is never reached. + return LValue(); +} + +LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) { + return LValue::MakeAddr(GetAddrOfBlockDecl(E), + E->getType().getCVRQualifiers(), + getContext().getObjCGCAttrKind(E->getType())); +} + +LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) { + // __extension__ doesn't affect lvalue-ness. + if (E->getOpcode() == UnaryOperator::Extension) + return EmitLValue(E->getSubExpr()); + + QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType()); + switch (E->getOpcode()) { + default: assert(0 && "Unknown unary operator lvalue!"); + case UnaryOperator::Deref: + { + QualType T = + E->getSubExpr()->getType()->getAsPointerType()->getPointeeType(); + LValue LV = LValue::MakeAddr(EmitScalarExpr(E->getSubExpr()), + ExprTy->getAsPointerType()->getPointeeType() + .getCVRQualifiers(), + getContext().getObjCGCAttrKind(T)); + // We should not generate __weak write barrier on indirect reference + // of a pointer to object; as in void foo (__weak id *param); *param = 0; + // But, we continue to generate __strong write barrier on indirect write + // into a pointer to object. 
+ if (getContext().getLangOptions().ObjC1 && + getContext().getLangOptions().getGCMode() != LangOptions::NonGC && + LV.isObjCWeak()) + LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext())); + return LV; + } + case UnaryOperator::Real: + case UnaryOperator::Imag: + LValue LV = EmitLValue(E->getSubExpr()); + unsigned Idx = E->getOpcode() == UnaryOperator::Imag; + return LValue::MakeAddr(Builder.CreateStructGEP(LV.getAddress(), + Idx, "idx"), + ExprTy.getCVRQualifiers()); + } +} + +LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) { + return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromLiteral(E), 0); +} + +LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) { + return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromObjCEncode(E), 0); +} + + +LValue CodeGenFunction::EmitPredefinedFunctionName(unsigned Type) { + std::string GlobalVarName; + + switch (Type) { + default: + assert(0 && "Invalid type"); + case PredefinedExpr::Func: + GlobalVarName = "__func__."; + break; + case PredefinedExpr::Function: + GlobalVarName = "__FUNCTION__."; + break; + case PredefinedExpr::PrettyFunction: + // FIXME:: Demangle C++ method names + GlobalVarName = "__PRETTY_FUNCTION__."; + break; + } + + // FIXME: This isn't right at all. The logic for computing this should go + // into a method on PredefinedExpr. This would allow sema and codegen to be + // consistent for things like sizeof(__func__) etc. + std::string FunctionName; + if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) { + FunctionName = CGM.getMangledName(FD); + } else { + // Just get the mangled name; skipping the asm prefix if it + // exists. 
+ FunctionName = CurFn->getName(); + if (FunctionName[0] == '\01') + FunctionName = FunctionName.substr(1, std::string::npos); + } + + GlobalVarName += FunctionName; + llvm::Constant *C = + CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str()); + return LValue::MakeAddr(C, 0); +} + +LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) { + switch (E->getIdentType()) { + default: + return EmitUnsupportedLValue(E, "predefined expression"); + case PredefinedExpr::Func: + case PredefinedExpr::Function: + case PredefinedExpr::PrettyFunction: + return EmitPredefinedFunctionName(E->getIdentType()); + } +} + +LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) { + // The index must always be an integer, which is not an aggregate. Emit it. + llvm::Value *Idx = EmitScalarExpr(E->getIdx()); + + // If the base is a vector type, then we are forming a vector element lvalue + // with this subscript. + if (E->getBase()->getType()->isVectorType()) { + // Emit the vector as an lvalue to get its address. + LValue LHS = EmitLValue(E->getBase()); + assert(LHS.isSimple() && "Can only subscript lvalue vectors here!"); + // FIXME: This should properly sign/zero/extend or truncate Idx to i32. + return LValue::MakeVectorElt(LHS.getAddress(), Idx, + E->getBase()->getType().getCVRQualifiers()); + } + + // The base must be a pointer, which is not an aggregate. Emit it. + llvm::Value *Base = EmitScalarExpr(E->getBase()); + + // Extend or truncate the index type to 32 or 64-bits. + QualType IdxTy = E->getIdx()->getType(); + bool IdxSigned = IdxTy->isSignedIntegerType(); + unsigned IdxBitwidth = cast<llvm::IntegerType>(Idx->getType())->getBitWidth(); + if (IdxBitwidth != LLVMPointerWidth) + Idx = Builder.CreateIntCast(Idx, llvm::IntegerType::get(LLVMPointerWidth), + IdxSigned, "idxprom"); + + // We know that the pointer points to a type of the correct size, + // unless the size is a VLA or Objective-C interface. 
+ llvm::Value *Address = 0; + if (const VariableArrayType *VAT = + getContext().getAsVariableArrayType(E->getType())) { + llvm::Value *VLASize = VLASizeMap[VAT]; + + Idx = Builder.CreateMul(Idx, VLASize); + + QualType BaseType = getContext().getBaseElementType(VAT); + + uint64_t BaseTypeSize = getContext().getTypeSize(BaseType) / 8; + Idx = Builder.CreateUDiv(Idx, + llvm::ConstantInt::get(Idx->getType(), + BaseTypeSize)); + Address = Builder.CreateGEP(Base, Idx, "arrayidx"); + } else if (const ObjCInterfaceType *OIT = + dyn_cast<ObjCInterfaceType>(E->getType())) { + llvm::Value *InterfaceSize = + llvm::ConstantInt::get(Idx->getType(), + getContext().getTypeSize(OIT) / 8); + + Idx = Builder.CreateMul(Idx, InterfaceSize); + + llvm::Type *i8PTy = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + Address = Builder.CreateGEP(Builder.CreateBitCast(Base, i8PTy), + Idx, "arrayidx"); + Address = Builder.CreateBitCast(Address, Base->getType()); + } else { + Address = Builder.CreateGEP(Base, Idx, "arrayidx"); + } + + QualType T = E->getBase()->getType()->getAsPointerType()->getPointeeType(); + LValue LV = LValue::MakeAddr(Address, + T.getCVRQualifiers(), + getContext().getObjCGCAttrKind(T)); + if (getContext().getLangOptions().ObjC1 && + getContext().getLangOptions().getGCMode() != LangOptions::NonGC) + LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext())); + return LV; +} + +static +llvm::Constant *GenerateConstantVector(llvm::SmallVector<unsigned, 4> &Elts) { + llvm::SmallVector<llvm::Constant *, 4> CElts; + + for (unsigned i = 0, e = Elts.size(); i != e; ++i) + CElts.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, Elts[i])); + + return llvm::ConstantVector::get(&CElts[0], CElts.size()); +} + +LValue CodeGenFunction:: +EmitExtVectorElementExpr(const ExtVectorElementExpr *E) { + // Emit the base vector as an l-value. + LValue Base; + + // ExtVectorElementExpr's base can either be a vector or pointer to vector. 
+ if (!E->isArrow()) { + assert(E->getBase()->getType()->isVectorType()); + Base = EmitLValue(E->getBase()); + } else { + const PointerType *PT = E->getBase()->getType()->getAsPointerType(); + llvm::Value *Ptr = EmitScalarExpr(E->getBase()); + Base = LValue::MakeAddr(Ptr, PT->getPointeeType().getCVRQualifiers()); + } + + // Encode the element access list into a vector of unsigned indices. + llvm::SmallVector<unsigned, 4> Indices; + E->getEncodedElementAccess(Indices); + + if (Base.isSimple()) { + llvm::Constant *CV = GenerateConstantVector(Indices); + return LValue::MakeExtVectorElt(Base.getAddress(), CV, + Base.getQualifiers()); + } + assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!"); + + llvm::Constant *BaseElts = Base.getExtVectorElts(); + llvm::SmallVector<llvm::Constant *, 4> CElts; + + for (unsigned i = 0, e = Indices.size(); i != e; ++i) { + if (isa<llvm::ConstantAggregateZero>(BaseElts)) + CElts.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0)); + else + CElts.push_back(BaseElts->getOperand(Indices[i])); + } + llvm::Constant *CV = llvm::ConstantVector::get(&CElts[0], CElts.size()); + return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, + Base.getQualifiers()); +} + +LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) { + bool isUnion = false; + bool isIvar = false; + bool isNonGC = false; + Expr *BaseExpr = E->getBase(); + llvm::Value *BaseValue = NULL; + unsigned CVRQualifiers=0; + + // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. 
+ if (E->isArrow()) { + BaseValue = EmitScalarExpr(BaseExpr); + const PointerType *PTy = + BaseExpr->getType()->getAsPointerType(); + if (PTy->getPointeeType()->isUnionType()) + isUnion = true; + CVRQualifiers = PTy->getPointeeType().getCVRQualifiers(); + } else if (isa<ObjCPropertyRefExpr>(BaseExpr) || + isa<ObjCKVCRefExpr>(BaseExpr)) { + RValue RV = EmitObjCPropertyGet(BaseExpr); + BaseValue = RV.getAggregateAddr(); + if (BaseExpr->getType()->isUnionType()) + isUnion = true; + CVRQualifiers = BaseExpr->getType().getCVRQualifiers(); + } else { + LValue BaseLV = EmitLValue(BaseExpr); + if (BaseLV.isObjCIvar()) + isIvar = true; + if (BaseLV.isNonGC()) + isNonGC = true; + // FIXME: this isn't right for bitfields. + BaseValue = BaseLV.getAddress(); + if (BaseExpr->getType()->isUnionType()) + isUnion = true; + CVRQualifiers = BaseExpr->getType().getCVRQualifiers(); + } + + FieldDecl *Field = dyn_cast<FieldDecl>(E->getMemberDecl()); + // FIXME: Handle non-field member expressions + assert(Field && "No code generation for non-field member references"); + LValue MemExpLV = EmitLValueForField(BaseValue, Field, isUnion, + CVRQualifiers); + LValue::SetObjCIvar(MemExpLV, isIvar); + LValue::SetObjCNonGC(MemExpLV, isNonGC); + return MemExpLV; +} + +LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue, + FieldDecl* Field, + unsigned CVRQualifiers) { + unsigned idx = CGM.getTypes().getLLVMFieldNo(Field); + // FIXME: CodeGenTypes should expose a method to get the appropriate type for + // FieldTy (the appropriate type is ABI-dependent). 
+ const llvm::Type *FieldTy = + CGM.getTypes().ConvertTypeForMem(Field->getType()); + const llvm::PointerType *BaseTy = + cast<llvm::PointerType>(BaseValue->getType()); + unsigned AS = BaseTy->getAddressSpace(); + BaseValue = Builder.CreateBitCast(BaseValue, + llvm::PointerType::get(FieldTy, AS), + "tmp"); + llvm::Value *V = Builder.CreateGEP(BaseValue, + llvm::ConstantInt::get(llvm::Type::Int32Ty, idx), + "tmp"); + + CodeGenTypes::BitFieldInfo bitFieldInfo = + CGM.getTypes().getBitFieldInfo(Field); + return LValue::MakeBitfield(V, bitFieldInfo.Begin, bitFieldInfo.Size, + Field->getType()->isSignedIntegerType(), + Field->getType().getCVRQualifiers()|CVRQualifiers); +} + +LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue, + FieldDecl* Field, + bool isUnion, + unsigned CVRQualifiers) +{ + if (Field->isBitField()) + return EmitLValueForBitfield(BaseValue, Field, CVRQualifiers); + + unsigned idx = CGM.getTypes().getLLVMFieldNo(Field); + llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp"); + + // Match union field type. + if (isUnion) { + const llvm::Type *FieldTy = + CGM.getTypes().ConvertTypeForMem(Field->getType()); + const llvm::PointerType * BaseTy = + cast<llvm::PointerType>(BaseValue->getType()); + unsigned AS = BaseTy->getAddressSpace(); + V = Builder.CreateBitCast(V, + llvm::PointerType::get(FieldTy, AS), + "tmp"); + } + if (Field->getType()->isReferenceType()) + V = Builder.CreateLoad(V, "tmp"); + + QualType::GCAttrTypes attr = QualType::GCNone; + if (CGM.getLangOptions().ObjC1 && + CGM.getLangOptions().getGCMode() != LangOptions::NonGC) { + QualType Ty = Field->getType(); + attr = Ty.getObjCGCAttr(); + if (attr != QualType::GCNone) { + // __weak attribute on a field is ignored. 
+ if (attr == QualType::Weak) + attr = QualType::GCNone; + } + else if (getContext().isObjCObjectPointerType(Ty)) + attr = QualType::Strong; + } + LValue LV = + LValue::MakeAddr(V, + Field->getType().getCVRQualifiers()|CVRQualifiers, + attr); + return LV; +} + +LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr* E){ + const llvm::Type *LTy = ConvertType(E->getType()); + llvm::Value *DeclPtr = CreateTempAlloca(LTy, ".compoundliteral"); + + const Expr* InitExpr = E->getInitializer(); + LValue Result = LValue::MakeAddr(DeclPtr, E->getType().getCVRQualifiers()); + + if (E->getType()->isComplexType()) { + EmitComplexExprIntoAddr(InitExpr, DeclPtr, false); + } else if (hasAggregateLLVMType(E->getType())) { + EmitAnyExpr(InitExpr, DeclPtr, false); + } else { + EmitStoreThroughLValue(EmitAnyExpr(InitExpr), Result, E->getType()); + } + + return Result; +} + +LValue CodeGenFunction::EmitConditionalOperator(const ConditionalOperator* E) { + // We don't handle vectors yet. + if (E->getType()->isVectorType()) + return EmitUnsupportedLValue(E, "conditional operator"); + + // ?: here should be an aggregate. + assert((hasAggregateLLVMType(E->getType()) && + !E->getType()->isAnyComplexType()) && + "Unexpected conditional operator!"); + + llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType())); + EmitAggExpr(E, Temp, false); + + return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(), + getContext().getObjCGCAttrKind(E->getType())); + +} + +/// EmitCastLValue - Casts are never lvalues. If a cast is needed by the code +/// generator in an lvalue context, then it must mean that we need the address +/// of an aggregate in order to access one of its fields. This can happen for +/// all the reasons that casts are permitted with aggregate result, including +/// noop aggregate casts, and cast from scalar to union. 
+LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { + // If this is an aggregate-to-aggregate cast, just use the input's address as + // the lvalue. + if (getContext().hasSameUnqualifiedType(E->getType(), + E->getSubExpr()->getType())) + return EmitLValue(E->getSubExpr()); + + // Otherwise, we must have a cast from scalar to union. + assert(E->getType()->isUnionType() && "Expected scalar-to-union cast"); + + // Casts are only lvalues when the source and destination types are the same. + llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType())); + EmitAnyExpr(E->getSubExpr(), Temp, false); + + return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(), + getContext().getObjCGCAttrKind(E->getType())); +} + +//===--------------------------------------------------------------------===// +// Expression Emission +//===--------------------------------------------------------------------===// + + +RValue CodeGenFunction::EmitCallExpr(const CallExpr *E) { + // Builtins never have block type. 
+ if (E->getCallee()->getType()->isBlockPointerType()) + return EmitBlockCallExpr(E); + + if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E)) + return EmitCXXMemberCallExpr(CE); + + const Decl *TargetDecl = 0; + if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) { + if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) { + TargetDecl = DRE->getDecl(); + if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(TargetDecl)) + if (unsigned builtinID = FD->getBuiltinID(getContext())) + return EmitBuiltinExpr(FD, builtinID, E); + } + } + + if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E)) { + if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl)) + return EmitCXXOperatorMemberCallExpr(CE, MD); + } + + llvm::Value *Callee = EmitScalarExpr(E->getCallee()); + return EmitCall(Callee, E->getCallee()->getType(), + E->arg_begin(), E->arg_end(), TargetDecl); +} + +LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) { + // Comma expressions just emit their LHS then their RHS as an l-value. + if (E->getOpcode() == BinaryOperator::Comma) { + EmitAnyExpr(E->getLHS()); + return EmitLValue(E->getRHS()); + } + + // Can only get l-value for binary operator expressions which are a + // simple assignment of aggregate type. + if (E->getOpcode() != BinaryOperator::Assign) + return EmitUnsupportedLValue(E, "binary l-value expression"); + + llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType())); + EmitAggExpr(E, Temp, false); + // FIXME: Are these qualifiers correct? 
+ return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(), + getContext().getObjCGCAttrKind(E->getType())); +} + +LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) { + RValue RV = EmitCallExpr(E); + + if (RV.isScalar()) { + assert(E->getCallReturnType()->isReferenceType() && + "Can't have a scalar return unless the return type is a " + "reference type!"); + + return LValue::MakeAddr(RV.getScalarVal(), E->getType().getCVRQualifiers(), + getContext().getObjCGCAttrKind(E->getType())); + } + + return LValue::MakeAddr(RV.getAggregateAddr(), + E->getType().getCVRQualifiers(), + getContext().getObjCGCAttrKind(E->getType())); +} + +LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) { + // FIXME: This shouldn't require another copy. + llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType())); + EmitAggExpr(E, Temp, false); + return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers()); +} + +LValue +CodeGenFunction::EmitCXXConditionDeclLValue(const CXXConditionDeclExpr *E) { + EmitLocalBlockVarDecl(*E->getVarDecl()); + return EmitDeclRefLValue(E); +} + +LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) { + llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(E->getType()), "tmp"); + EmitCXXConstructExpr(Temp, E); + return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers()); +} + +LValue +CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) { + LValue LV = EmitLValue(E->getSubExpr()); + + PushCXXTemporary(E->getTemporary(), LV.getAddress()); + + return LV; +} + +LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) { + // Can only get l-value for message expression returning aggregate type + RValue RV = EmitObjCMessageExpr(E); + // FIXME: can this be volatile? 
+ return LValue::MakeAddr(RV.getAggregateAddr(), + E->getType().getCVRQualifiers(), + getContext().getObjCGCAttrKind(E->getType())); +} + +llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface, + const ObjCIvarDecl *Ivar) { + return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar); +} + +LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy, + llvm::Value *BaseValue, + const ObjCIvarDecl *Ivar, + unsigned CVRQualifiers) { + return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue, + Ivar, CVRQualifiers); +} + +LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) { + // FIXME: A lot of the code below could be shared with EmitMemberExpr. + llvm::Value *BaseValue = 0; + const Expr *BaseExpr = E->getBase(); + unsigned CVRQualifiers = 0; + QualType ObjectTy; + if (E->isArrow()) { + BaseValue = EmitScalarExpr(BaseExpr); + const PointerType *PTy = BaseExpr->getType()->getAsPointerType(); + ObjectTy = PTy->getPointeeType(); + CVRQualifiers = ObjectTy.getCVRQualifiers(); + } else { + LValue BaseLV = EmitLValue(BaseExpr); + // FIXME: this isn't right for bitfields. + BaseValue = BaseLV.getAddress(); + ObjectTy = BaseExpr->getType(); + CVRQualifiers = ObjectTy.getCVRQualifiers(); + } + + return EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(), CVRQualifiers); +} + +LValue +CodeGenFunction::EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E) { + // This is a special l-value that just issues sends when we load or + // store through it. + return LValue::MakePropertyRef(E, E->getType().getCVRQualifiers()); +} + +LValue +CodeGenFunction::EmitObjCKVCRefLValue(const ObjCKVCRefExpr *E) { + // This is a special l-value that just issues sends when we load or + // store through it. 
+ return LValue::MakeKVCRef(E, E->getType().getCVRQualifiers()); +} + +LValue +CodeGenFunction::EmitObjCSuperExprLValue(const ObjCSuperExpr *E) { + return EmitUnsupportedLValue(E, "use of super"); +} + +LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) { + + // Can only get l-value for message expression returning aggregate type + RValue RV = EmitAnyExprToTemp(E); + // FIXME: can this be volatile? + return LValue::MakeAddr(RV.getAggregateAddr(), + E->getType().getCVRQualifiers(), + getContext().getObjCGCAttrKind(E->getType())); +} + + +RValue CodeGenFunction::EmitCall(llvm::Value *Callee, QualType CalleeType, + CallExpr::const_arg_iterator ArgBeg, + CallExpr::const_arg_iterator ArgEnd, + const Decl *TargetDecl) { + // Get the actual function type. The callee type will always be a + // pointer to function type or a block pointer type. + assert(CalleeType->isFunctionPointerType() && + "Call must have function pointer type!"); + + QualType FnType = CalleeType->getAsPointerType()->getPointeeType(); + QualType ResultType = FnType->getAsFunctionType()->getResultType(); + + CallArgList Args; + EmitCallArgs(Args, FnType->getAsFunctionProtoType(), ArgBeg, ArgEnd); + + return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args), + Callee, Args, TargetDecl); +} diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp new file mode 100644 index 000000000000..469c8306b9dd --- /dev/null +++ b/lib/CodeGen/CGExprAgg.cpp @@ -0,0 +1,554 @@ +//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This contains code to emit Aggregate Expr nodes as LLVM code. 
+// +//===----------------------------------------------------------------------===// + +#include "CodeGenFunction.h" +#include "CodeGenModule.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/StmtVisitor.h" +#include "llvm/Constants.h" +#include "llvm/Function.h" +#include "llvm/GlobalVariable.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Intrinsics.h" +using namespace clang; +using namespace CodeGen; + +//===----------------------------------------------------------------------===// +// Aggregate Expression Emitter +//===----------------------------------------------------------------------===// + +namespace { +class VISIBILITY_HIDDEN AggExprEmitter : public StmtVisitor<AggExprEmitter> { + CodeGenFunction &CGF; + CGBuilderTy &Builder; + llvm::Value *DestPtr; + bool VolatileDest; + bool IgnoreResult; + +public: + AggExprEmitter(CodeGenFunction &cgf, llvm::Value *destPtr, bool v, + bool ignore) + : CGF(cgf), Builder(CGF.Builder), + DestPtr(destPtr), VolatileDest(v), IgnoreResult(ignore) { + } + + //===--------------------------------------------------------------------===// + // Utilities + //===--------------------------------------------------------------------===// + + /// EmitAggLoadOfLValue - Given an expression with aggregate type that + /// represents a value lvalue, this method emits the address of the lvalue, + /// then loads the result into DestPtr. + void EmitAggLoadOfLValue(const Expr *E); + + /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired. 
+ void EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore = false); + void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false); + + //===--------------------------------------------------------------------===// + // Visitor Methods + //===--------------------------------------------------------------------===// + + void VisitStmt(Stmt *S) { + CGF.ErrorUnsupported(S, "aggregate expression"); + } + void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); } + void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); } + + // l-values. + void VisitDeclRefExpr(DeclRefExpr *DRE) { EmitAggLoadOfLValue(DRE); } + void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); } + void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); } + void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); } + void VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { + EmitAggLoadOfLValue(E); + } + void VisitArraySubscriptExpr(ArraySubscriptExpr *E) { + EmitAggLoadOfLValue(E); + } + void VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) { + EmitAggLoadOfLValue(E); + } + void VisitPredefinedExpr(const PredefinedExpr *E) { + EmitAggLoadOfLValue(E); + } + + // Operators. 
+ void VisitCStyleCastExpr(CStyleCastExpr *E); + void VisitImplicitCastExpr(ImplicitCastExpr *E); + void VisitCallExpr(const CallExpr *E); + void VisitStmtExpr(const StmtExpr *E); + void VisitBinaryOperator(const BinaryOperator *BO); + void VisitBinAssign(const BinaryOperator *E); + void VisitBinComma(const BinaryOperator *E); + + void VisitObjCMessageExpr(ObjCMessageExpr *E); + void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) { + EmitAggLoadOfLValue(E); + } + void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E); + void VisitObjCKVCRefExpr(ObjCKVCRefExpr *E); + + void VisitConditionalOperator(const ConditionalOperator *CO); + void VisitInitListExpr(InitListExpr *E); + void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) { + Visit(DAE->getExpr()); + } + void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E); + void VisitCXXConstructExpr(const CXXConstructExpr *E); + void VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E); + + void VisitVAArgExpr(VAArgExpr *E); + + void EmitInitializationToLValue(Expr *E, LValue Address); + void EmitNullInitializationToLValue(LValue Address, QualType T); + // case Expr::ChooseExprClass: + +}; +} // end anonymous namespace. + +//===----------------------------------------------------------------------===// +// Utilities +//===----------------------------------------------------------------------===// + +/// EmitAggLoadOfLValue - Given an expression with aggregate type that +/// represents a value lvalue, this method emits the address of the lvalue, +/// then loads the result into DestPtr. +void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) { + LValue LV = CGF.EmitLValue(E); + EmitFinalDestCopy(E, LV); +} + +/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired. +void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) { + assert(Src.isAggregate() && "value must be aggregate value!"); + + // If the result is ignored, don't copy from the value. 
+ if (DestPtr == 0) { + if (!Src.isVolatileQualified() || (IgnoreResult && Ignore)) + return; + // If the source is volatile, we must read from it; to do that, we need + // some place to put it. + DestPtr = CGF.CreateTempAlloca(CGF.ConvertType(E->getType()), "agg.tmp"); + } + + // If the result of the assignment is used, copy the LHS there also. + // FIXME: Pass VolatileDest as well. I think we also need to merge volatile + // from the source as well, as we can't eliminate it if either operand + // is volatile, unless copy has volatile for both source and destination.. + CGF.EmitAggregateCopy(DestPtr, Src.getAggregateAddr(), E->getType(), + VolatileDest|Src.isVolatileQualified()); +} + +/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired. +void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) { + assert(Src.isSimple() && "Can't have aggregate bitfield, vector, etc"); + + EmitFinalDestCopy(E, RValue::getAggregate(Src.getAddress(), + Src.isVolatileQualified()), + Ignore); +} + +//===----------------------------------------------------------------------===// +// Visitor Methods +//===----------------------------------------------------------------------===// + +void AggExprEmitter::VisitCStyleCastExpr(CStyleCastExpr *E) { + // GCC union extension + if (E->getType()->isUnionType()) { + RecordDecl *SD = E->getType()->getAsRecordType()->getDecl(); + LValue FieldLoc = CGF.EmitLValueForField(DestPtr, + *SD->field_begin(CGF.getContext()), + true, 0); + EmitInitializationToLValue(E->getSubExpr(), FieldLoc); + return; + } + + Visit(E->getSubExpr()); +} + +void AggExprEmitter::VisitImplicitCastExpr(ImplicitCastExpr *E) { + assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(), + E->getType()) && + "Implicit cast types must be compatible"); + Visit(E->getSubExpr()); +} + +void AggExprEmitter::VisitCallExpr(const CallExpr *E) { + if (E->getCallReturnType()->isReferenceType()) { + EmitAggLoadOfLValue(E); + return; + } 
+ + RValue RV = CGF.EmitCallExpr(E); + EmitFinalDestCopy(E, RV); +} + +void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) { + RValue RV = CGF.EmitObjCMessageExpr(E); + EmitFinalDestCopy(E, RV); +} + +void AggExprEmitter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) { + RValue RV = CGF.EmitObjCPropertyGet(E); + EmitFinalDestCopy(E, RV); +} + +void AggExprEmitter::VisitObjCKVCRefExpr(ObjCKVCRefExpr *E) { + RValue RV = CGF.EmitObjCPropertyGet(E); + EmitFinalDestCopy(E, RV); +} + +void AggExprEmitter::VisitBinComma(const BinaryOperator *E) { + CGF.EmitAnyExpr(E->getLHS(), 0, false, true); + CGF.EmitAggExpr(E->getRHS(), DestPtr, VolatileDest); +} + +void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) { + CGF.EmitCompoundStmt(*E->getSubStmt(), true, DestPtr, VolatileDest); +} + +void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) { + CGF.ErrorUnsupported(E, "aggregate binary expression"); +} + +void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) { + // For an assignment to work, the value on the right has + // to be compatible with the value on the left. + assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(), + E->getRHS()->getType()) + && "Invalid assignment"); + LValue LHS = CGF.EmitLValue(E->getLHS()); + + // We have to special case property setters, otherwise we must have + // a simple lvalue (no aggregates inside vectors, bitfields). 
+ if (LHS.isPropertyRef()) { + llvm::Value *AggLoc = DestPtr; + if (!AggLoc) + AggLoc = CGF.CreateTempAlloca(CGF.ConvertType(E->getRHS()->getType())); + CGF.EmitAggExpr(E->getRHS(), AggLoc, VolatileDest); + CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(), + RValue::getAggregate(AggLoc, VolatileDest)); + } + else if (LHS.isKVCRef()) { + llvm::Value *AggLoc = DestPtr; + if (!AggLoc) + AggLoc = CGF.CreateTempAlloca(CGF.ConvertType(E->getRHS()->getType())); + CGF.EmitAggExpr(E->getRHS(), AggLoc, VolatileDest); + CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(), + RValue::getAggregate(AggLoc, VolatileDest)); + } else { + // Codegen the RHS so that it stores directly into the LHS. + CGF.EmitAggExpr(E->getRHS(), LHS.getAddress(), LHS.isVolatileQualified()); + EmitFinalDestCopy(E, LHS, true); + } +} + +void AggExprEmitter::VisitConditionalOperator(const ConditionalOperator *E) { + llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true"); + llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false"); + llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end"); + + llvm::Value *Cond = CGF.EvaluateExprAsBool(E->getCond()); + Builder.CreateCondBr(Cond, LHSBlock, RHSBlock); + + CGF.EmitBlock(LHSBlock); + + // Handle the GNU extension for missing LHS. + assert(E->getLHS() && "Must have LHS for aggregate value"); + + Visit(E->getLHS()); + CGF.EmitBranch(ContBlock); + + CGF.EmitBlock(RHSBlock); + + Visit(E->getRHS()); + CGF.EmitBranch(ContBlock); + + CGF.EmitBlock(ContBlock); +} + +void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) { + llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr()); + llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType()); + + if (!ArgPtr) { + CGF.ErrorUnsupported(VE, "aggregate va_arg expression"); + return; + } + + EmitFinalDestCopy(VE, LValue::MakeAddr(ArgPtr, 0)); +} + +void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) { + llvm::Value *Val = DestPtr; + + if (!Val) { + // Create a temporary variable. 
+ Val = CGF.CreateTempAlloca(CGF.ConvertTypeForMem(E->getType()), "tmp"); + + // FIXME: volatile + CGF.EmitAggExpr(E->getSubExpr(), Val, false); + } else + Visit(E->getSubExpr()); + + CGF.PushCXXTemporary(E->getTemporary(), Val); +} + +void +AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) { + llvm::Value *Val = DestPtr; + + if (!Val) { + // Create a temporary variable. + Val = CGF.CreateTempAlloca(CGF.ConvertTypeForMem(E->getType()), "tmp"); + } + + CGF.EmitCXXConstructExpr(Val, E); +} + +void AggExprEmitter::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) { + CGF.EmitCXXExprWithTemporaries(E, DestPtr, VolatileDest); +} + +void AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV) { + // FIXME: Ignore result? + // FIXME: Are initializers affected by volatile? + if (isa<ImplicitValueInitExpr>(E)) { + EmitNullInitializationToLValue(LV, E->getType()); + } else if (E->getType()->isComplexType()) { + CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false); + } else if (CGF.hasAggregateLLVMType(E->getType())) { + CGF.EmitAnyExpr(E, LV.getAddress(), false); + } else { + CGF.EmitStoreThroughLValue(CGF.EmitAnyExpr(E), LV, E->getType()); + } +} + +void AggExprEmitter::EmitNullInitializationToLValue(LValue LV, QualType T) { + if (!CGF.hasAggregateLLVMType(T)) { + // For non-aggregates, we can store zero + llvm::Value *Null = llvm::Constant::getNullValue(CGF.ConvertType(T)); + CGF.EmitStoreThroughLValue(RValue::get(Null), LV, T); + } else { + // Otherwise, just memset the whole thing to zero. This is legal + // because in LLVM, all default initializers are guaranteed to have a + // bit pattern of all zeros. + // FIXME: That isn't true for member pointers! + // There's a potential optimization opportunity in combining + // memsets; that would be easy for arrays, but relatively + // difficult for structures with the current code. 
+ CGF.EmitMemSetToZero(LV.getAddress(), T); + } +} + +void AggExprEmitter::VisitInitListExpr(InitListExpr *E) { +#if 0 + // FIXME: Disabled while we figure out what to do about + // test/CodeGen/bitfield.c + // + // If we can, prefer a copy from a global; this is a lot less code for long + // globals, and it's easier for the current optimizers to analyze. + // FIXME: Should we really be doing this? Should we try to avoid cases where + // we emit a global with a lot of zeros? Should we try to avoid short + // globals? + if (E->isConstantInitializer(CGF.getContext(), 0)) { + llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, &CGF); + llvm::GlobalVariable* GV = + new llvm::GlobalVariable(C->getType(), true, + llvm::GlobalValue::InternalLinkage, + C, "", &CGF.CGM.getModule(), 0); + EmitFinalDestCopy(E, LValue::MakeAddr(GV, 0)); + return; + } +#endif + if (E->hadArrayRangeDesignator()) { + CGF.ErrorUnsupported(E, "GNU array range designator extension"); + } + + // Handle initialization of an array. 
+ if (E->getType()->isArrayType()) { + const llvm::PointerType *APType = + cast<llvm::PointerType>(DestPtr->getType()); + const llvm::ArrayType *AType = + cast<llvm::ArrayType>(APType->getElementType()); + + uint64_t NumInitElements = E->getNumInits(); + + if (E->getNumInits() > 0) { + QualType T1 = E->getType(); + QualType T2 = E->getInit(0)->getType(); + if (CGF.getContext().hasSameUnqualifiedType(T1, T2)) { + EmitAggLoadOfLValue(E->getInit(0)); + return; + } + } + + uint64_t NumArrayElements = AType->getNumElements(); + QualType ElementType = CGF.getContext().getCanonicalType(E->getType()); + ElementType = CGF.getContext().getAsArrayType(ElementType)->getElementType(); + + unsigned CVRqualifier = ElementType.getCVRQualifiers(); + + for (uint64_t i = 0; i != NumArrayElements; ++i) { + llvm::Value *NextVal = Builder.CreateStructGEP(DestPtr, i, ".array"); + if (i < NumInitElements) + EmitInitializationToLValue(E->getInit(i), + LValue::MakeAddr(NextVal, CVRqualifier)); + else + EmitNullInitializationToLValue(LValue::MakeAddr(NextVal, CVRqualifier), + ElementType); + } + return; + } + + assert(E->getType()->isRecordType() && "Only support structs/unions here!"); + + // Do struct initialization; this code just sets each individual member + // to the approprate value. This makes bitfield support automatic; + // the disadvantage is that the generated code is more difficult for + // the optimizer, especially with bitfields. + unsigned NumInitElements = E->getNumInits(); + RecordDecl *SD = E->getType()->getAsRecordType()->getDecl(); + unsigned CurInitVal = 0; + + if (E->getType()->isUnionType()) { + // Only initialize one field of a union. The field itself is + // specified by the initializer list. + if (!E->getInitializedFieldInUnion()) { + // Empty union; we have nothing to do. + +#ifndef NDEBUG + // Make sure that it's really an empty and not a failure of + // semantic analysis. 
+ for (RecordDecl::field_iterator Field = SD->field_begin(CGF.getContext()), + FieldEnd = SD->field_end(CGF.getContext()); + Field != FieldEnd; ++Field) + assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed"); +#endif + return; + } + + // FIXME: volatility + FieldDecl *Field = E->getInitializedFieldInUnion(); + LValue FieldLoc = CGF.EmitLValueForField(DestPtr, Field, true, 0); + + if (NumInitElements) { + // Store the initializer into the field + EmitInitializationToLValue(E->getInit(0), FieldLoc); + } else { + // Default-initialize to null + EmitNullInitializationToLValue(FieldLoc, Field->getType()); + } + + return; + } + + // Here we iterate over the fields; this makes it simpler to both + // default-initialize fields and skip over unnamed fields. + for (RecordDecl::field_iterator Field = SD->field_begin(CGF.getContext()), + FieldEnd = SD->field_end(CGF.getContext()); + Field != FieldEnd; ++Field) { + // We're done once we hit the flexible array member + if (Field->getType()->isIncompleteArrayType()) + break; + + if (Field->isUnnamedBitfield()) + continue; + + // FIXME: volatility + LValue FieldLoc = CGF.EmitLValueForField(DestPtr, *Field, false, 0); + // We never generate write-barries for initialized fields. + LValue::SetObjCNonGC(FieldLoc, true); + if (CurInitVal < NumInitElements) { + // Store the initializer into the field + EmitInitializationToLValue(E->getInit(CurInitVal++), FieldLoc); + } else { + // We're out of initalizers; default-initialize to null + EmitNullInitializationToLValue(FieldLoc, Field->getType()); + } + } +} + +//===----------------------------------------------------------------------===// +// Entry Points into this File +//===----------------------------------------------------------------------===// + +/// EmitAggExpr - Emit the computation of the specified expression of aggregate +/// type. The result is computed into DestPtr. Note that if DestPtr is null, +/// the value of the aggregate expression is not needed. 
If VolatileDest is
+/// true, DestPtr cannot be 0.
+void CodeGenFunction::EmitAggExpr(const Expr *E, llvm::Value *DestPtr,
+                                  bool VolatileDest, bool IgnoreResult) {
+  assert(E && hasAggregateLLVMType(E->getType()) &&
+         "Invalid aggregate expression to emit");
+  assert ((DestPtr != 0 || VolatileDest == false)
+          && "volatile aggregate can't be 0");
+
+  AggExprEmitter(*this, DestPtr, VolatileDest, IgnoreResult)
+    .Visit(const_cast<Expr*>(E));
+}
+
+// EmitAggregateClear - Zero-fill the aggregate at DestPtr via memset.
+// Complex values are handled by the complex-specific paths, not here.
+void CodeGenFunction::EmitAggregateClear(llvm::Value *DestPtr, QualType Ty) {
+  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
+
+  EmitMemSetToZero(DestPtr, Ty);
+}
+
+// EmitAggregateCopy - Copy an aggregate of type Ty from SrcPtr to DestPtr
+// using the llvm.memcpy intrinsic; isVolatile is set when either side of
+// the copy is volatile-qualified.
+void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
+                                        llvm::Value *SrcPtr, QualType Ty,
+                                        bool isVolatile) {
+  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
+
+  // Aggregate assignment turns into llvm.memcpy. This is almost valid per
+  // C99 6.5.16.1p3, which states "If the value being stored in an object is
+  // read from another object that overlaps in any way the storage of the first
+  // object, then the overlap shall be exact and the two objects shall have
+  // qualified or unqualified versions of a compatible type."
+  //
+  // memcpy is not defined if the source and destination pointers are exactly
+  // equal, but other compilers do this optimization, and almost every memcpy
+  // implementation handles this case safely. If there is a libc that does not
+  // safely handle this, we can add a target hook.
+  const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
+  if (DestPtr->getType() != BP)
+    DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
+  if (SrcPtr->getType() != BP)
+    SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");
+
+  // Get size and alignment info for this aggregate.
+  std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
+
+  // FIXME: Handle variable sized types.
+  const llvm::Type *IntPtr = llvm::IntegerType::get(LLVMPointerWidth);
+
+  // FIXME: If we have a volatile struct, the optimizer can remove what might
+  // appear to be `extra' memory ops:
+  //
+  // volatile struct { int i; } a, b;
+  //
+  // int main() {
+  //   a = b;
+  //   a = b;
+  // }
+  //
+  // we need to use a different call here. We use isVolatile to indicate when
+  // either the source or the destination is volatile.
+  Builder.CreateCall4(CGM.getMemCpyFn(),
+                      DestPtr, SrcPtr,
+                      // TypeInfo.first describes size in bits.
+                      llvm::ConstantInt::get(IntPtr, TypeInfo.first/8),
+                      llvm::ConstantInt::get(llvm::Type::Int32Ty,
+                                             TypeInfo.second/8));
+}
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
new file mode 100644
index 000000000000..41fb725fdf72
--- /dev/null
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -0,0 +1,663 @@
+//===--- CGExprComplex.cpp - Emit LLVM Code for Complex Exprs -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Expr nodes with complex types as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/Compiler.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Complex Expression Emitter
+//===----------------------------------------------------------------------===//
+
+typedef CodeGenFunction::ComplexPairTy ComplexPairTy;
+
+namespace {
+/// ComplexExprEmitter - StmtVisitor that lowers an expression of complex type
+/// to a (real, imag) pair of llvm::Value*s (a ComplexPairTy).
+class VISIBILITY_HIDDEN ComplexExprEmitter
+  : public StmtVisitor<ComplexExprEmitter, ComplexPairTy> {
+  CodeGenFunction &CGF;
+  CGBuilderTy &Builder;
+  // True if we should ignore the real/imag component of an expression's value.
+  bool IgnoreReal;
+  bool IgnoreImag;
+  // True if we should ignore the value of a=b
+  bool IgnoreRealAssign;
+  bool IgnoreImagAssign;
+public:
+  ComplexExprEmitter(CodeGenFunction &cgf, bool ir=false, bool ii=false,
+                     bool irn=false, bool iin=false)
+    : CGF(cgf), Builder(CGF.Builder), IgnoreReal(ir), IgnoreImag(ii),
+    IgnoreRealAssign(irn), IgnoreImagAssign(iin) {
+  }
+
+
+  //===--------------------------------------------------------------------===//
+  // Utilities
+  //===--------------------------------------------------------------------===//
+
+  // Test-and-clear accessors: reading an Ignore flag also consumes it, so the
+  // flag only applies to the outermost expression being visited.
+  bool TestAndClearIgnoreReal() {
+    bool I = IgnoreReal;
+    IgnoreReal = false;
+    return I;
+  }
+  bool TestAndClearIgnoreImag() {
+    bool I = IgnoreImag;
+    IgnoreImag = false;
+    return I;
+  }
+  bool TestAndClearIgnoreRealAssign() {
+    bool I = IgnoreRealAssign;
+    IgnoreRealAssign = false;
+    return I;
+  }
+  bool TestAndClearIgnoreImagAssign() {
+    bool I = IgnoreImagAssign;
+    IgnoreImagAssign = false;
+    return I;
+  }
+
+  /// EmitLoadOfLValue - Given an expression with complex type that represents a
+  /// value l-value, this method emits the address of the l-value, then loads
+  /// and returns the result.
+  ComplexPairTy EmitLoadOfLValue(const Expr *E) {
+    LValue LV = CGF.EmitLValue(E);
+    return EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
+  }
+
+  /// EmitLoadOfComplex - Given a pointer to a complex value, emit code to load
+  /// the real and imaginary pieces.
+  ComplexPairTy EmitLoadOfComplex(llvm::Value *SrcPtr, bool isVolatile);
+
+  /// EmitStoreOfComplex - Store the specified real/imag parts into the
+  /// specified value pointer.
+  void EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *ResPtr, bool isVol);
+
+  /// EmitComplexToComplexCast - Emit a cast from complex value Val to DestType.
+  ComplexPairTy EmitComplexToComplexCast(ComplexPairTy Val, QualType SrcType,
+                                         QualType DestType);
+
+  //===--------------------------------------------------------------------===//
+  // Visitor Methods
+  //===--------------------------------------------------------------------===//
+
+  ComplexPairTy VisitStmt(Stmt *S) {
+    S->dump(CGF.getContext().getSourceManager());
+    assert(0 && "Stmt can't have complex result type!");
+    return ComplexPairTy();
+  }
+  ComplexPairTy VisitExpr(Expr *S);
+  ComplexPairTy VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr());}
+  ComplexPairTy VisitImaginaryLiteral(const ImaginaryLiteral *IL);
+
+  // l-values.
+  ComplexPairTy VisitDeclRefExpr(const Expr *E) { return EmitLoadOfLValue(E); }
+  ComplexPairTy VisitArraySubscriptExpr(Expr *E) { return EmitLoadOfLValue(E); }
+  ComplexPairTy VisitMemberExpr(const Expr *E) { return EmitLoadOfLValue(E); }
+
+  // FIXME: CompoundLiteralExpr
+
+  ComplexPairTy EmitCast(Expr *Op, QualType DestTy);
+  ComplexPairTy VisitImplicitCastExpr(ImplicitCastExpr *E) {
+    // Unlike for scalars, we don't have to worry about function->ptr demotion
+    // here.
+    return EmitCast(E->getSubExpr(), E->getType());
+  }
+  ComplexPairTy VisitCastExpr(CastExpr *E) {
+    return EmitCast(E->getSubExpr(), E->getType());
+  }
+  ComplexPairTy VisitCallExpr(const CallExpr *E);
+  ComplexPairTy VisitStmtExpr(const StmtExpr *E);
+
+  // Operators.
+  ComplexPairTy VisitPrePostIncDec(const UnaryOperator *E,
+                                   bool isInc, bool isPre);
+  ComplexPairTy VisitUnaryPostDec(const UnaryOperator *E) {
+    return VisitPrePostIncDec(E, false, false);
+  }
+  ComplexPairTy VisitUnaryPostInc(const UnaryOperator *E) {
+    return VisitPrePostIncDec(E, true, false);
+  }
+  ComplexPairTy VisitUnaryPreDec(const UnaryOperator *E) {
+    return VisitPrePostIncDec(E, false, true);
+  }
+  ComplexPairTy VisitUnaryPreInc(const UnaryOperator *E) {
+    return VisitPrePostIncDec(E, true, true);
+  }
+  ComplexPairTy VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); }
+  ComplexPairTy VisitUnaryPlus (const UnaryOperator *E) {
+    TestAndClearIgnoreReal();
+    TestAndClearIgnoreImag();
+    TestAndClearIgnoreRealAssign();
+    TestAndClearIgnoreImagAssign();
+    return Visit(E->getSubExpr());
+  }
+  ComplexPairTy VisitUnaryMinus (const UnaryOperator *E);
+  ComplexPairTy VisitUnaryNot (const UnaryOperator *E);
+  // LNot,Real,Imag never return complex.
+  ComplexPairTy VisitUnaryExtension(const UnaryOperator *E) {
+    return Visit(E->getSubExpr());
+  }
+  ComplexPairTy VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+    return Visit(DAE->getExpr());
+  }
+  ComplexPairTy VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
+    return CGF.EmitCXXExprWithTemporaries(E).getComplexVal();
+  }
+  ComplexPairTy VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) {
+    assert(E->getType()->isAnyComplexType() && "Expected complex type!");
+    QualType Elem = E->getType()->getAsComplexType()->getElementType();
+    llvm::Constant *Null = llvm::Constant::getNullValue(CGF.ConvertType(Elem));
+    return ComplexPairTy(Null, Null);
+  }
+  ComplexPairTy VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
+    assert(E->getType()->isAnyComplexType() && "Expected complex type!");
+    QualType Elem = E->getType()->getAsComplexType()->getElementType();
+    llvm::Constant *Null = llvm::Constant::getNullValue(CGF.ConvertType(Elem));
+    return ComplexPairTy(Null, Null);
+  }
+
+  // BinOpInfo - Pre-visited operands plus the type the operation computes in.
+  struct BinOpInfo {
+    ComplexPairTy LHS;
+    ComplexPairTy RHS;
+    QualType Ty; // Computation Type.
+  };
+
+  BinOpInfo EmitBinOps(const BinaryOperator *E);
+  ComplexPairTy EmitCompoundAssign(const CompoundAssignOperator *E,
+                                   ComplexPairTy (ComplexExprEmitter::*Func)
+                                   (const BinOpInfo &));
+
+  ComplexPairTy EmitBinAdd(const BinOpInfo &Op);
+  ComplexPairTy EmitBinSub(const BinOpInfo &Op);
+  ComplexPairTy EmitBinMul(const BinOpInfo &Op);
+  ComplexPairTy EmitBinDiv(const BinOpInfo &Op);
+
+  ComplexPairTy VisitBinMul(const BinaryOperator *E) {
+    return EmitBinMul(EmitBinOps(E));
+  }
+  ComplexPairTy VisitBinAdd(const BinaryOperator *E) {
+    return EmitBinAdd(EmitBinOps(E));
+  }
+  ComplexPairTy VisitBinSub(const BinaryOperator *E) {
+    return EmitBinSub(EmitBinOps(E));
+  }
+  ComplexPairTy VisitBinDiv(const BinaryOperator *E) {
+    return EmitBinDiv(EmitBinOps(E));
+  }
+
+  // Compound assignments.
+  ComplexPairTy VisitBinAddAssign(const CompoundAssignOperator *E) {
+    return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinAdd);
+  }
+  ComplexPairTy VisitBinSubAssign(const CompoundAssignOperator *E) {
+    return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinSub);
+  }
+  ComplexPairTy VisitBinMulAssign(const CompoundAssignOperator *E) {
+    return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinMul);
+  }
+  ComplexPairTy VisitBinDivAssign(const CompoundAssignOperator *E) {
+    return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinDiv);
+  }
+
+  // GCC rejects rem/and/or/xor for integer complex.
+  // Logical and/or always return int, never complex.
+
+  // No comparisons produce a complex result.
+  ComplexPairTy VisitBinAssign (const BinaryOperator *E);
+  ComplexPairTy VisitBinComma (const BinaryOperator *E);
+
+
+  ComplexPairTy VisitConditionalOperator(const ConditionalOperator *CO);
+  ComplexPairTy VisitChooseExpr(ChooseExpr *CE);
+
+  ComplexPairTy VisitInitListExpr(InitListExpr *E);
+
+  ComplexPairTy VisitVAArgExpr(VAArgExpr *E);
+};
+} // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
+// Utilities
+//===----------------------------------------------------------------------===//
+
+/// EmitLoadOfComplex - Given an RValue reference for a complex, emit code to
+/// load the real and imaginary pieces, returning them as Real/Imag.
+ComplexPairTy ComplexExprEmitter::EmitLoadOfComplex(llvm::Value *SrcPtr,
+                                                    bool isVolatile) {
+  // Derive load names ("x.real"/"x.imag") from the source pointer's name.
+  llvm::SmallString<64> Name(SrcPtr->getNameStart(),
+                             SrcPtr->getNameStart()+SrcPtr->getNameLen());
+
+  llvm::Value *Real=0, *Imag=0;
+
+  if (!IgnoreReal) {
+    Name += ".realp";
+    llvm::Value *RealPtr = Builder.CreateStructGEP(SrcPtr, 0, Name.c_str());
+
+    Name.pop_back(); // .realp -> .real
+    Real = Builder.CreateLoad(RealPtr, isVolatile, Name.c_str());
+    Name.resize(Name.size()-4); // .real -> .imagp
+  }
+
+  if (!IgnoreImag) {
+    Name += "imagp";
+
+    llvm::Value *ImagPtr = Builder.CreateStructGEP(SrcPtr, 1, Name.c_str());
+
+    Name.pop_back(); // .imagp -> .imag
+    Imag = Builder.CreateLoad(ImagPtr, isVolatile, Name.c_str());
+  }
+  // Either component may be null if its Ignore flag was set.
+  return ComplexPairTy(Real, Imag);
+}
+
+/// EmitStoreOfComplex - Store the specified real/imag parts into the
+/// specified value pointer.
+void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *Ptr,
+                                            bool isVolatile) {
+  llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, "real");
+  llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, "imag");
+
+  Builder.CreateStore(Val.first, RealPtr, isVolatile);
+  Builder.CreateStore(Val.second, ImagPtr, isVolatile);
+}
+
+
+
+//===----------------------------------------------------------------------===//
+// Visitor Methods
+//===----------------------------------------------------------------------===//
+
+/// VisitExpr - Fallback: report the expression as unsupported and recover by
+/// producing an undef (real, imag) pair of the right element type.
+ComplexPairTy ComplexExprEmitter::VisitExpr(Expr *E) {
+  CGF.ErrorUnsupported(E, "complex expression");
+  const llvm::Type *EltTy =
+    CGF.ConvertType(E->getType()->getAsComplexType()->getElementType());
+  llvm::Value *U = llvm::UndefValue::get(EltTy);
+  return ComplexPairTy(U, U);
+}
+
+// An imaginary literal "Xi" lowers to (0, X).
+ComplexPairTy ComplexExprEmitter::
+VisitImaginaryLiteral(const ImaginaryLiteral *IL) {
+  llvm::Value *Imag = CGF.EmitScalarExpr(IL->getSubExpr());
+  return ComplexPairTy(llvm::Constant::getNullValue(Imag->getType()), Imag);
+}
+
+
+ComplexPairTy ComplexExprEmitter::VisitCallExpr(const 
CallExpr *E) {
+  // Calls returning a reference yield an lvalue; load the complex through it.
+  if (E->getCallReturnType()->isReferenceType())
+    return EmitLoadOfLValue(E);
+
+  return CGF.EmitCallExpr(E).getComplexVal();
+}
+
+ComplexPairTy ComplexExprEmitter::VisitStmtExpr(const StmtExpr *E) {
+  return CGF.EmitCompoundStmt(*E->getSubStmt(), true).getComplexVal();
+}
+
+/// EmitComplexToComplexCast - Emit a cast from complex value Val to DestType.
+ComplexPairTy ComplexExprEmitter::EmitComplexToComplexCast(ComplexPairTy Val,
+                                                           QualType SrcType,
+                                                           QualType DestType) {
+  // Get the src/dest element type.
+  SrcType = SrcType->getAsComplexType()->getElementType();
+  DestType = DestType->getAsComplexType()->getElementType();
+
+  // C99 6.3.1.6: When a value of complex type is converted to another
+  // complex type, both the real and imaginary parts follow the conversion
+  // rules for the corresponding real types.
+  Val.first = CGF.EmitScalarConversion(Val.first, SrcType, DestType);
+  Val.second = CGF.EmitScalarConversion(Val.second, SrcType, DestType);
+  return Val;
+}
+
+ComplexPairTy ComplexExprEmitter::EmitCast(Expr *Op, QualType DestTy) {
+  // Two cases here: cast from (complex to complex) and (scalar to complex).
+  if (Op->getType()->isAnyComplexType())
+    return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy);
+
+  // C99 6.3.1.7: When a value of real type is converted to a complex type, the
+  // real part of the complex result value is determined by the rules of
+  // conversion to the corresponding real type and the imaginary part of the
+  // complex result value is a positive zero or an unsigned zero.
+  llvm::Value *Elt = CGF.EmitScalarExpr(Op);
+
+  // Convert the input element to the element type of the complex.
+  DestTy = DestTy->getAsComplexType()->getElementType();
+  Elt = CGF.EmitScalarConversion(Elt, Op->getType(), DestTy);
+
+  // Return (realval, 0).
+  return ComplexPairTy(Elt, llvm::Constant::getNullValue(Elt->getType()));
+}
+
+// VisitPrePostIncDec - Emit ++/-- on a complex lvalue.  Only the real part is
+// adjusted; the imaginary part is stored back unchanged.
+ComplexPairTy ComplexExprEmitter::VisitPrePostIncDec(const UnaryOperator *E,
+                                                     bool isInc, bool isPre) {
+  LValue LV = CGF.EmitLValue(E->getSubExpr());
+  ComplexPairTy InVal = EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
+
+  llvm::Value *NextVal;
+  if (isa<llvm::IntegerType>(InVal.first->getType())) {
+    uint64_t AmountVal = isInc ? 1 : -1;
+    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
+  } else {
+    QualType ElemTy = E->getType()->getAsComplexType()->getElementType();
+    llvm::APFloat FVal(CGF.getContext().getFloatTypeSemantics(ElemTy), 1);
+    if (!isInc)
+      FVal.changeSign();
+    NextVal = llvm::ConstantFP::get(FVal);
+  }
+
+  // Add the inc/dec to the real part.
+  NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
+
+  ComplexPairTy IncVal(NextVal, InVal.second);
+
+  // Store the updated result through the lvalue.
+  EmitStoreOfComplex(IncVal, LV.getAddress(), LV.isVolatileQualified());
+
+  // If this is a postinc, return the value read from memory, otherwise use the
+  // updated value.
+  return isPre ? 
IncVal : InVal;
+}
+
+// Unary minus negates both components: -(a+ib) = (-a) + i(-b).
+ComplexPairTy ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
+  TestAndClearIgnoreReal();
+  TestAndClearIgnoreImag();
+  TestAndClearIgnoreRealAssign();
+  TestAndClearIgnoreImagAssign();
+  ComplexPairTy Op = Visit(E->getSubExpr());
+  llvm::Value *ResR = Builder.CreateNeg(Op.first, "neg.r");
+  llvm::Value *ResI = Builder.CreateNeg(Op.second, "neg.i");
+  return ComplexPairTy(ResR, ResI);
+}
+
+ComplexPairTy ComplexExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
+  TestAndClearIgnoreReal();
+  TestAndClearIgnoreImag();
+  TestAndClearIgnoreRealAssign();
+  TestAndClearIgnoreImagAssign();
+  // ~(a+ib) = a + i*-b
+  ComplexPairTy Op = Visit(E->getSubExpr());
+  llvm::Value *ResI = Builder.CreateNeg(Op.second, "conj.i");
+  return ComplexPairTy(Op.first, ResI);
+}
+
+// (a+ib) + (c+id) = (a+c) + i(b+d)
+ComplexPairTy ComplexExprEmitter::EmitBinAdd(const BinOpInfo &Op) {
+  llvm::Value *ResR = Builder.CreateAdd(Op.LHS.first, Op.RHS.first, "add.r");
+  llvm::Value *ResI = Builder.CreateAdd(Op.LHS.second, Op.RHS.second, "add.i");
+  return ComplexPairTy(ResR, ResI);
+}
+
+// (a+ib) - (c+id) = (a-c) + i(b-d)
+ComplexPairTy ComplexExprEmitter::EmitBinSub(const BinOpInfo &Op) {
+  llvm::Value *ResR = Builder.CreateSub(Op.LHS.first, Op.RHS.first, "sub.r");
+  llvm::Value *ResI = Builder.CreateSub(Op.LHS.second, Op.RHS.second, "sub.i");
+  return ComplexPairTy(ResR, ResI);
+}
+
+
+// (a+ib) * (c+id) = (ac-bd) + i(bc+ad)
+ComplexPairTy ComplexExprEmitter::EmitBinMul(const BinOpInfo &Op) {
+  llvm::Value *ResRl = Builder.CreateMul(Op.LHS.first, Op.RHS.first, "mul.rl");
+  llvm::Value *ResRr = Builder.CreateMul(Op.LHS.second, Op.RHS.second,"mul.rr");
+  llvm::Value *ResR = Builder.CreateSub(ResRl, ResRr, "mul.r");
+
+  llvm::Value *ResIl = Builder.CreateMul(Op.LHS.second, Op.RHS.first, "mul.il");
+  llvm::Value *ResIr = Builder.CreateMul(Op.LHS.first, Op.RHS.second, "mul.ir");
+  llvm::Value *ResI = Builder.CreateAdd(ResIl, ResIr, "mul.i");
+  return ComplexPairTy(ResR, ResI);
+}
+
+ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) {
+  llvm::Value *LHSr = Op.LHS.first, *LHSi = Op.LHS.second;
+  llvm::Value *RHSr = Op.RHS.first, *RHSi = Op.RHS.second;
+
+  // (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
+  llvm::Value *Tmp1 = Builder.CreateMul(LHSr, RHSr, "tmp"); // a*c
+  llvm::Value *Tmp2 = Builder.CreateMul(LHSi, RHSi, "tmp"); // b*d
+  llvm::Value *Tmp3 = Builder.CreateAdd(Tmp1, Tmp2, "tmp"); // ac+bd
+
+  llvm::Value *Tmp4 = Builder.CreateMul(RHSr, RHSr, "tmp"); // c*c
+  llvm::Value *Tmp5 = Builder.CreateMul(RHSi, RHSi, "tmp"); // d*d
+  llvm::Value *Tmp6 = Builder.CreateAdd(Tmp4, Tmp5, "tmp"); // cc+dd
+
+  llvm::Value *Tmp7 = Builder.CreateMul(LHSi, RHSr, "tmp"); // b*c
+  llvm::Value *Tmp8 = Builder.CreateMul(LHSr, RHSi, "tmp"); // a*d
+  llvm::Value *Tmp9 = Builder.CreateSub(Tmp7, Tmp8, "tmp"); // bc-ad
+
+  // Pick the division instruction by element type: fdiv for floating point,
+  // udiv/sdiv for unsigned/signed integer complex.
+  llvm::Value *DSTr, *DSTi;
+  if (Tmp3->getType()->isFloatingPoint()) {
+    DSTr = Builder.CreateFDiv(Tmp3, Tmp6, "tmp");
+    DSTi = Builder.CreateFDiv(Tmp9, Tmp6, "tmp");
+  } else {
+    if (Op.Ty->getAsComplexType()->getElementType()->isUnsignedIntegerType()) {
+      DSTr = Builder.CreateUDiv(Tmp3, Tmp6, "tmp");
+      DSTi = Builder.CreateUDiv(Tmp9, Tmp6, "tmp");
+    } else {
+      DSTr = Builder.CreateSDiv(Tmp3, Tmp6, "tmp");
+      DSTi = Builder.CreateSDiv(Tmp9, Tmp6, "tmp");
+    }
+  }
+
+  return ComplexPairTy(DSTr, DSTi);
+}
+
+// EmitBinOps - Visit both operands and record the computation type, clearing
+// all Ignore* flags first.
+ComplexExprEmitter::BinOpInfo
+ComplexExprEmitter::EmitBinOps(const BinaryOperator *E) {
+  TestAndClearIgnoreReal();
+  TestAndClearIgnoreImag();
+  TestAndClearIgnoreRealAssign();
+  TestAndClearIgnoreImagAssign();
+  BinOpInfo Ops;
+  Ops.LHS = Visit(E->getLHS());
+  Ops.RHS = Visit(E->getRHS());
+  Ops.Ty = E->getType();
+  return Ops;
+}
+
+
+// Compound assignments.
+// EmitCompoundAssign - Emit "lhs op= rhs" for complex lhs: load the LHS, apply
+// Func in the computation type, truncate back, store, then reload the LHS as
+// the expression's value.
+ComplexPairTy ComplexExprEmitter::
+EmitCompoundAssign(const CompoundAssignOperator *E,
+                   ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&)){
+  TestAndClearIgnoreReal();
+  TestAndClearIgnoreImag();
+  bool ignreal = TestAndClearIgnoreRealAssign();
+  bool ignimag = TestAndClearIgnoreImagAssign();
+  QualType LHSTy = E->getLHS()->getType(), RHSTy = E->getRHS()->getType();
+
+  BinOpInfo OpInfo;
+
+  // Load the RHS and LHS operands.
+  // __block variables need to have the rhs evaluated first, plus this should
+  // improve codegen a little. It is possible for the RHS to be complex or
+  // scalar.
+  OpInfo.Ty = E->getComputationResultType();
+  OpInfo.RHS = EmitCast(E->getRHS(), OpInfo.Ty);
+
+  LValue LHSLV = CGF.EmitLValue(E->getLHS());
+
+
+  // We know the LHS is a complex lvalue.
+  OpInfo.LHS=EmitLoadOfComplex(LHSLV.getAddress(),LHSLV.isVolatileQualified());
+  OpInfo.LHS=EmitComplexToComplexCast(OpInfo.LHS, LHSTy, OpInfo.Ty);
+
+  // Expand the binary operator.
+  ComplexPairTy Result = (this->*Func)(OpInfo);
+
+  // Truncate the result back to the LHS type.
+  Result = EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy);
+
+  // Store the result value into the LHS lvalue.
+  EmitStoreOfComplex(Result, LHSLV.getAddress(), LHSLV.isVolatileQualified());
+  // And now return the LHS
+  IgnoreReal = ignreal;
+  IgnoreImag = ignimag;
+  IgnoreRealAssign = ignreal;
+  IgnoreImagAssign = ignimag;
+  return EmitLoadOfComplex(LHSLV.getAddress(), LHSLV.isVolatileQualified());
+}
+
+ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
+  TestAndClearIgnoreReal();
+  TestAndClearIgnoreImag();
+  bool ignreal = TestAndClearIgnoreRealAssign();
+  bool ignimag = TestAndClearIgnoreImagAssign();
+  assert(CGF.getContext().getCanonicalType(E->getLHS()->getType()) ==
+         CGF.getContext().getCanonicalType(E->getRHS()->getType()) &&
+         "Invalid assignment");
+  // Emit the RHS.
+  ComplexPairTy Val = Visit(E->getRHS());
+
+  // Compute the address to store into.
+  LValue LHS = CGF.EmitLValue(E->getLHS());
+
+  // Store into it.
+  EmitStoreOfComplex(Val, LHS.getAddress(), LHS.isVolatileQualified());
+  // And now return the LHS
+  IgnoreReal = ignreal;
+  IgnoreImag = ignimag;
+  IgnoreRealAssign = ignreal;
+  IgnoreImagAssign = ignimag;
+  return EmitLoadOfComplex(LHS.getAddress(), LHS.isVolatileQualified());
+}
+
+// Comma: emit the LHS for its side effects, then the RHS for its value.
+ComplexPairTy ComplexExprEmitter::VisitBinComma(const BinaryOperator *E) {
+  CGF.EmitStmt(E->getLHS());
+  CGF.EnsureInsertPoint();
+  return Visit(E->getRHS());
+}
+
+// "cond ? a : b" - emit both arms in their own blocks and join the component
+// values with PHI nodes in the continuation block.
+ComplexPairTy ComplexExprEmitter::
+VisitConditionalOperator(const ConditionalOperator *E) {
+  TestAndClearIgnoreReal();
+  TestAndClearIgnoreImag();
+  TestAndClearIgnoreRealAssign();
+  TestAndClearIgnoreImagAssign();
+  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
+  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
+  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+
+  llvm::Value *Cond = CGF.EvaluateExprAsBool(E->getCond());
+  Builder.CreateCondBr(Cond, LHSBlock, RHSBlock);
+
+  CGF.EmitBlock(LHSBlock);
+
+  // Handle the GNU extension for missing LHS.
+  assert(E->getLHS() && "Must have LHS for complex value");
+
+  ComplexPairTy LHS = Visit(E->getLHS());
+  LHSBlock = Builder.GetInsertBlock();
+  CGF.EmitBranch(ContBlock);
+
+  CGF.EmitBlock(RHSBlock);
+
+  ComplexPairTy RHS = Visit(E->getRHS());
+  RHSBlock = Builder.GetInsertBlock();
+  CGF.EmitBranch(ContBlock);
+
+  CGF.EmitBlock(ContBlock);
+
+  // Create a PHI node for the real part.
+  llvm::PHINode *RealPN = Builder.CreatePHI(LHS.first->getType(), "cond.r");
+  RealPN->reserveOperandSpace(2);
+  RealPN->addIncoming(LHS.first, LHSBlock);
+  RealPN->addIncoming(RHS.first, RHSBlock);
+
+  // Create a PHI node for the imaginary part.
+  llvm::PHINode *ImagPN = Builder.CreatePHI(LHS.first->getType(), "cond.i");
+  ImagPN->reserveOperandSpace(2);
+  ImagPN->addIncoming(LHS.second, LHSBlock);
+  ImagPN->addIncoming(RHS.second, RHSBlock);
+
+  return ComplexPairTy(RealPN, ImagPN);
+}
+
+ComplexPairTy ComplexExprEmitter::VisitChooseExpr(ChooseExpr *E) {
+  return Visit(E->getChosenSubExpr(CGF.getContext()));
+}
+
+ComplexPairTy ComplexExprEmitter::VisitInitListExpr(InitListExpr *E) {
+  bool Ignore = TestAndClearIgnoreReal();
+  (void)Ignore;
+  assert (Ignore == false && "init list ignored");
+  Ignore = TestAndClearIgnoreImag();
+  (void)Ignore;
+  assert (Ignore == false && "init list ignored");
+  if (E->getNumInits())
+    return Visit(E->getInit(0));
+
+  // Empty init list initializes to null
+  QualType Ty = E->getType()->getAsComplexType()->getElementType();
+  const llvm::Type* LTy = CGF.ConvertType(Ty);
+  llvm::Value* zeroConstant = llvm::Constant::getNullValue(LTy);
+  return ComplexPairTy(zeroConstant, zeroConstant);
+}
+
+ComplexPairTy ComplexExprEmitter::VisitVAArgExpr(VAArgExpr *E) {
+  llvm::Value *ArgValue = CGF.EmitVAListRef(E->getSubExpr());
+  llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, E->getType());
+
+  if (!ArgPtr) {
+    // Target did not provide a va_arg lowering; recover with undef.
+    CGF.ErrorUnsupported(E, "complex va_arg expression");
+    const llvm::Type *EltTy =
+      CGF.ConvertType(E->getType()->getAsComplexType()->getElementType());
+    llvm::Value *U = llvm::UndefValue::get(EltTy);
+    return ComplexPairTy(U, U);
+  }
+
+  // FIXME Volatility.
+  return EmitLoadOfComplex(ArgPtr, false);
+}
+
+//===----------------------------------------------------------------------===//
+// Entry Point into this File
+//===----------------------------------------------------------------------===//
+
+/// EmitComplexExpr - Emit the computation of the specified expression of
+/// complex type, returning its (real, imag) value; components flagged by the
+/// Ignore* parameters need not be emitted.
+ComplexPairTy CodeGenFunction::EmitComplexExpr(const Expr *E, bool IgnoreReal,
+                                               bool IgnoreImag, bool IgnoreRealAssign, bool IgnoreImagAssign) {
+  assert(E && E->getType()->isAnyComplexType() &&
+         "Invalid complex expression to emit");
+
+  // Forward the Ignore* flags so the emitter can skip unused components.
+  return ComplexExprEmitter(*this, IgnoreReal, IgnoreImag, IgnoreRealAssign,
+                            IgnoreImagAssign)
+    .Visit(const_cast<Expr*>(E));
+}
+
+/// EmitComplexExprIntoAddr - Emit the computation of the specified expression
+/// of complex type, storing into the specified Value*.
+/// DestIsVolatile - true when the store must be volatile.
+void CodeGenFunction::EmitComplexExprIntoAddr(const Expr *E,
+                                              llvm::Value *DestAddr,
+                                              bool DestIsVolatile) {
+  assert(E && E->getType()->isAnyComplexType() &&
+         "Invalid complex expression to emit");
+  ComplexExprEmitter Emitter(*this);
+  ComplexPairTy Val = Emitter.Visit(const_cast<Expr*>(E));
+  Emitter.EmitStoreOfComplex(Val, DestAddr, DestIsVolatile);
+}
+
+/// StoreComplexToAddr - Store a complex number into the specified address.
+void CodeGenFunction::StoreComplexToAddr(ComplexPairTy V,
+                                         llvm::Value *DestAddr,
+                                         bool DestIsVolatile) {
+  ComplexExprEmitter(*this).EmitStoreOfComplex(V, DestAddr, DestIsVolatile);
+}
+
+/// LoadComplexFromAddr - Load a complex number from the specified address.
+ComplexPairTy CodeGenFunction::LoadComplexFromAddr(llvm::Value *SrcAddr,
+                                                   bool SrcIsVolatile) {
+  return ComplexExprEmitter(*this).EmitLoadOfComplex(SrcAddr, SrcIsVolatile);
+}
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
new file mode 100644
index 000000000000..b30bafb51051
--- /dev/null
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -0,0 +1,588 @@
+//===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Constant Expr nodes as LLVM code.
+// +//===----------------------------------------------------------------------===// + +#include "CodeGenFunction.h" +#include "CodeGenModule.h" +#include "CGObjCRuntime.h" +#include "clang/AST/APValue.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/StmtVisitor.h" +#include "llvm/Constants.h" +#include "llvm/Function.h" +#include "llvm/GlobalVariable.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Target/TargetData.h" +using namespace clang; +using namespace CodeGen; + +namespace { +class VISIBILITY_HIDDEN ConstExprEmitter : + public StmtVisitor<ConstExprEmitter, llvm::Constant*> { + CodeGenModule &CGM; + CodeGenFunction *CGF; +public: + ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf) + : CGM(cgm), CGF(cgf) { + } + + //===--------------------------------------------------------------------===// + // Visitor Methods + //===--------------------------------------------------------------------===// + + llvm::Constant *VisitStmt(Stmt *S) { + return 0; + } + + llvm::Constant *VisitParenExpr(ParenExpr *PE) { + return Visit(PE->getSubExpr()); + } + + llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { + return Visit(E->getInitializer()); + } + + llvm::Constant *VisitCastExpr(CastExpr* E) { + // GCC cast to union extension + if (E->getType()->isUnionType()) { + const llvm::Type *Ty = ConvertType(E->getType()); + Expr *SubExpr = E->getSubExpr(); + return EmitUnion(CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF), + Ty); + } + // Explicit and implicit no-op casts + QualType Ty = E->getType(), SubTy = E->getSubExpr()->getType(); + if (CGM.getContext().hasSameUnqualifiedType(Ty, SubTy)) { + return Visit(E->getSubExpr()); + } + return 0; + } + + llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) { + return Visit(DAE->getExpr()); + } + + llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) { + std::vector<llvm::Constant*> Elts; + const llvm::ArrayType *AType = + 
cast<llvm::ArrayType>(ConvertType(ILE->getType())); + unsigned NumInitElements = ILE->getNumInits(); + // FIXME: Check for wide strings + // FIXME: Check for NumInitElements exactly equal to 1?? + if (NumInitElements > 0 && + (isa<StringLiteral>(ILE->getInit(0)) || + isa<ObjCEncodeExpr>(ILE->getInit(0))) && + ILE->getType()->getArrayElementTypeNoTypeQual()->isCharType()) + return Visit(ILE->getInit(0)); + const llvm::Type *ElemTy = AType->getElementType(); + unsigned NumElements = AType->getNumElements(); + + // Initialising an array requires us to automatically + // initialise any elements that have not been initialised explicitly + unsigned NumInitableElts = std::min(NumInitElements, NumElements); + + // Copy initializer elements. + unsigned i = 0; + bool RewriteType = false; + for (; i < NumInitableElts; ++i) { + Expr *Init = ILE->getInit(i); + llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF); + if (!C) + return 0; + RewriteType |= (C->getType() != ElemTy); + Elts.push_back(C); + } + + // Initialize remaining array elements. + // FIXME: This doesn't handle member pointers correctly! 
+ for (; i < NumElements; ++i) + Elts.push_back(llvm::Constant::getNullValue(ElemTy)); + + if (RewriteType) { + // FIXME: Try to avoid packing the array + std::vector<const llvm::Type*> Types; + for (unsigned i = 0; i < Elts.size(); ++i) + Types.push_back(Elts[i]->getType()); + const llvm::StructType *SType = llvm::StructType::get(Types, true); + return llvm::ConstantStruct::get(SType, Elts); + } + + return llvm::ConstantArray::get(AType, Elts); + } + + void InsertBitfieldIntoStruct(std::vector<llvm::Constant*>& Elts, + FieldDecl* Field, Expr* E) { + // Calculate the value to insert + llvm::Constant *C = CGM.EmitConstantExpr(E, Field->getType(), CGF); + if (!C) + return; + + llvm::ConstantInt *CI = dyn_cast<llvm::ConstantInt>(C); + if (!CI) { + CGM.ErrorUnsupported(E, "bitfield initialization"); + return; + } + llvm::APInt V = CI->getValue(); + + // Calculate information about the relevant field + const llvm::Type* Ty = CI->getType(); + const llvm::TargetData &TD = CGM.getTypes().getTargetData(); + unsigned size = TD.getTypeAllocSizeInBits(Ty); + unsigned fieldOffset = CGM.getTypes().getLLVMFieldNo(Field) * size; + CodeGenTypes::BitFieldInfo bitFieldInfo = + CGM.getTypes().getBitFieldInfo(Field); + fieldOffset += bitFieldInfo.Begin; + + // Find where to start the insertion + // FIXME: This is O(n^2) in the number of bit-fields! + // FIXME: This won't work if the struct isn't completely packed! + unsigned offset = 0, i = 0; + while (offset < (fieldOffset & -8)) + offset += TD.getTypeAllocSizeInBits(Elts[i++]->getType()); + + // Advance over 0 sized elements (must terminate in bounds since + // the bitfield must have a size). + while (TD.getTypeAllocSizeInBits(Elts[i]->getType()) == 0) + ++i; + + // Promote the size of V if necessary + // FIXME: This should never occur, but currently it can because initializer + // constants are cast to bool, and because clang is not enforcing bitfield + // width limits. 
+ if (bitFieldInfo.Size > V.getBitWidth()) + V.zext(bitFieldInfo.Size); + + // Insert the bits into the struct + // FIXME: This algorithm is only correct on X86! + // FIXME: This algorithm assumes bit-fields only have byte-size elements! + unsigned bitsToInsert = bitFieldInfo.Size; + unsigned curBits = std::min(8 - (fieldOffset & 7), bitsToInsert); + unsigned byte = V.getLoBits(curBits).getZExtValue() << (fieldOffset & 7); + do { + llvm::Constant* byteC = llvm::ConstantInt::get(llvm::Type::Int8Ty, byte); + Elts[i] = llvm::ConstantExpr::getOr(Elts[i], byteC); + ++i; + V = V.lshr(curBits); + bitsToInsert -= curBits; + + if (!bitsToInsert) + break; + + curBits = bitsToInsert > 8 ? 8 : bitsToInsert; + byte = V.getLoBits(curBits).getZExtValue(); + } while (true); + } + + llvm::Constant *EmitStructInitialization(InitListExpr *ILE) { + const llvm::StructType *SType = + cast<llvm::StructType>(ConvertType(ILE->getType())); + RecordDecl *RD = ILE->getType()->getAsRecordType()->getDecl(); + std::vector<llvm::Constant*> Elts; + + // Initialize the whole structure to zero. + // FIXME: This doesn't handle member pointers correctly! + for (unsigned i = 0; i < SType->getNumElements(); ++i) { + const llvm::Type *FieldTy = SType->getElementType(i); + Elts.push_back(llvm::Constant::getNullValue(FieldTy)); + } + + // Copy initializer elements. Skip padding fields. 
+ unsigned EltNo = 0; // Element no in ILE + int FieldNo = 0; // Field no in RecordDecl + bool RewriteType = false; + for (RecordDecl::field_iterator Field = RD->field_begin(CGM.getContext()), + FieldEnd = RD->field_end(CGM.getContext()); + EltNo < ILE->getNumInits() && Field != FieldEnd; ++Field) { + FieldNo++; + if (!Field->getIdentifier()) + continue; + + if (Field->isBitField()) { + InsertBitfieldIntoStruct(Elts, *Field, ILE->getInit(EltNo)); + } else { + unsigned FieldNo = CGM.getTypes().getLLVMFieldNo(*Field); + llvm::Constant *C = CGM.EmitConstantExpr(ILE->getInit(EltNo), + Field->getType(), CGF); + if (!C) return 0; + RewriteType |= (C->getType() != Elts[FieldNo]->getType()); + Elts[FieldNo] = C; + } + EltNo++; + } + + if (RewriteType) { + // FIXME: Make this work for non-packed structs + assert(SType->isPacked() && "Cannot recreate unpacked structs"); + std::vector<const llvm::Type*> Types; + for (unsigned i = 0; i < Elts.size(); ++i) + Types.push_back(Elts[i]->getType()); + SType = llvm::StructType::get(Types, true); + } + + return llvm::ConstantStruct::get(SType, Elts); + } + + llvm::Constant *EmitUnion(llvm::Constant *C, const llvm::Type *Ty) { + if (!C) + return 0; + + // Build a struct with the union sub-element as the first member, + // and padded to the appropriate size + std::vector<llvm::Constant*> Elts; + std::vector<const llvm::Type*> Types; + Elts.push_back(C); + Types.push_back(C->getType()); + unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType()); + unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(Ty); + while (CurSize < TotalSize) { + Elts.push_back(llvm::Constant::getNullValue(llvm::Type::Int8Ty)); + Types.push_back(llvm::Type::Int8Ty); + CurSize++; + } + + // This always generates a packed struct + // FIXME: Try to generate an unpacked struct when we can + llvm::StructType* STy = llvm::StructType::get(Types, true); + return llvm::ConstantStruct::get(STy, Elts); + } + + llvm::Constant 
*EmitUnionInitialization(InitListExpr *ILE) { + const llvm::Type *Ty = ConvertType(ILE->getType()); + + FieldDecl* curField = ILE->getInitializedFieldInUnion(); + if (!curField) { + // There's no field to initialize, so value-initialize the union. +#ifndef NDEBUG + // Make sure that it's really an empty and not a failure of + // semantic analysis. + RecordDecl *RD = ILE->getType()->getAsRecordType()->getDecl(); + for (RecordDecl::field_iterator Field = RD->field_begin(CGM.getContext()), + FieldEnd = RD->field_end(CGM.getContext()); + Field != FieldEnd; ++Field) + assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed"); +#endif + return llvm::Constant::getNullValue(Ty); + } + + if (curField->isBitField()) { + // Create a dummy struct for bit-field insertion + unsigned NumElts = CGM.getTargetData().getTypeAllocSize(Ty); + llvm::Constant* NV = llvm::Constant::getNullValue(llvm::Type::Int8Ty); + std::vector<llvm::Constant*> Elts(NumElts, NV); + + InsertBitfieldIntoStruct(Elts, curField, ILE->getInit(0)); + const llvm::ArrayType *RetTy = + llvm::ArrayType::get(NV->getType(), NumElts); + return llvm::ConstantArray::get(RetTy, Elts); + } + + llvm::Constant *InitElem; + if (ILE->getNumInits() > 0) { + Expr *Init = ILE->getInit(0); + InitElem = CGM.EmitConstantExpr(Init, Init->getType(), CGF); + } else { + InitElem = CGM.EmitNullConstant(curField->getType()); + } + return EmitUnion(InitElem, Ty); + } + + llvm::Constant *EmitVectorInitialization(InitListExpr *ILE) { + const llvm::VectorType *VType = + cast<llvm::VectorType>(ConvertType(ILE->getType())); + const llvm::Type *ElemTy = VType->getElementType(); + std::vector<llvm::Constant*> Elts; + unsigned NumElements = VType->getNumElements(); + unsigned NumInitElements = ILE->getNumInits(); + + unsigned NumInitableElts = std::min(NumInitElements, NumElements); + + // Copy initializer elements. 
+ unsigned i = 0; + for (; i < NumInitableElts; ++i) { + Expr *Init = ILE->getInit(i); + llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF); + if (!C) + return 0; + Elts.push_back(C); + } + + for (; i < NumElements; ++i) + Elts.push_back(llvm::Constant::getNullValue(ElemTy)); + + return llvm::ConstantVector::get(VType, Elts); + } + + llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) { + return CGM.EmitNullConstant(E->getType()); + } + + llvm::Constant *VisitInitListExpr(InitListExpr *ILE) { + if (ILE->getType()->isScalarType()) { + // We have a scalar in braces. Just use the first element. + if (ILE->getNumInits() > 0) { + Expr *Init = ILE->getInit(0); + return CGM.EmitConstantExpr(Init, Init->getType(), CGF); + } + return CGM.EmitNullConstant(ILE->getType()); + } + + if (ILE->getType()->isArrayType()) + return EmitArrayInitialization(ILE); + + if (ILE->getType()->isStructureType()) + return EmitStructInitialization(ILE); + + if (ILE->getType()->isUnionType()) + return EmitUnionInitialization(ILE); + + if (ILE->getType()->isVectorType()) + return EmitVectorInitialization(ILE); + + assert(0 && "Unable to handle InitListExpr"); + // Get rid of control reaches end of void function warning. + // Not reached. + return 0; + } + + llvm::Constant *VisitStringLiteral(StringLiteral *E) { + assert(!E->getType()->isPointerType() && "Strings are always arrays"); + + // This must be a string initializing an array in a static initializer. + // Don't emit it as the address of the string, emit the string data itself + // as an inline array. + return llvm::ConstantArray::get(CGM.GetStringForStringLiteral(E), false); + } + + llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) { + // This must be an @encode initializing an array in a static initializer. + // Don't emit it as the address of the string, emit the string data itself + // as an inline array. 
+ std::string Str; + CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str); + const ConstantArrayType *CAT = cast<ConstantArrayType>(E->getType()); + + // Resize the string to the right size, adding zeros at the end, or + // truncating as needed. + Str.resize(CAT->getSize().getZExtValue(), '\0'); + return llvm::ConstantArray::get(Str, false); + } + + llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) { + return Visit(E->getSubExpr()); + } + + // Utility methods + const llvm::Type *ConvertType(QualType T) { + return CGM.getTypes().ConvertType(T); + } + +public: + llvm::Constant *EmitLValue(Expr *E) { + switch (E->getStmtClass()) { + default: break; + case Expr::CompoundLiteralExprClass: { + // Note that due to the nature of compound literals, this is guaranteed + // to be the only use of the variable, so we just generate it here. + CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E); + llvm::Constant* C = Visit(CLE->getInitializer()); + // FIXME: "Leaked" on failure. + if (C) + C = new llvm::GlobalVariable(C->getType(), + E->getType().isConstQualified(), + llvm::GlobalValue::InternalLinkage, + C, ".compoundliteral", &CGM.getModule()); + return C; + } + case Expr::DeclRefExprClass: + case Expr::QualifiedDeclRefExprClass: { + NamedDecl *Decl = cast<DeclRefExpr>(E)->getDecl(); + if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl)) + return CGM.GetAddrOfFunction(GlobalDecl(FD)); + if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) { + // We can never refer to a variable with local storage. 
+ if (!VD->hasLocalStorage()) { + if (VD->isFileVarDecl() || VD->hasExternalStorage()) + return CGM.GetAddrOfGlobalVar(VD); + else if (VD->isBlockVarDecl()) { + assert(CGF && "Can't access static local vars without CGF"); + return CGF->GetAddrOfStaticLocalVar(VD); + } + } + } + break; + } + case Expr::StringLiteralClass: + return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E)); + case Expr::ObjCEncodeExprClass: + return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E)); + case Expr::ObjCStringLiteralClass: { + ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E); + llvm::Constant *C = CGM.getObjCRuntime().GenerateConstantString(SL); + return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType())); + } + case Expr::PredefinedExprClass: { + // __func__/__FUNCTION__ -> "". __PRETTY_FUNCTION__ -> "top level". + std::string Str; + if (cast<PredefinedExpr>(E)->getIdentType() == + PredefinedExpr::PrettyFunction) + Str = "top level"; + + return CGM.GetAddrOfConstantCString(Str, ".tmp"); + } + case Expr::AddrLabelExprClass: { + assert(CGF && "Invalid address of label expression outside function."); + unsigned id = CGF->GetIDForAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel()); + llvm::Constant *C = llvm::ConstantInt::get(llvm::Type::Int32Ty, id); + return llvm::ConstantExpr::getIntToPtr(C, ConvertType(E->getType())); + } + case Expr::CallExprClass: { + CallExpr* CE = cast<CallExpr>(E); + if (CE->isBuiltinCall(CGM.getContext()) != + Builtin::BI__builtin___CFStringMakeConstantString) + break; + const Expr *Arg = CE->getArg(0)->IgnoreParenCasts(); + const StringLiteral *Literal = cast<StringLiteral>(Arg); + // FIXME: need to deal with UCN conversion issues. 
+ return CGM.GetAddrOfConstantCFString(Literal); + } + case Expr::BlockExprClass: { + std::string FunctionName; + if (CGF) + FunctionName = CGF->CurFn->getName(); + else + FunctionName = "global"; + + return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str()); + } + } + + return 0; + } +}; + +} // end anonymous namespace. + +llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E, + QualType DestType, + CodeGenFunction *CGF) { + Expr::EvalResult Result; + + bool Success = false; + + if (DestType->isReferenceType()) + Success = E->EvaluateAsLValue(Result, Context); + else + Success = E->Evaluate(Result, Context); + + if (Success) { + assert(!Result.HasSideEffects && + "Constant expr should not have any side effects!"); + switch (Result.Val.getKind()) { + case APValue::Uninitialized: + assert(0 && "Constant expressions should be initialized."); + return 0; + case APValue::LValue: { + const llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType); + llvm::Constant *Offset = + llvm::ConstantInt::get(llvm::Type::Int64Ty, + Result.Val.getLValueOffset()); + + llvm::Constant *C; + if (const Expr *LVBase = Result.Val.getLValueBase()) { + C = ConstExprEmitter(*this, CGF).EmitLValue(const_cast<Expr*>(LVBase)); + + // Apply offset if necessary. + if (!Offset->isNullValue()) { + const llvm::Type *Type = + llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Type); + Casted = llvm::ConstantExpr::getGetElementPtr(Casted, &Offset, 1); + C = llvm::ConstantExpr::getBitCast(Casted, C->getType()); + } + + // Convert to the appropriate type; this could be an lvalue for + // an integer. + if (isa<llvm::PointerType>(DestTy)) + return llvm::ConstantExpr::getBitCast(C, DestTy); + + return llvm::ConstantExpr::getPtrToInt(C, DestTy); + } else { + C = Offset; + + // Convert to the appropriate type; this could be an lvalue for + // an integer. 
+ if (isa<llvm::PointerType>(DestTy)) + return llvm::ConstantExpr::getIntToPtr(C, DestTy); + + // If the types don't match this should only be a truncate. + if (C->getType() != DestTy) + return llvm::ConstantExpr::getTrunc(C, DestTy); + + return C; + } + } + case APValue::Int: { + llvm::Constant *C = llvm::ConstantInt::get(Result.Val.getInt()); + + if (C->getType() == llvm::Type::Int1Ty) { + const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType()); + C = llvm::ConstantExpr::getZExt(C, BoolTy); + } + return C; + } + case APValue::ComplexInt: { + llvm::Constant *Complex[2]; + + Complex[0] = llvm::ConstantInt::get(Result.Val.getComplexIntReal()); + Complex[1] = llvm::ConstantInt::get(Result.Val.getComplexIntImag()); + + return llvm::ConstantStruct::get(Complex, 2); + } + case APValue::Float: + return llvm::ConstantFP::get(Result.Val.getFloat()); + case APValue::ComplexFloat: { + llvm::Constant *Complex[2]; + + Complex[0] = llvm::ConstantFP::get(Result.Val.getComplexFloatReal()); + Complex[1] = llvm::ConstantFP::get(Result.Val.getComplexFloatImag()); + + return llvm::ConstantStruct::get(Complex, 2); + } + case APValue::Vector: { + llvm::SmallVector<llvm::Constant *, 4> Inits; + unsigned NumElts = Result.Val.getVectorLength(); + + for (unsigned i = 0; i != NumElts; ++i) { + APValue &Elt = Result.Val.getVectorElt(i); + if (Elt.isInt()) + Inits.push_back(llvm::ConstantInt::get(Elt.getInt())); + else + Inits.push_back(llvm::ConstantFP::get(Elt.getFloat())); + } + return llvm::ConstantVector::get(&Inits[0], Inits.size()); + } + } + } + + llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E)); + if (C && C->getType() == llvm::Type::Int1Ty) { + const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType()); + C = llvm::ConstantExpr::getZExt(C, BoolTy); + } + return C; +} + +llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) { + // Always return an LLVM null constant for now; this will change when we + // get support for IRGen 
of member pointers. + return llvm::Constant::getNullValue(getTypes().ConvertType(T)); +} diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp new file mode 100644 index 000000000000..950e9e55095c --- /dev/null +++ b/lib/CodeGen/CGExprScalar.cpp @@ -0,0 +1,1575 @@ +//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This contains code to emit Expr nodes with scalar LLVM types as LLVM code. +// +//===----------------------------------------------------------------------===// + +#include "CodeGenFunction.h" +#include "CodeGenModule.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/RecordLayout.h" +#include "clang/AST/StmtVisitor.h" +#include "clang/Basic/TargetInfo.h" +#include "llvm/Constants.h" +#include "llvm/Function.h" +#include "llvm/GlobalVariable.h" +#include "llvm/Intrinsics.h" +#include "llvm/Module.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Support/CFG.h" +#include "llvm/Target/TargetData.h" +#include <cstdarg> + +using namespace clang; +using namespace CodeGen; +using llvm::Value; + +//===----------------------------------------------------------------------===// +// Scalar Expression Emitter +//===----------------------------------------------------------------------===// + +struct BinOpInfo { + Value *LHS; + Value *RHS; + QualType Ty; // Computation Type. 
+ const BinaryOperator *E; +}; + +namespace { +class VISIBILITY_HIDDEN ScalarExprEmitter + : public StmtVisitor<ScalarExprEmitter, Value*> { + CodeGenFunction &CGF; + CGBuilderTy &Builder; + bool IgnoreResultAssign; + +public: + + ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false) + : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira) { + } + + //===--------------------------------------------------------------------===// + // Utilities + //===--------------------------------------------------------------------===// + + bool TestAndClearIgnoreResultAssign() { + bool I = IgnoreResultAssign; IgnoreResultAssign = false; + return I; } + + const llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); } + LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); } + + Value *EmitLoadOfLValue(LValue LV, QualType T) { + return CGF.EmitLoadOfLValue(LV, T).getScalarVal(); + } + + /// EmitLoadOfLValue - Given an expression with complex type that represents a + /// value l-value, this method emits the address of the l-value, then loads + /// and returns the result. + Value *EmitLoadOfLValue(const Expr *E) { + return EmitLoadOfLValue(EmitLValue(E), E->getType()); + } + + /// EmitConversionToBool - Convert the specified expression value to a + /// boolean (i1) truth value. This is equivalent to "Val != 0". + Value *EmitConversionToBool(Value *Src, QualType DstTy); + + /// EmitScalarConversion - Emit a conversion from the specified type to the + /// specified destination type, both of which are LLVM scalar types. + Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy); + + /// EmitComplexToScalarConversion - Emit a conversion from the specified + /// complex type to the specified destination type, where the destination + /// type is an LLVM scalar type. 
+ Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src, + QualType SrcTy, QualType DstTy); + + //===--------------------------------------------------------------------===// + // Visitor Methods + //===--------------------------------------------------------------------===// + + Value *VisitStmt(Stmt *S) { + S->dump(CGF.getContext().getSourceManager()); + assert(0 && "Stmt can't have complex result type!"); + return 0; + } + Value *VisitExpr(Expr *S); + Value *VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr()); } + + // Leaves. + Value *VisitIntegerLiteral(const IntegerLiteral *E) { + return llvm::ConstantInt::get(E->getValue()); + } + Value *VisitFloatingLiteral(const FloatingLiteral *E) { + return llvm::ConstantFP::get(E->getValue()); + } + Value *VisitCharacterLiteral(const CharacterLiteral *E) { + return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue()); + } + Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) { + return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue()); + } + Value *VisitCXXZeroInitValueExpr(const CXXZeroInitValueExpr *E) { + return llvm::Constant::getNullValue(ConvertType(E->getType())); + } + Value *VisitGNUNullExpr(const GNUNullExpr *E) { + return llvm::Constant::getNullValue(ConvertType(E->getType())); + } + Value *VisitTypesCompatibleExpr(const TypesCompatibleExpr *E) { + return llvm::ConstantInt::get(ConvertType(E->getType()), + CGF.getContext().typesAreCompatible( + E->getArgType1(), E->getArgType2())); + } + Value *VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E); + Value *VisitAddrLabelExpr(const AddrLabelExpr *E) { + llvm::Value *V = + llvm::ConstantInt::get(llvm::Type::Int32Ty, + CGF.GetIDForAddrOfLabel(E->getLabel())); + + return Builder.CreateIntToPtr(V, ConvertType(E->getType())); + } + + // l-values. 
+ Value *VisitDeclRefExpr(DeclRefExpr *E) { + if (const EnumConstantDecl *EC = dyn_cast<EnumConstantDecl>(E->getDecl())) + return llvm::ConstantInt::get(EC->getInitVal()); + return EmitLoadOfLValue(E); + } + Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) { + return CGF.EmitObjCSelectorExpr(E); + } + Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) { + return CGF.EmitObjCProtocolExpr(E); + } + Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) { + return EmitLoadOfLValue(E); + } + Value *VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) { + return EmitLoadOfLValue(E); + } + Value *VisitObjCKVCRefExpr(ObjCKVCRefExpr *E) { + return EmitLoadOfLValue(E); + } + Value *VisitObjCMessageExpr(ObjCMessageExpr *E) { + return CGF.EmitObjCMessageExpr(E).getScalarVal(); + } + + Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E); + Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E); + Value *VisitMemberExpr(Expr *E) { return EmitLoadOfLValue(E); } + Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); } + Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { + return EmitLoadOfLValue(E); + } + Value *VisitStringLiteral(Expr *E) { return EmitLValue(E).getAddress(); } + Value *VisitObjCEncodeExpr(const ObjCEncodeExpr *E) { + return EmitLValue(E).getAddress(); + } + + Value *VisitPredefinedExpr(Expr *E) { return EmitLValue(E).getAddress(); } + + Value *VisitInitListExpr(InitListExpr *E) { + bool Ignore = TestAndClearIgnoreResultAssign(); + (void)Ignore; + assert (Ignore == false && "init list ignored"); + unsigned NumInitElements = E->getNumInits(); + + if (E->hadArrayRangeDesignator()) { + CGF.ErrorUnsupported(E, "GNU array range designator extension"); + } + + const llvm::VectorType *VType = + dyn_cast<llvm::VectorType>(ConvertType(E->getType())); + + // We have a scalar in braces. Just use the first element. 
+ if (!VType) + return Visit(E->getInit(0)); + + unsigned NumVectorElements = VType->getNumElements(); + const llvm::Type *ElementType = VType->getElementType(); + + // Emit individual vector element stores. + llvm::Value *V = llvm::UndefValue::get(VType); + + // Emit initializers + unsigned i; + for (i = 0; i < NumInitElements; ++i) { + Value *NewV = Visit(E->getInit(i)); + Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i); + V = Builder.CreateInsertElement(V, NewV, Idx); + } + + // Emit remaining default initializers + for (/* Do not initialize i*/; i < NumVectorElements; ++i) { + Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i); + llvm::Value *NewV = llvm::Constant::getNullValue(ElementType); + V = Builder.CreateInsertElement(V, NewV, Idx); + } + + return V; + } + + Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) { + return llvm::Constant::getNullValue(ConvertType(E->getType())); + } + Value *VisitImplicitCastExpr(const ImplicitCastExpr *E); + Value *VisitCastExpr(const CastExpr *E) { + // Make sure to evaluate VLA bounds now so that we have them for later. + if (E->getType()->isVariablyModifiedType()) + CGF.EmitVLASize(E->getType()); + + return EmitCastExpr(E->getSubExpr(), E->getType()); + } + Value *EmitCastExpr(const Expr *E, QualType T); + + Value *VisitCallExpr(const CallExpr *E) { + if (E->getCallReturnType()->isReferenceType()) + return EmitLoadOfLValue(E); + + return CGF.EmitCallExpr(E).getScalarVal(); + } + + Value *VisitStmtExpr(const StmtExpr *E); + + Value *VisitBlockDeclRefExpr(const BlockDeclRefExpr *E); + + // Unary Operators. 
+ Value *VisitPrePostIncDec(const UnaryOperator *E, bool isInc, bool isPre); + Value *VisitUnaryPostDec(const UnaryOperator *E) { + return VisitPrePostIncDec(E, false, false); + } + Value *VisitUnaryPostInc(const UnaryOperator *E) { + return VisitPrePostIncDec(E, true, false); + } + Value *VisitUnaryPreDec(const UnaryOperator *E) { + return VisitPrePostIncDec(E, false, true); + } + Value *VisitUnaryPreInc(const UnaryOperator *E) { + return VisitPrePostIncDec(E, true, true); + } + Value *VisitUnaryAddrOf(const UnaryOperator *E) { + return EmitLValue(E->getSubExpr()).getAddress(); + } + Value *VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); } + Value *VisitUnaryPlus(const UnaryOperator *E) { + // This differs from gcc, though, most likely due to a bug in gcc. + TestAndClearIgnoreResultAssign(); + return Visit(E->getSubExpr()); + } + Value *VisitUnaryMinus (const UnaryOperator *E); + Value *VisitUnaryNot (const UnaryOperator *E); + Value *VisitUnaryLNot (const UnaryOperator *E); + Value *VisitUnaryReal (const UnaryOperator *E); + Value *VisitUnaryImag (const UnaryOperator *E); + Value *VisitUnaryExtension(const UnaryOperator *E) { + return Visit(E->getSubExpr()); + } + Value *VisitUnaryOffsetOf(const UnaryOperator *E); + + // C++ + Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) { + return Visit(DAE->getExpr()); + } + Value *VisitCXXThisExpr(CXXThisExpr *TE) { + return CGF.LoadCXXThis(); + } + + Value *VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) { + return CGF.EmitCXXExprWithTemporaries(E).getScalarVal(); + } + Value *VisitCXXNewExpr(const CXXNewExpr *E) { + return CGF.EmitCXXNewExpr(E); + } + + // Binary Operators. + Value *EmitMul(const BinOpInfo &Ops) { + if (CGF.getContext().getLangOptions().OverflowChecking + && Ops.Ty->isSignedIntegerType()) + return EmitOverflowCheckedBinOp(Ops); + return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul"); + } + /// Create a binary op that checks for overflow. + /// Currently only supports +, - and *. 
+ Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops); + Value *EmitDiv(const BinOpInfo &Ops); + Value *EmitRem(const BinOpInfo &Ops); + Value *EmitAdd(const BinOpInfo &Ops); + Value *EmitSub(const BinOpInfo &Ops); + Value *EmitShl(const BinOpInfo &Ops); + Value *EmitShr(const BinOpInfo &Ops); + Value *EmitAnd(const BinOpInfo &Ops) { + return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and"); + } + Value *EmitXor(const BinOpInfo &Ops) { + return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor"); + } + Value *EmitOr (const BinOpInfo &Ops) { + return Builder.CreateOr(Ops.LHS, Ops.RHS, "or"); + } + + BinOpInfo EmitBinOps(const BinaryOperator *E); + Value *EmitCompoundAssign(const CompoundAssignOperator *E, + Value *(ScalarExprEmitter::*F)(const BinOpInfo &)); + + // Binary operators and binary compound assignment operators. +#define HANDLEBINOP(OP) \ + Value *VisitBin ## OP(const BinaryOperator *E) { \ + return Emit ## OP(EmitBinOps(E)); \ + } \ + Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) { \ + return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP); \ + } + HANDLEBINOP(Mul); + HANDLEBINOP(Div); + HANDLEBINOP(Rem); + HANDLEBINOP(Add); + HANDLEBINOP(Sub); + HANDLEBINOP(Shl); + HANDLEBINOP(Shr); + HANDLEBINOP(And); + HANDLEBINOP(Xor); + HANDLEBINOP(Or); +#undef HANDLEBINOP + + // Comparisons. 
+ Value *EmitCompare(const BinaryOperator *E, unsigned UICmpOpc, + unsigned SICmpOpc, unsigned FCmpOpc); +#define VISITCOMP(CODE, UI, SI, FP) \ + Value *VisitBin##CODE(const BinaryOperator *E) { \ + return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \ + llvm::FCmpInst::FP); } + VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT); + VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT); + VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE); + VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE); + VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ); + VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE); +#undef VISITCOMP + + Value *VisitBinAssign (const BinaryOperator *E); + + Value *VisitBinLAnd (const BinaryOperator *E); + Value *VisitBinLOr (const BinaryOperator *E); + Value *VisitBinComma (const BinaryOperator *E); + + // Other Operators. + Value *VisitBlockExpr(const BlockExpr *BE); + Value *VisitConditionalOperator(const ConditionalOperator *CO); + Value *VisitChooseExpr(ChooseExpr *CE); + Value *VisitVAArgExpr(VAArgExpr *VE); + Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) { + return CGF.EmitObjCStringLiteral(E); + } +}; +} // end anonymous namespace. + +//===----------------------------------------------------------------------===// +// Utilities +//===----------------------------------------------------------------------===// + +/// EmitConversionToBool - Convert the specified expression value to a +/// boolean (i1) truth value. This is equivalent to "Val != 0". +Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) { + assert(SrcType->isCanonical() && "EmitScalarConversion strips typedefs"); + + if (SrcType->isRealFloatingType()) { + // Compare against 0.0 for fp scalars. 
// NOTE(review): this chunk opens mid-function -- the signature of
// EmitConversionToBool and its floating-point type test sit above this view.
// The lines below finish the float case and handle integer/pointer operands.
    llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType());
    // Float -> bool uses "unordered or not equal", so NaN converts to true.
    return Builder.CreateFCmpUNE(Src, Zero, "tobool");
  }

  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
         "Unknown scalar type to convert");

  // Because of the type rules of C, we often end up computing a logical value,
  // then zero extending it to int, then wanting it as a logical value again.
  // Optimize this common case.
  if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(Src)) {
    if (ZI->getOperand(0)->getType() == llvm::Type::Int1Ty) {
      Value *Result = ZI->getOperand(0);
      // If there aren't any more uses, zap the instruction to save space.
      // Note that there can be more uses, for example if this
      // is the result of an assignment.
      if (ZI->use_empty())
        ZI->eraseFromParent();
      return Result;
    }
  }

  // Compare against an integer or pointer null.
  llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType());
  return Builder.CreateICmpNE(Src, Zero, "tobool");
}

/// EmitScalarConversion - Emit a conversion from the specified type to the
/// specified destination type, both of which are LLVM scalar types.
/// Returns 0 when converting to void; returns Src unchanged when the
/// canonical types (or the converted LLVM types) already agree.
Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
                                               QualType DstType) {
  // Canonicalize first so typedef sugar cannot defeat the no-op check.
  SrcType = CGF.getContext().getCanonicalType(SrcType);
  DstType = CGF.getContext().getCanonicalType(DstType);
  if (SrcType == DstType) return Src;

  if (DstType->isVoidType()) return 0;

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstType->isBooleanType())
    return EmitConversionToBool(Src, SrcType);

  const llvm::Type *DstTy = ConvertType(DstType);

  // Ignore conversions like int -> uint.
  if (Src->getType() == DstTy)
    return Src;

  // Handle pointer conversions next: pointers can only be converted
  // to/from other pointers and integers. Check for pointer types in
  // terms of LLVM, as some native types (like Obj-C id) may map to a
  // pointer type.
  if (isa<llvm::PointerType>(DstTy)) {
    // The source value may be an integer, or a pointer.
    if (isa<llvm::PointerType>(Src->getType()))
      return Builder.CreateBitCast(Src, DstTy, "conv");
    assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
    // First, convert to the correct width so that we control the kind of
    // extension.
    const llvm::Type *MiddleTy = llvm::IntegerType::get(CGF.LLVMPointerWidth);
    bool InputSigned = SrcType->isSignedIntegerType();
    llvm::Value* IntResult =
      Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
    // Then, cast to pointer.
    return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
  }

  if (isa<llvm::PointerType>(Src->getType())) {
    // Must be an ptr to int cast.
    assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
    return Builder.CreatePtrToInt(Src, DstTy, "conv");
  }

  // A scalar can be splatted to an extended vector of the same element type
  if (DstType->isExtVectorType() && !isa<VectorType>(SrcType)) {
    // Cast the scalar to element type
    QualType EltTy = DstType->getAsExtVectorType()->getElementType();
    llvm::Value *Elt = EmitScalarConversion(Src, SrcType, EltTy);

    // Insert the element in element zero of an undef vector
    llvm::Value *UnV = llvm::UndefValue::get(DstTy);
    llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0);
    UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");

    // Splat the element across to all elements with an all-zeros shuffle mask.
    llvm::SmallVector<llvm::Constant*, 16> Args;
    unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
    for (unsigned i = 0; i < NumElements; i++)
      Args.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0));

    llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
    llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
    return Yay;
  }

  // Allow bitcast from vector to integer/fp of the same size.
  if (isa<llvm::VectorType>(Src->getType()) ||
      isa<llvm::VectorType>(DstTy))
    return Builder.CreateBitCast(Src, DstTy, "conv");

  // Finally, we have the arithmetic types: real int/float.
  if (isa<llvm::IntegerType>(Src->getType())) {
    bool InputSigned = SrcType->isSignedIntegerType();
    if (isa<llvm::IntegerType>(DstTy))
      return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    else if (InputSigned)
      return Builder.CreateSIToFP(Src, DstTy, "conv");
    else
      return Builder.CreateUIToFP(Src, DstTy, "conv");
  }

  assert(Src->getType()->isFloatingPoint() && "Unknown real conversion");
  if (isa<llvm::IntegerType>(DstTy)) {
    if (DstType->isSignedIntegerType())
      return Builder.CreateFPToSI(Src, DstTy, "conv");
    else
      return Builder.CreateFPToUI(Src, DstTy, "conv");
  }

  assert(DstTy->isFloatingPoint() && "Unknown real conversion");
  // NOTE(review): relies on the LLVM float TypeID enumeration being ordered
  // by increasing precision to pick truncate vs. extend -- TODO confirm this
  // holds for every FP type pair this can see.
  if (DstTy->getTypeID() < Src->getType()->getTypeID())
    return Builder.CreateFPTrunc(Src, DstTy, "conv");
  else
    return Builder.CreateFPExt(Src, DstTy, "conv");
}

/// EmitComplexToScalarConversion - Emit a conversion from the specified
/// complex type to the specified destination type, where the destination
/// type is an LLVM scalar type.
Value *ScalarExprEmitter::
EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                              QualType SrcTy, QualType DstTy) {
  // Get the source element type.
  SrcTy = SrcTy->getAsComplexType()->getElementType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstTy->isBooleanType()) {
    //  Complex != 0  -> (Real != 0) | (Imag != 0)
    Src.first  = EmitScalarConversion(Src.first, SrcTy, DstTy);
    Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy);
    return Builder.CreateOr(Src.first, Src.second, "tobool");
  }

  // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
  // the imaginary part of the complex value is discarded and the value of the
  // real part is converted according to the conversion rules for the
  // corresponding real type.
  return EmitScalarConversion(Src.first, SrcTy, DstTy);
}


//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

/// VisitExpr - Fallback visitor: report the expression kind as unsupported
/// and yield an undef of the expected type (or 0 for void).
Value *ScalarExprEmitter::VisitExpr(Expr *E) {
  CGF.ErrorUnsupported(E, "scalar expression");
  if (E->getType()->isVoidType())
    return 0;
  return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
}

/// VisitShuffleVectorExpr - Lower __builtin_shufflevector: sub-exprs 0 and 1
/// are the input vectors, the remaining sub-exprs form the constant mask.
Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
  llvm::SmallVector<llvm::Constant*, 32> indices;
  for (unsigned i = 2; i < E->getNumSubExprs(); i++) {
    indices.push_back(cast<llvm::Constant>(CGF.EmitScalarExpr(E->getExpr(i))));
  }
  Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
  Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
  Value* SV = llvm::ConstantVector::get(indices.begin(), indices.size());
  return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
}

Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
  TestAndClearIgnoreResultAssign();

  // Emit subscript expressions in rvalue context's.  For most cases, this just
  // loads the lvalue formed by the subscript expr.  However, we have to be
  // careful, because the base of a vector subscript is occasionally an rvalue,
  // so we can't get it as an lvalue.
  if (!E->getBase()->getType()->isVectorType())
    return EmitLoadOfLValue(E);

  // Handle the vector case.  The base must be a vector, the index must be an
  // integer value.
  Value *Base = Visit(E->getBase());
  Value *Idx  = Visit(E->getIdx());
  bool IdxSigned = E->getIdx()->getType()->isSignedIntegerType();
  Idx = Builder.CreateIntCast(Idx, llvm::Type::Int32Ty, IdxSigned,
                              "vecidxcast");
  return Builder.CreateExtractElement(Base, Idx, "vecext");
}

/// VisitImplicitCastExpr - Implicit casts are the same as normal casts, but
/// also handle things like function to pointer-to-function decay, and array to
/// pointer decay.
Value *ScalarExprEmitter::VisitImplicitCastExpr(const ImplicitCastExpr *E) {
  const Expr *Op = E->getSubExpr();

  // If this is due to array->pointer conversion, emit the array expression as
  // an l-value.
  if (Op->getType()->isArrayType()) {
    Value *V = EmitLValue(Op).getAddress();  // Bitfields can't be arrays.

    // Note that VLA pointers are always decayed, so we don't need to do
    // anything here.
    if (!Op->getType()->isVariableArrayType()) {
      assert(isa<llvm::PointerType>(V->getType()) && "Expected pointer");
      assert(isa<llvm::ArrayType>(cast<llvm::PointerType>(V->getType())
                                  ->getElementType()) &&
             "Expected pointer to array");
      // Decay T (*)[N] to T* by indexing to the first element.
      V = Builder.CreateStructGEP(V, 0, "arraydecay");
    }

    // The resultant pointer type can be implicitly casted to other pointer
    // types as well (e.g. void*) and can be implicitly converted to integer.
    const llvm::Type *DestTy = ConvertType(E->getType());
    if (V->getType() != DestTy) {
      if (isa<llvm::PointerType>(DestTy))
        V = Builder.CreateBitCast(V, DestTy, "ptrconv");
      else {
        assert(isa<llvm::IntegerType>(DestTy) && "Unknown array decay");
        V = Builder.CreatePtrToInt(V, DestTy, "ptrconv");
      }
    }
    return V;
  }

  return EmitCastExpr(Op, E->getType());
}


// VisitCastExpr - Emit code for an explicit or implicit cast.  Implicit casts
// have to handle a more broad range of conversions than explicit casts, as they
// handle things like function to ptr-to-function decay etc.
Value *ScalarExprEmitter::EmitCastExpr(const Expr *E, QualType DestTy) {
  if (!DestTy->isVoidType())
    TestAndClearIgnoreResultAssign();

  // Handle cases where the source is an non-complex type.

  if (!CGF.hasAggregateLLVMType(E->getType())) {
    Value *Src = Visit(const_cast<Expr*>(E));

    // Use EmitScalarConversion to perform the conversion.
    return EmitScalarConversion(Src, E->getType(), DestTy);
  }

  if (E->getType()->isAnyComplexType()) {
    // Handle cases where the source is a complex type.  Only the parts that
    // feed the result need to be fully emitted: converting to bool consumes
    // both halves, converting to void consumes neither.
    bool IgnoreImag = true;
    bool IgnoreImagAssign = true;
    bool IgnoreReal = IgnoreResultAssign;
    bool IgnoreRealAssign = IgnoreResultAssign;
    if (DestTy->isBooleanType())
      IgnoreImagAssign = IgnoreImag = false;
    else if (DestTy->isVoidType()) {
      IgnoreReal = IgnoreImag = false;
      IgnoreRealAssign = IgnoreImagAssign = true;
    }
    CodeGenFunction::ComplexPairTy V
      = CGF.EmitComplexExpr(E, IgnoreReal, IgnoreImag, IgnoreRealAssign,
                            IgnoreImagAssign);
    return EmitComplexToScalarConversion(V, E->getType(), DestTy);
  }

  // Okay, this is a cast from an aggregate.  It must be a cast to void.  Just
  // evaluate the result and return.
  CGF.EmitAggExpr(E, 0, false, true);
  return 0;
}

/// VisitStmtExpr - A GNU statement-expression; its scalar value is the value
/// of the compound statement (unless the whole expression has void type).
Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  return CGF.EmitCompoundStmt(*E->getSubStmt(),
                              !E->getType()->isVoidType()).getScalarVal();
}

/// VisitBlockDeclRefExpr - Load the current value of a variable referenced
/// from within a block.
Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
  return Builder.CreateLoad(CGF.GetAddrOfBlockDecl(E), false, "tmp");
}

//===----------------------------------------------------------------------===//
//                             Unary Operators
//===----------------------------------------------------------------------===//

/// VisitPrePostIncDec - Shared lowering for ++/-- in both pre and post,
/// inc and dec forms: load the lvalue, compute the adjusted value, store it
/// back, and return either the old (post) or new (pre) value.
Value *ScalarExprEmitter::VisitPrePostIncDec(const UnaryOperator *E,
                                             bool isInc, bool isPre) {
  LValue LV = EmitLValue(E->getSubExpr());
  QualType ValTy = E->getSubExpr()->getType();
  Value *InVal = CGF.EmitLoadOfLValue(LV, ValTy).getScalarVal();

  int AmountVal = isInc ? 1 : -1;

  if (ValTy->isPointerType() &&
      ValTy->getAsPointerType()->isVariableArrayType()) {
    // The amount of the addition/subtraction needs to account for the VLA size
    CGF.ErrorUnsupported(E, "VLA pointer inc/dec");
  }

  Value *NextVal;
  if (const llvm::PointerType *PT =
        dyn_cast<llvm::PointerType>(InVal->getType())) {
    llvm::Constant *Inc =llvm::ConstantInt::get(llvm::Type::Int32Ty, AmountVal);
    if (!isa<llvm::FunctionType>(PT->getElementType())) {
      NextVal = Builder.CreateGEP(InVal, Inc, "ptrincdec");
    } else {
      // GNU extension: arithmetic on function pointers advances by one byte,
      // so step through i8* and cast back.
      const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
      NextVal = Builder.CreateBitCast(InVal, i8Ty, "tmp");
      NextVal = Builder.CreateGEP(NextVal, Inc, "ptrincdec");
      NextVal = Builder.CreateBitCast(NextVal, InVal->getType());
    }
  } else if (InVal->getType() == llvm::Type::Int1Ty && isInc) {
    // Bool++ is an interesting case, due to promotion rules, we get:
    // Bool++ -> Bool = Bool+1 -> Bool = (int)Bool+1 ->
    // Bool = ((int)Bool+1) != 0
    // An interesting aspect of this is that increment is always true.
    // Decrement does not have this property.
    NextVal = llvm::ConstantInt::getTrue();
  } else {
    // Add the inc/dec to the real part.
    if (isa<llvm::IntegerType>(InVal->getType()))
      NextVal = llvm::ConstantInt::get(InVal->getType(), AmountVal);
    else if (InVal->getType() == llvm::Type::FloatTy)
      NextVal =
        llvm::ConstantFP::get(llvm::APFloat(static_cast<float>(AmountVal)));
    else if (InVal->getType() == llvm::Type::DoubleTy)
      NextVal =
        llvm::ConstantFP::get(llvm::APFloat(static_cast<double>(AmountVal)));
    else {
      // Long double (or other target FP format): build +/-1 in the target's
      // semantics.  Converting +/-1.0f is exact, so rmTowardZero is safe.
      llvm::APFloat F(static_cast<float>(AmountVal));
      bool ignored;
      F.convert(CGF.Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
                &ignored);
      NextVal = llvm::ConstantFP::get(F);
    }
    NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
  }

  // Store the updated result through the lvalue.
  if (LV.isBitfield())
    CGF.EmitStoreThroughBitfieldLValue(RValue::get(NextVal), LV, ValTy,
                                       &NextVal);
  else
    CGF.EmitStoreThroughLValue(RValue::get(NextVal), LV, ValTy);

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? NextVal : InVal;
}


Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
  TestAndClearIgnoreResultAssign();
  Value *Op = Visit(E->getSubExpr());
  return Builder.CreateNeg(Op, "neg");
}

/// VisitUnaryNot - Bitwise complement (~).
Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
  TestAndClearIgnoreResultAssign();
  Value *Op = Visit(E->getSubExpr());
  // NOTE(review): the IR value name "neg" is misleading for a not -- names
  // are cosmetic in IR, so this is harmless.
  return Builder.CreateNot(Op, "neg");
}

Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
  // Compare operand to zero.
  Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());

  // Invert value.
  // TODO: Could dynamically modify easy computations here.  For example, if
  // the operand is an icmp ne, turn into icmp eq.
  BoolVal = Builder.CreateNot(BoolVal, "lnot");

  // ZExt result to the expr type.
  return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
}

/// VisitSizeOfAlignOfExpr - Return the size or alignment of the type of
/// argument of the sizeof expression as an integer.
Value *
ScalarExprEmitter::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E) {
  QualType TypeToSize = E->getTypeOfArgument();
  if (E->isSizeOf()) {
    if (const VariableArrayType *VAT =
          CGF.getContext().getAsVariableArrayType(TypeToSize)) {
      if (E->isArgumentType()) {
        // sizeof(type) - make sure to emit the VLA size.
        CGF.EmitVLASize(TypeToSize);
      } else {
        // C99 6.5.3.4p2: If the argument is an expression of type
        // VLA, it is evaluated.
        CGF.EmitAnyExpr(E->getArgumentExpr());
      }

      return CGF.GetVLASize(VAT);
    }
  }

  // If this isn't sizeof(vla), the result must be constant; use the
  // constant folding logic so we don't have to duplicate it here.
  Expr::EvalResult Result;
  E->Evaluate(Result, CGF.getContext());
  return llvm::ConstantInt::get(Result.Val.getInt());
}

/// VisitUnaryReal - __real on a complex value yields the real half; on a
/// scalar it is the value itself.
Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType())
    return CGF.EmitComplexExpr(Op, false, true, false, true).first;
  return Visit(Op);
}
Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType())
    return CGF.EmitComplexExpr(Op, true, false, true, false).second;

  // __imag on a scalar returns zero.  Emit the subexpr to ensure side
  // effects are evaluated, but not the actual value.
  if (E->isLvalue(CGF.getContext()) == Expr::LV_Valid)
    CGF.EmitLValue(Op);
  else
    CGF.EmitScalarExpr(Op, true);
  return llvm::Constant::getNullValue(ConvertType(E->getType()));
}

/// VisitUnaryOffsetOf - __builtin_offsetof: the address of the designated
/// member (computed as an lvalue) converted to an integer.
Value *ScalarExprEmitter::VisitUnaryOffsetOf(const UnaryOperator *E)
{
  Value* ResultAsPtr = EmitLValue(E->getSubExpr()).getAddress();
  const llvm::Type* ResultType = ConvertType(E->getType());
  return Builder.CreatePtrToInt(ResultAsPtr, ResultType, "offsetof");
}

//===----------------------------------------------------------------------===//
//                           Binary Operators
//===----------------------------------------------------------------------===//

/// EmitBinOps - Emit both operands of a binary operator and package them,
/// with the result type and the expression, for an Emit* helper.
BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
  TestAndClearIgnoreResultAssign();
  BinOpInfo Result;
  Result.LHS = Visit(E->getLHS());
  Result.RHS = Visit(E->getRHS());
  Result.Ty  = E->getType();
  Result.E = E;
  return Result;
}

/// EmitCompoundAssign - Emit "LHS op= RHS": load/convert the LHS, apply
/// *Func in the computation type, convert back, and store.  Returns the
/// value of the expression (0 if the result is ignored).
Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
                      Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
  bool Ignore = TestAndClearIgnoreResultAssign();
  // NOTE(review): RHSTy is currently unused in this function.
  QualType LHSTy = E->getLHS()->getType(), RHSTy = E->getRHS()->getType();

  BinOpInfo OpInfo;

  if (E->getComputationResultType()->isAnyComplexType()) {
    // This needs to go through the complex expression emitter, but
    // it's a tad complicated to do that... I'm leaving it out for now.
    // (Note that we do actually need the imaginary part of the RHS for
    // multiplication and division.)
    CGF.ErrorUnsupported(E, "complex compound assignment");
    return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
  }

  // Emit the RHS first.  __block variables need to have the rhs evaluated
  // first, plus this should improve codegen a little.
  OpInfo.RHS = Visit(E->getRHS());
  OpInfo.Ty = E->getComputationResultType();
  OpInfo.E = E;
  // Load/convert the LHS.
  LValue LHSLV = EmitLValue(E->getLHS());
  OpInfo.LHS = EmitLoadOfLValue(LHSLV, LHSTy);
  OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
                                    E->getComputationLHSType());

  // Expand the binary operator.
  Value *Result = (this->*Func)(OpInfo);

  // Convert the result back to the LHS type.
  Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy);

  // Store the result value into the LHS lvalue. Bit-fields are
  // handled specially because the result is altered by the store,
  // i.e., [C99 6.5.16p1] 'An assignment expression has the value of
  // the left operand after the assignment...'.
  if (LHSLV.isBitfield()) {
    if (!LHSLV.isVolatileQualified()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy,
                                         &Result);
      return Result;
    } else
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy);
  } else
    CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV, LHSTy);
  if (Ignore)
    return 0;
  // Volatile or non-bitfield: re-load so the expression value reflects the
  // stored result.
  return EmitLoadOfLValue(LHSLV, E->getType());
}


Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
  if (Ops.LHS->getType()->isFPOrFPVector())
    return Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
  else if (Ops.Ty->isUnsignedIntegerType())
    return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
  else
    return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
}

Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
  // Rem in C can't be a floating point type: C99 6.5.5p2.
  if (Ops.Ty->isUnsignedIntegerType())
    return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
  else
    return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
}

/// EmitOverflowCheckedBinOp - Emit a signed add/sub/mul through the
/// llvm.s*.with.overflow intrinsics, branching to a call through the global
/// function pointer __overflow_handler when the overflow bit is set.
/// The handler receives both operands sign-extended to i64, an operation
/// code (add=3, sub=5, mul=7 -- OpID<<1|1), and the operand bit width; its
/// i64 result is truncated back to the operand type.
Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
  unsigned IID;
  unsigned OpID = 0;

  switch (Ops.E->getOpcode()) {
  case BinaryOperator::Add:
  case BinaryOperator::AddAssign:
    OpID = 1;
    IID = llvm::Intrinsic::sadd_with_overflow;
    break;
  case BinaryOperator::Sub:
  case BinaryOperator::SubAssign:
    OpID = 2;
    IID = llvm::Intrinsic::ssub_with_overflow;
    break;
  case BinaryOperator::Mul:
  case BinaryOperator::MulAssign:
    OpID = 3;
    IID = llvm::Intrinsic::smul_with_overflow;
    break;
  default:
    assert(false && "Unsupported operation for overflow detection");
    IID = 0;
  }
  OpID <<= 1;
  OpID |= 1;

  const llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);

  llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, &opTy, 1);

  Value *resultAndOverflow = Builder.CreateCall2(intrinsic, Ops.LHS, Ops.RHS);
  Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
  Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);

  // Branch in case of overflow.
  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
  llvm::BasicBlock *overflowBB =
    CGF.createBasicBlock("overflow", CGF.CurFn);
  llvm::BasicBlock *continueBB =
    CGF.createBasicBlock("overflow.continue", CGF.CurFn);

  Builder.CreateCondBr(overflow, overflowBB, continueBB);

  // Handle overflow

  Builder.SetInsertPoint(overflowBB);

  // Handler is:
  // long long *__overflow_handler)(long long a, long long b, char op,
  // char width)
  std::vector<const llvm::Type*> handerArgTypes;
  handerArgTypes.push_back(llvm::Type::Int64Ty);
  handerArgTypes.push_back(llvm::Type::Int64Ty);
  handerArgTypes.push_back(llvm::Type::Int8Ty);
  handerArgTypes.push_back(llvm::Type::Int8Ty);
  llvm::FunctionType *handlerTy = llvm::FunctionType::get(llvm::Type::Int64Ty,
      handerArgTypes, false);
  llvm::Value *handlerFunction =
    CGF.CGM.getModule().getOrInsertGlobal("__overflow_handler",
        llvm::PointerType::getUnqual(handlerTy));
  handlerFunction = Builder.CreateLoad(handlerFunction);

  llvm::Value *handlerResult = Builder.CreateCall4(handlerFunction,
      Builder.CreateSExt(Ops.LHS, llvm::Type::Int64Ty),
      Builder.CreateSExt(Ops.RHS, llvm::Type::Int64Ty),
      llvm::ConstantInt::get(llvm::Type::Int8Ty, OpID),
      llvm::ConstantInt::get(llvm::Type::Int8Ty,
        cast<llvm::IntegerType>(opTy)->getBitWidth()));

  handlerResult = Builder.CreateTrunc(handlerResult, opTy);

  Builder.CreateBr(continueBB);

  // Set up the continuation
  Builder.SetInsertPoint(continueBB);
  // Get the correct result
  llvm::PHINode *phi = Builder.CreatePHI(opTy);
  phi->reserveOperandSpace(2);
  phi->addIncoming(result, initialBB);
  phi->addIncoming(handlerResult, overflowBB);

  return phi;
}

/// EmitAdd - Emit '+': plain (optionally overflow-checked) arithmetic for
/// non-pointer types, otherwise pointer + integer arithmetic via GEP.
Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
  if (!Ops.Ty->isPointerType()) {
    if (CGF.getContext().getLangOptions().OverflowChecking
        && Ops.Ty->isSignedIntegerType())
      return EmitOverflowCheckedBinOp(Ops);
    return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add");
  }

  if (Ops.Ty->getAsPointerType()->isVariableArrayType()) {
    // The amount of the addition needs to account for the VLA size
    CGF.ErrorUnsupported(Ops.E, "VLA pointer addition");
  }
  // Either side of the '+' may be the pointer (ptr+int or int+ptr).
  Value *Ptr, *Idx;
  Expr *IdxExp;
  const PointerType *PT;
  if ((PT = Ops.E->getLHS()->getType()->getAsPointerType())) {
    Ptr = Ops.LHS;
    Idx = Ops.RHS;
    IdxExp = Ops.E->getRHS();
  } else {                                           // int + pointer
    PT = Ops.E->getRHS()->getType()->getAsPointerType();
    assert(PT && "Invalid add expr");
    Ptr = Ops.RHS;
    Idx = Ops.LHS;
    IdxExp = Ops.E->getLHS();
  }

  unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
  if (Width < CGF.LLVMPointerWidth) {
    // Zero or sign extend the pointer value based on whether the index is
    // signed or not.
    const llvm::Type *IdxType = llvm::IntegerType::get(CGF.LLVMPointerWidth);
    if (IdxExp->getType()->isSignedIntegerType())
      Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
    else
      Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
  }

  const QualType ElementType = PT->getPointeeType();
  // Handle interface types, which are not represented with a concrete
  // type.
  if (const ObjCInterfaceType *OIT = dyn_cast<ObjCInterfaceType>(ElementType)) {
    // getTypeSize is in bits; /8 converts to bytes (assumes 8-bit chars).
    llvm::Value *InterfaceSize =
      llvm::ConstantInt::get(Idx->getType(),
                             CGF.getContext().getTypeSize(OIT) / 8);
    Idx = Builder.CreateMul(Idx, InterfaceSize);
    const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
    Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
    Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
    return Builder.CreateBitCast(Res, Ptr->getType());
  }

  // Explicitly handle GNU void* and function pointer arithmetic
  // extensions. The GNU void* casts amount to no-ops since our void*
  // type is i8*, but this is future proof.
  if (ElementType->isVoidType() || ElementType->isFunctionType()) {
    const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
    Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
    Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
    return Builder.CreateBitCast(Res, Ptr->getType());
  }

  return Builder.CreateGEP(Ptr, Idx, "add.ptr");
}

/// EmitSub - Emit '-': plain (optionally overflow-checked) arithmetic,
/// pointer - integer (negated GEP), or pointer - pointer (byte difference
/// divided by the element size).
Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
  if (!isa<llvm::PointerType>(Ops.LHS->getType())) {
    if (CGF.getContext().getLangOptions().OverflowChecking
        && Ops.Ty->isSignedIntegerType())
      return EmitOverflowCheckedBinOp(Ops);
    return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub");
  }

  if (Ops.E->getLHS()->getType()->getAsPointerType()->isVariableArrayType()) {
    // The amount of the addition needs to account for the VLA size for
    // ptr-int
    // The amount of the division needs to account for the VLA size for
    // ptr-ptr.
    CGF.ErrorUnsupported(Ops.E, "VLA pointer subtraction");
  }

  const QualType LHSType = Ops.E->getLHS()->getType();
  const QualType LHSElementType = LHSType->getAsPointerType()->getPointeeType();
  if (!isa<llvm::PointerType>(Ops.RHS->getType())) {
    // pointer - int
    Value *Idx = Ops.RHS;
    unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
    if (Width < CGF.LLVMPointerWidth) {
      // Zero or sign extend the pointer value based on whether the index is
      // signed or not.
      const llvm::Type *IdxType = llvm::IntegerType::get(CGF.LLVMPointerWidth);
      if (Ops.E->getRHS()->getType()->isSignedIntegerType())
        Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
      else
        Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
    }
    Idx = Builder.CreateNeg(Idx, "sub.ptr.neg");

    // Handle interface types, which are not represented with a concrete
    // type.
    if (const ObjCInterfaceType *OIT =
        dyn_cast<ObjCInterfaceType>(LHSElementType)) {
      llvm::Value *InterfaceSize =
        llvm::ConstantInt::get(Idx->getType(),
                               CGF.getContext().getTypeSize(OIT) / 8);
      Idx = Builder.CreateMul(Idx, InterfaceSize);
      const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
      Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
      // NOTE(review): value name "add.ptr" looks copy-pasted from EmitAdd;
      // harmless, since IR value names are cosmetic.
      Value *Res = Builder.CreateGEP(LHSCasted, Idx, "add.ptr");
      return Builder.CreateBitCast(Res, Ops.LHS->getType());
    }

    // Explicitly handle GNU void* and function pointer arithmetic
    // extensions. The GNU void* casts amount to no-ops since our
    // void* type is i8*, but this is future proof.
    if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
      const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
      Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
      Value *Res = Builder.CreateGEP(LHSCasted, Idx, "sub.ptr");
      return Builder.CreateBitCast(Res, Ops.LHS->getType());
    }

    return Builder.CreateGEP(Ops.LHS, Idx, "sub.ptr");
  } else {
    // pointer - pointer
    Value *LHS = Ops.LHS;
    Value *RHS = Ops.RHS;

    uint64_t ElementSize;

    // Handle GCC extension for pointer arithmetic on void* and function pointer
    // types.
    if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
      ElementSize = 1;
    } else {
      ElementSize = CGF.getContext().getTypeSize(LHSElementType) / 8;
    }

    const llvm::Type *ResultType = ConvertType(Ops.Ty);
    LHS = Builder.CreatePtrToInt(LHS, ResultType, "sub.ptr.lhs.cast");
    RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
    Value *BytesBetween = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");

    // Optimize out the shift for element size of 1.
    if (ElementSize == 1)
      return BytesBetween;

    // HACK: LLVM doesn't have an divide instruction that 'knows' there is no
    // remainder. As such, we handle common power-of-two cases here to generate
    // better code. See PR2247.
    if (llvm::isPowerOf2_64(ElementSize)) {
      Value *ShAmt =
        llvm::ConstantInt::get(ResultType, llvm::Log2_64(ElementSize));
      return Builder.CreateAShr(BytesBetween, ShAmt, "sub.ptr.shr");
    }

    // Otherwise, do a full sdiv.
    Value *BytesPerElt = llvm::ConstantInt::get(ResultType, ElementSize);
    return Builder.CreateSDiv(BytesBetween, BytesPerElt, "sub.ptr.div");
  }
}

Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  return Builder.CreateShl(Ops.LHS, RHS, "shl");
}

Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  // Arithmetic vs. logical shift follows the signedness of the result type.
  if (Ops.Ty->isUnsignedIntegerType())
    return Builder.CreateLShr(Ops.LHS, RHS, "shr");
  return Builder.CreateAShr(Ops.LHS, RHS, "shr");
}

/// EmitCompare - Shared lowering for the relational/equality operators.
/// The caller passes the unsigned-int, signed-int and floating-point
/// predicates (as raw unsigned values, cast to the CmpInst predicates here);
/// this routine picks one based on the operand type, with dedicated paths
/// for vector and complex operands.
Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
                                      unsigned SICmpOpc, unsigned FCmpOpc) {
  TestAndClearIgnoreResultAssign();
  Value *Result;
  QualType LHSTy = E->getLHS()->getType();
  if (!LHSTy->isAnyComplexType() && !LHSTy->isVectorType()) {
    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());

    if (LHS->getType()->isFloatingPoint()) {
      Result = Builder.CreateFCmp((llvm::CmpInst::Predicate)FCmpOpc,
                                  LHS, RHS, "cmp");
    } else if (LHSTy->isSignedIntegerType()) {
      Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)SICmpOpc,
                                  LHS, RHS, "cmp");
    } else {
      // Unsigned integers and pointers.
      Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
                                  LHS, RHS, "cmp");
    }
  } else if (LHSTy->isVectorType()) {
    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());

    if (LHS->getType()->isFPOrFPVector()) {
      Result = Builder.CreateVFCmp((llvm::CmpInst::Predicate)FCmpOpc,
                                   LHS, RHS, "cmp");
    } else if (LHSTy->isUnsignedIntegerType()) {
      Result = Builder.CreateVICmp((llvm::CmpInst::Predicate)UICmpOpc,
                                   LHS, RHS, "cmp");
    } else {
      // Signed integers and pointers.
      Result = Builder.CreateVICmp((llvm::CmpInst::Predicate)SICmpOpc,
                                   LHS, RHS, "cmp");
    }
    // Vector compares yield a vector result; no bool conversion applies.
    return Result;
  } else {
    // Complex Comparison: can only be an equality comparison.
    CodeGenFunction::ComplexPairTy LHS = CGF.EmitComplexExpr(E->getLHS());
    CodeGenFunction::ComplexPairTy RHS = CGF.EmitComplexExpr(E->getRHS());

    QualType CETy = LHSTy->getAsComplexType()->getElementType();

    Value *ResultR, *ResultI;
    if (CETy->isRealFloatingType()) {
      ResultR = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
                                   LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
                                   LHS.second, RHS.second, "cmp.i");
    } else {
      // Complex comparisons can only be equality comparisons.  As such, signed
      // and unsigned opcodes are the same.
      ResultR = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
                                   LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
                                   LHS.second, RHS.second, "cmp.i");
    }

    if (E->getOpcode() == BinaryOperator::EQ) {
      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
    } else {
      assert(E->getOpcode() == BinaryOperator::NE &&
             "Complex comparison other than == or != ?");
      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
    }
  }

  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType());
}

Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();

  // __block variables need to have the rhs evaluated first, plus this should
  // improve codegen just a little.
  Value *RHS = Visit(E->getRHS());
  LValue LHS = EmitLValue(E->getLHS());

  // Store the value into the LHS.  Bit-fields are handled specially
  // because the result is altered by the store, i.e., [C99 6.5.16p1]
  // 'An assignment expression has the value of the left operand after
  // the assignment...'.
  if (LHS.isBitfield()) {
    if (!LHS.isVolatileQualified()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType(),
                                         &RHS);
      return RHS;
    } else
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType());
  } else
    CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS, E->getType());
  if (Ignore)
    return 0;
  return EmitLoadOfLValue(LHS, E->getType());
}

/// VisitBinLAnd - Emit '&&' with short-circuit control flow (or constant-fold
/// one side away); the i1 result is zero-extended to int.
Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
  // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
  // If we have 1 && X, just emit X without inserting the control flow.
  if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getLHS())) {
    if (Cond == 1) { // If we have 1 && X, just emit X.
      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
      // ZExt result to int.
      return Builder.CreateZExt(RHSCond, CGF.LLVMIntTy, "land.ext");
    }

    // 0 && RHS: If it is safe, just elide the RHS, and return 0.
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::Constant::getNullValue(CGF.LLVMIntTy);
  }

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
  llvm::BasicBlock *RHSBlock  = CGF.createBasicBlock("land.rhs");

  // Branch on the LHS first.  If it is false, go to the failure (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock);

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition.  All of these values will be false.  Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::Int1Ty, "", ContBlock);
  PN->reserveOperandSpace(2);  // Normal case, two inputs.
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getFalse(), *PI);

  CGF.EmitBlock(RHSBlock);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

  // Reaquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // Emit an unconditional branch from this block to ContBlock.  Insert an entry
  // into the phi node for the edge with the value of RHSCond.
  CGF.EmitBlock(ContBlock);
  PN->addIncoming(RHSCond, RHSBlock);

  // ZExt result to int.
  return Builder.CreateZExt(PN, CGF.LLVMIntTy, "land.ext");
}

/// VisitBinLOr - Emit '||'; mirror image of VisitBinLAnd.
Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
  // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
  // If we have 0 || X, just emit X without inserting the control flow.
  if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getLHS())) {
    if (Cond == -1) { // If we have 0 || X, just emit X.
      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
      // ZExt result to int.
      return Builder.CreateZExt(RHSCond, CGF.LLVMIntTy, "lor.ext");
    }

    // 1 || RHS: If it is safe, just elide the RHS, and return 1.
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::ConstantInt::get(CGF.LLVMIntTy, 1);
  }

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");

  // Branch on the LHS first.  If it is true, go to the success (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock);

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition.  All of these values will be true.  Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::Int1Ty, "", ContBlock);
  PN->reserveOperandSpace(2);  // Normal case, two inputs.
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getTrue(), *PI);

  // Emit the RHS condition as a bool value.
  CGF.EmitBlock(RHSBlock);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

  // Reaquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // Emit an unconditional branch from this block to ContBlock.  Insert an entry
  // into the phi node for the edge with the value of RHSCond.
  CGF.EmitBlock(ContBlock);
  PN->addIncoming(RHSCond, RHSBlock);

  // ZExt result to int.
  return Builder.CreateZExt(PN, CGF.LLVMIntTy, "lor.ext");
}

/// VisitBinComma - Emit the LHS for its side effects only, then the RHS for
/// its value.
Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitStmt(E->getLHS());
  CGF.EnsureInsertPoint();
  return Visit(E->getRHS());
}

//===----------------------------------------------------------------------===//
//                             Other Operators
//===----------------------------------------------------------------------===//

/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
/// expression is cheap enough and side-effect-free enough to evaluate
/// unconditionally instead of conditionally.  This is used to convert control
/// flow into selects in some cases.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E) {
  if (const ParenExpr *PE = dyn_cast<ParenExpr>(E))
    return isCheapEnoughToEvaluateUnconditionally(PE->getSubExpr());

  // TODO: Allow anything we can constant fold to an integer or fp constant.
  if (isa<IntegerLiteral>(E) || isa<CharacterLiteral>(E) ||
      isa<FloatingLiteral>(E))
    return true;

  // Non-volatile automatic variables too, to get "cond ? X : Y" where
  // X and Y are local variables.
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
      if (VD->hasLocalStorage() && !VD->getType().isVolatileQualified())
        return true;

  return false;
}


Value *ScalarExprEmitter::
VisitConditionalOperator(const ConditionalOperator *E) {
  TestAndClearIgnoreResultAssign();
  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm.
  if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getCond())){
    Expr *Live = E->getLHS(), *Dead = E->getRHS();
    if (Cond == -1)
      std::swap(Live, Dead);

    // If the dead side doesn't have labels we need, and if the Live side isn't
    // the gnu missing ?: extension (which we could handle, but don't bother
    // to), just emit the Live part.
    if ((!Dead || !CGF.ContainsLabel(Dead)) &&  // No labels in dead part
        Live)                                   // Live part isn't missing.
      return Visit(Live);
  }


  // If this is a really simple expression (like x ? 4 : 5), emit this as a
  // select instead of as control flow.  We can only do this if it is cheap and
  // safe to evaluate the LHS and RHS unconditionally.
  if (E->getLHS() && isCheapEnoughToEvaluateUnconditionally(E->getLHS()) &&
      isCheapEnoughToEvaluateUnconditionally(E->getRHS())) {
    llvm::Value *CondV = CGF.EvaluateExprAsBool(E->getCond());
    llvm::Value *LHS = Visit(E->getLHS());
    llvm::Value *RHS = Visit(E->getRHS());
    return Builder.CreateSelect(CondV, LHS, RHS, "cond");
  }


  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
  Value *CondVal = 0;

  // If we don't have the GNU missing condition extension, emit a branch on
  // bool the normal way.
  if (E->getLHS()) {
    // Otherwise, just use EmitBranchOnBoolExpr to get small and simple code for
    // the branch on bool.
    CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
  } else {
    // Otherwise, for the ?: extension, evaluate the conditional and then
    // convert it to bool the hard way.  We do this explicitly because we need
    // the unconverted value for the missing middle value of the ?:.
    CondVal = CGF.EmitScalarExpr(E->getCond());

    // In some cases, EmitScalarConversion will delete the "CondVal" expression
    // if there are no extra uses (an optimization).  Inhibit this by making an
    // extra dead use, because we're going to add a use of CondVal later.  We
    // don't use the builder for this, because we don't want it to get optimized
    // away.  This leaves dead code, but the ?: extension isn't common.
// NOTE(review): chunk ends here -- the remainder of VisitConditionalOperator
// continues beyond this view.
+ new llvm::BitCastInst(CondVal, CondVal->getType(), "dummy?:holder", + Builder.GetInsertBlock()); + + Value *CondBoolVal = + CGF.EmitScalarConversion(CondVal, E->getCond()->getType(), + CGF.getContext().BoolTy); + Builder.CreateCondBr(CondBoolVal, LHSBlock, RHSBlock); + } + + CGF.EmitBlock(LHSBlock); + + // Handle the GNU extension for missing LHS. + Value *LHS; + if (E->getLHS()) + LHS = Visit(E->getLHS()); + else // Perform promotions, to handle cases like "short ?: int" + LHS = EmitScalarConversion(CondVal, E->getCond()->getType(), E->getType()); + + LHSBlock = Builder.GetInsertBlock(); + CGF.EmitBranch(ContBlock); + + CGF.EmitBlock(RHSBlock); + + Value *RHS = Visit(E->getRHS()); + RHSBlock = Builder.GetInsertBlock(); + CGF.EmitBranch(ContBlock); + + CGF.EmitBlock(ContBlock); + + if (!LHS || !RHS) { + assert(E->getType()->isVoidType() && "Non-void value should have a value"); + return 0; + } + + // Create a PHI node for the real part. + llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), "cond"); + PN->reserveOperandSpace(2); + PN->addIncoming(LHS, LHSBlock); + PN->addIncoming(RHS, RHSBlock); + return PN; +} + +Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) { + return Visit(E->getChosenSubExpr(CGF.getContext())); +} + +Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) { + llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr()); + llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType()); + + // If EmitVAArg fails, we fall back to the LLVM instruction. + if (!ArgPtr) + return Builder.CreateVAArg(ArgValue, ConvertType(VE->getType())); + + // FIXME Volatility. 
+ return Builder.CreateLoad(ArgPtr); +} + +Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *BE) { + return CGF.BuildBlockLiteralTmp(BE); +} + +//===----------------------------------------------------------------------===// +// Entry Point into this File +//===----------------------------------------------------------------------===// + +/// EmitScalarExpr - Emit the computation of the specified expression of +/// scalar type, ignoring the result. +Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) { + assert(E && !hasAggregateLLVMType(E->getType()) && + "Invalid scalar expression to emit"); + + return ScalarExprEmitter(*this, IgnoreResultAssign) + .Visit(const_cast<Expr*>(E)); +} + +/// EmitScalarConversion - Emit a conversion from the specified type to the +/// specified destination type, both of which are LLVM scalar types. +Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy, + QualType DstTy) { + assert(!hasAggregateLLVMType(SrcTy) && !hasAggregateLLVMType(DstTy) && + "Invalid scalar expression to emit"); + return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy); +} + +/// EmitComplexToScalarConversion - Emit a conversion from the specified +/// complex type to the specified destination type, where the destination +/// type is an LLVM scalar type. +Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src, + QualType SrcTy, + QualType DstTy) { + assert(SrcTy->isAnyComplexType() && !hasAggregateLLVMType(DstTy) && + "Invalid complex -> scalar conversion"); + return ScalarExprEmitter(*this).EmitComplexToScalarConversion(Src, SrcTy, + DstTy); +} + +Value *CodeGenFunction::EmitShuffleVector(Value* V1, Value *V2, ...) 
{ + assert(V1->getType() == V2->getType() && + "Vector operands must be of the same type"); + unsigned NumElements = + cast<llvm::VectorType>(V1->getType())->getNumElements(); + + va_list va; + va_start(va, V2); + + llvm::SmallVector<llvm::Constant*, 16> Args; + for (unsigned i = 0; i < NumElements; i++) { + int n = va_arg(va, int); + assert(n >= 0 && n < (int)NumElements * 2 && + "Vector shuffle index out of bounds!"); + Args.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, n)); + } + + const char *Name = va_arg(va, const char *); + va_end(va); + + llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements); + + return Builder.CreateShuffleVector(V1, V2, Mask, Name); +} + +llvm::Value *CodeGenFunction::EmitVector(llvm::Value * const *Vals, + unsigned NumVals, bool isSplat) { + llvm::Value *Vec + = llvm::UndefValue::get(llvm::VectorType::get(Vals[0]->getType(), NumVals)); + + for (unsigned i = 0, e = NumVals; i != e; ++i) { + llvm::Value *Val = isSplat ? Vals[0] : Vals[i]; + llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i); + Vec = Builder.CreateInsertElement(Vec, Val, Idx, "tmp"); + } + + return Vec; +} diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp new file mode 100644 index 000000000000..51f9a7657928 --- /dev/null +++ b/lib/CodeGen/CGObjC.cpp @@ -0,0 +1,644 @@ +//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This contains code to emit Objective-C code as LLVM code. 
+// +//===----------------------------------------------------------------------===// + +#include "CGObjCRuntime.h" +#include "CodeGenFunction.h" +#include "CodeGenModule.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/StmtObjC.h" +#include "clang/Basic/Diagnostic.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/Target/TargetData.h" +using namespace clang; +using namespace CodeGen; + +/// Emits an instance of NSConstantString representing the object. +llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E) +{ + llvm::Constant *C = CGM.getObjCRuntime().GenerateConstantString(E); + // FIXME: This bitcast should just be made an invariant on the Runtime. + return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType())); +} + +/// Emit a selector. +llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) { + // Untyped selector. + // Note that this implementation allows for non-constant strings to be passed + // as arguments to @selector(). Currently, the only thing preventing this + // behaviour is the type checking in the front end. + return CGM.getObjCRuntime().GetSelector(Builder, E->getSelector()); +} + +llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) { + // FIXME: This should pass the Decl not the name. + return CGM.getObjCRuntime().GenerateProtocolRef(Builder, E->getProtocol()); +} + + +RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E) { + // Only the lookup mechanism and first two arguments of the method + // implementation vary between runtimes. We can get the receiver and + // arguments in generic code. 
+ + CGObjCRuntime &Runtime = CGM.getObjCRuntime(); + const Expr *ReceiverExpr = E->getReceiver(); + bool isSuperMessage = false; + bool isClassMessage = false; + // Find the receiver + llvm::Value *Receiver; + if (!ReceiverExpr) { + const ObjCInterfaceDecl *OID = E->getClassInfo().first; + + // Very special case, super send in class method. The receiver is + // self (the class object) and the send uses super semantics. + if (!OID) { + assert(E->getClassName()->isStr("super") && + "Unexpected missing class interface in message send."); + isSuperMessage = true; + Receiver = LoadObjCSelf(); + } else { + Receiver = Runtime.GetClass(Builder, OID); + } + + isClassMessage = true; + } else if (isa<ObjCSuperExpr>(E->getReceiver())) { + isSuperMessage = true; + Receiver = LoadObjCSelf(); + } else { + Receiver = EmitScalarExpr(E->getReceiver()); + } + + CallArgList Args; + EmitCallArgs(Args, E->getMethodDecl(), E->arg_begin(), E->arg_end()); + + if (isSuperMessage) { + // super is only valid in an Objective-C method + const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl); + bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext()); + return Runtime.GenerateMessageSendSuper(*this, E->getType(), + E->getSelector(), + OMD->getClassInterface(), + isCategoryImpl, + Receiver, + isClassMessage, + Args); + } + return Runtime.GenerateMessageSend(*this, E->getType(), E->getSelector(), + Receiver, isClassMessage, Args, + E->getMethodDecl()); +} + +/// StartObjCMethod - Begin emission of an ObjCMethod. This generates +/// the LLVM function and sets the other context used by +/// CodeGenFunction. 
+void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD, + const ObjCContainerDecl *CD) { + FunctionArgList Args; + llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD); + + const CGFunctionInfo &FI = CGM.getTypes().getFunctionInfo(OMD); + CGM.SetInternalFunctionAttributes(OMD, Fn, FI); + + Args.push_back(std::make_pair(OMD->getSelfDecl(), + OMD->getSelfDecl()->getType())); + Args.push_back(std::make_pair(OMD->getCmdDecl(), + OMD->getCmdDecl()->getType())); + + for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(), + E = OMD->param_end(); PI != E; ++PI) + Args.push_back(std::make_pair(*PI, (*PI)->getType())); + + StartFunction(OMD, OMD->getResultType(), Fn, Args, OMD->getLocEnd()); +} + +/// Generate an Objective-C method. An Objective-C method is a C function with +/// its pointer, name, and types registered in the class struture. +void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) { + // Check if we should generate debug info for this method. + if (CGM.getDebugInfo() && !OMD->hasAttr<NodebugAttr>()) + DebugInfo = CGM.getDebugInfo(); + StartObjCMethod(OMD, OMD->getClassInterface()); + EmitStmt(OMD->getBody(getContext())); + FinishFunction(OMD->getBodyRBrace(getContext())); +} + +// FIXME: I wasn't sure about the synthesis approach. If we end up generating an +// AST for the whole body we can just fall back to having a GenerateFunction +// which takes the body Stmt. + +/// GenerateObjCGetter - Generate an Objective-C property getter +/// function. The given Decl must be an ObjCImplementationDecl. @synthesize +/// is illegal within a category. 
+void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP, + const ObjCPropertyImplDecl *PID) { + ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl(); + const ObjCPropertyDecl *PD = PID->getPropertyDecl(); + ObjCMethodDecl *OMD = PD->getGetterMethodDecl(); + assert(OMD && "Invalid call to generate getter (empty method)"); + // FIXME: This is rather murky, we create this here since they will not have + // been created by Sema for us. + OMD->createImplicitParams(getContext(), IMP->getClassInterface()); + StartObjCMethod(OMD, IMP->getClassInterface()); + + // Determine if we should use an objc_getProperty call for + // this. Non-atomic properties are directly evaluated. + // atomic 'copy' and 'retain' properties are also directly + // evaluated in gc-only mode. + if (CGM.getLangOptions().getGCMode() != LangOptions::GCOnly && + !(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic) && + (PD->getSetterKind() == ObjCPropertyDecl::Copy || + PD->getSetterKind() == ObjCPropertyDecl::Retain)) { + llvm::Value *GetPropertyFn = + CGM.getObjCRuntime().GetPropertyGetFunction(); + + if (!GetPropertyFn) { + CGM.ErrorUnsupported(PID, "Obj-C getter requiring atomic copy"); + FinishFunction(); + return; + } + + // Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true). + // FIXME: Can't this be simpler? This might even be worse than the + // corresponding gcc code. 
+ CodeGenTypes &Types = CGM.getTypes(); + ValueDecl *Cmd = OMD->getCmdDecl(); + llvm::Value *CmdVal = Builder.CreateLoad(LocalDeclMap[Cmd], "cmd"); + QualType IdTy = getContext().getObjCIdType(); + llvm::Value *SelfAsId = + Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy)); + llvm::Value *Offset = EmitIvarOffset(IMP->getClassInterface(), Ivar); + llvm::Value *True = + llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 1); + CallArgList Args; + Args.push_back(std::make_pair(RValue::get(SelfAsId), IdTy)); + Args.push_back(std::make_pair(RValue::get(CmdVal), Cmd->getType())); + Args.push_back(std::make_pair(RValue::get(Offset), getContext().LongTy)); + Args.push_back(std::make_pair(RValue::get(True), getContext().BoolTy)); + // FIXME: We shouldn't need to get the function info here, the + // runtime already should have computed it to build the function. + RValue RV = EmitCall(Types.getFunctionInfo(PD->getType(), Args), + GetPropertyFn, Args); + // We need to fix the type here. Ivars with copy & retain are + // always objects so we don't need to worry about complex or + // aggregates. + RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(), + Types.ConvertType(PD->getType()))); + EmitReturnOfRValue(RV, PD->getType()); + } else { + LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), Ivar, 0); + if (hasAggregateLLVMType(Ivar->getType())) { + EmitAggregateCopy(ReturnValue, LV.getAddress(), Ivar->getType()); + } + else { + CodeGenTypes &Types = CGM.getTypes(); + RValue RV = EmitLoadOfLValue(LV, Ivar->getType()); + RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(), + Types.ConvertType(PD->getType()))); + EmitReturnOfRValue(RV, PD->getType()); + } + } + + FinishFunction(); +} + +/// GenerateObjCSetter - Generate an Objective-C property setter +/// function. The given Decl must be an ObjCImplementationDecl. @synthesize +/// is illegal within a category. 
+void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP, + const ObjCPropertyImplDecl *PID) { + ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl(); + const ObjCPropertyDecl *PD = PID->getPropertyDecl(); + ObjCMethodDecl *OMD = PD->getSetterMethodDecl(); + assert(OMD && "Invalid call to generate setter (empty method)"); + // FIXME: This is rather murky, we create this here since they will not have + // been created by Sema for us. + OMD->createImplicitParams(getContext(), IMP->getClassInterface()); + StartObjCMethod(OMD, IMP->getClassInterface()); + + bool IsCopy = PD->getSetterKind() == ObjCPropertyDecl::Copy; + bool IsAtomic = + !(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic); + + // Determine if we should use an objc_setProperty call for + // this. Properties with 'copy' semantics always use it, as do + // non-atomic properties with 'release' semantics as long as we are + // not in gc-only mode. + if (IsCopy || + (CGM.getLangOptions().getGCMode() != LangOptions::GCOnly && + PD->getSetterKind() == ObjCPropertyDecl::Retain)) { + llvm::Value *SetPropertyFn = + CGM.getObjCRuntime().GetPropertySetFunction(); + + if (!SetPropertyFn) { + CGM.ErrorUnsupported(PID, "Obj-C getter requiring atomic copy"); + FinishFunction(); + return; + } + + // Emit objc_setProperty((id) self, _cmd, offset, arg, + // <is-atomic>, <is-copy>). + // FIXME: Can't this be simpler? This might even be worse than the + // corresponding gcc code. 
+ CodeGenTypes &Types = CGM.getTypes(); + ValueDecl *Cmd = OMD->getCmdDecl(); + llvm::Value *CmdVal = Builder.CreateLoad(LocalDeclMap[Cmd], "cmd"); + QualType IdTy = getContext().getObjCIdType(); + llvm::Value *SelfAsId = + Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy)); + llvm::Value *Offset = EmitIvarOffset(IMP->getClassInterface(), Ivar); + llvm::Value *Arg = LocalDeclMap[*OMD->param_begin()]; + llvm::Value *ArgAsId = + Builder.CreateBitCast(Builder.CreateLoad(Arg, "arg"), + Types.ConvertType(IdTy)); + llvm::Value *True = + llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 1); + llvm::Value *False = + llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 0); + CallArgList Args; + Args.push_back(std::make_pair(RValue::get(SelfAsId), IdTy)); + Args.push_back(std::make_pair(RValue::get(CmdVal), Cmd->getType())); + Args.push_back(std::make_pair(RValue::get(Offset), getContext().LongTy)); + Args.push_back(std::make_pair(RValue::get(ArgAsId), IdTy)); + Args.push_back(std::make_pair(RValue::get(IsAtomic ? True : False), + getContext().BoolTy)); + Args.push_back(std::make_pair(RValue::get(IsCopy ? True : False), + getContext().BoolTy)); + // FIXME: We shouldn't need to get the function info here, the runtime + // already should have computed it to build the function. 
+ EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args), + SetPropertyFn, Args); + } else { + SourceLocation Loc = PD->getLocation(); + ValueDecl *Self = OMD->getSelfDecl(); + ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl(); + DeclRefExpr Base(Self, Self->getType(), Loc); + ParmVarDecl *ArgDecl = *OMD->param_begin(); + DeclRefExpr Arg(ArgDecl, ArgDecl->getType(), Loc); + ObjCIvarRefExpr IvarRef(Ivar, Ivar->getType(), Loc, &Base, + true, true); + BinaryOperator Assign(&IvarRef, &Arg, BinaryOperator::Assign, + Ivar->getType(), Loc); + EmitStmt(&Assign); + } + + FinishFunction(); +} + +llvm::Value *CodeGenFunction::LoadObjCSelf() { + const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl); + // See if we need to lazily forward self inside a block literal. + BlockForwardSelf(); + return Builder.CreateLoad(LocalDeclMap[OMD->getSelfDecl()], "self"); +} + +QualType CodeGenFunction::TypeOfSelfObject() { + const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl); + ImplicitParamDecl *selfDecl = OMD->getSelfDecl(); + const PointerType *PTy = + cast<PointerType>(getContext().getCanonicalType(selfDecl->getType())); + return PTy->getPointeeType(); +} + +RValue CodeGenFunction::EmitObjCSuperPropertyGet(const Expr *Exp, + const Selector &S) { + llvm::Value *Receiver = LoadObjCSelf(); + const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl); + bool isClassMessage = OMD->isClassMethod(); + bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext()); + return CGM.getObjCRuntime().GenerateMessageSendSuper(*this, + Exp->getType(), + S, + OMD->getClassInterface(), + isCategoryImpl, + Receiver, + isClassMessage, + CallArgList()); + +} + +RValue CodeGenFunction::EmitObjCPropertyGet(const Expr *Exp) { + // FIXME: Split it into two separate routines. 
+ if (const ObjCPropertyRefExpr *E = dyn_cast<ObjCPropertyRefExpr>(Exp)) { + Selector S = E->getProperty()->getGetterName(); + if (isa<ObjCSuperExpr>(E->getBase())) + return EmitObjCSuperPropertyGet(E, S); + return CGM.getObjCRuntime(). + GenerateMessageSend(*this, Exp->getType(), S, + EmitScalarExpr(E->getBase()), + false, CallArgList()); + } + else { + const ObjCKVCRefExpr *KE = cast<ObjCKVCRefExpr>(Exp); + Selector S = KE->getGetterMethod()->getSelector(); + llvm::Value *Receiver; + if (KE->getClassProp()) { + const ObjCInterfaceDecl *OID = KE->getClassProp(); + Receiver = CGM.getObjCRuntime().GetClass(Builder, OID); + } + else if (isa<ObjCSuperExpr>(KE->getBase())) + return EmitObjCSuperPropertyGet(KE, S); + else + Receiver = EmitScalarExpr(KE->getBase()); + return CGM.getObjCRuntime(). + GenerateMessageSend(*this, Exp->getType(), S, + Receiver, + KE->getClassProp() != 0, CallArgList()); + } +} + +void CodeGenFunction::EmitObjCSuperPropertySet(const Expr *Exp, + const Selector &S, + RValue Src) { + CallArgList Args; + llvm::Value *Receiver = LoadObjCSelf(); + const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl); + bool isClassMessage = OMD->isClassMethod(); + bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext()); + Args.push_back(std::make_pair(Src, Exp->getType())); + CGM.getObjCRuntime().GenerateMessageSendSuper(*this, + Exp->getType(), + S, + OMD->getClassInterface(), + isCategoryImpl, + Receiver, + isClassMessage, + Args); + return; +} + +void CodeGenFunction::EmitObjCPropertySet(const Expr *Exp, + RValue Src) { + // FIXME: Split it into two separate routines. 
+ if (const ObjCPropertyRefExpr *E = dyn_cast<ObjCPropertyRefExpr>(Exp)) { + Selector S = E->getProperty()->getSetterName(); + if (isa<ObjCSuperExpr>(E->getBase())) { + EmitObjCSuperPropertySet(E, S, Src); + return; + } + CallArgList Args; + Args.push_back(std::make_pair(Src, E->getType())); + CGM.getObjCRuntime().GenerateMessageSend(*this, getContext().VoidTy, S, + EmitScalarExpr(E->getBase()), + false, Args); + } + else if (const ObjCKVCRefExpr *E = dyn_cast<ObjCKVCRefExpr>(Exp)) { + Selector S = E->getSetterMethod()->getSelector(); + CallArgList Args; + llvm::Value *Receiver; + if (E->getClassProp()) { + const ObjCInterfaceDecl *OID = E->getClassProp(); + Receiver = CGM.getObjCRuntime().GetClass(Builder, OID); + } + else if (isa<ObjCSuperExpr>(E->getBase())) { + EmitObjCSuperPropertySet(E, S, Src); + return; + } + else + Receiver = EmitScalarExpr(E->getBase()); + Args.push_back(std::make_pair(Src, E->getType())); + CGM.getObjCRuntime().GenerateMessageSend(*this, getContext().VoidTy, S, + Receiver, + E->getClassProp() != 0, Args); + } + else + assert (0 && "bad expression node in EmitObjCPropertySet"); +} + +void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){ + llvm::Constant *EnumerationMutationFn = + CGM.getObjCRuntime().EnumerationMutationFunction(); + llvm::Value *DeclAddress; + QualType ElementTy; + + if (!EnumerationMutationFn) { + CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime"); + return; + } + + if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) { + EmitStmt(SD); + assert(HaveInsertPoint() && "DeclStmt destroyed insert point!"); + const Decl* D = SD->getSingleDecl(); + ElementTy = cast<ValueDecl>(D)->getType(); + DeclAddress = LocalDeclMap[D]; + } else { + ElementTy = cast<Expr>(S.getElement())->getType(); + DeclAddress = 0; + } + + // Fast enumeration state. 
+ QualType StateTy = getContext().getObjCFastEnumerationStateType(); + llvm::AllocaInst *StatePtr = CreateTempAlloca(ConvertType(StateTy), + "state.ptr"); + StatePtr->setAlignment(getContext().getTypeAlign(StateTy) >> 3); + EmitMemSetToZero(StatePtr, StateTy); + + // Number of elements in the items array. + static const unsigned NumItems = 16; + + // Get selector + llvm::SmallVector<IdentifierInfo*, 3> II; + II.push_back(&CGM.getContext().Idents.get("countByEnumeratingWithState")); + II.push_back(&CGM.getContext().Idents.get("objects")); + II.push_back(&CGM.getContext().Idents.get("count")); + Selector FastEnumSel = CGM.getContext().Selectors.getSelector(II.size(), + &II[0]); + + QualType ItemsTy = + getContext().getConstantArrayType(getContext().getObjCIdType(), + llvm::APInt(32, NumItems), + ArrayType::Normal, 0); + llvm::Value *ItemsPtr = CreateTempAlloca(ConvertType(ItemsTy), "items.ptr"); + + llvm::Value *Collection = EmitScalarExpr(S.getCollection()); + + CallArgList Args; + Args.push_back(std::make_pair(RValue::get(StatePtr), + getContext().getPointerType(StateTy))); + + Args.push_back(std::make_pair(RValue::get(ItemsPtr), + getContext().getPointerType(ItemsTy))); + + const llvm::Type *UnsignedLongLTy = ConvertType(getContext().UnsignedLongTy); + llvm::Constant *Count = llvm::ConstantInt::get(UnsignedLongLTy, NumItems); + Args.push_back(std::make_pair(RValue::get(Count), + getContext().UnsignedLongTy)); + + RValue CountRV = + CGM.getObjCRuntime().GenerateMessageSend(*this, + getContext().UnsignedLongTy, + FastEnumSel, + Collection, false, Args); + + llvm::Value *LimitPtr = CreateTempAlloca(UnsignedLongLTy, "limit.ptr"); + Builder.CreateStore(CountRV.getScalarVal(), LimitPtr); + + llvm::BasicBlock *NoElements = createBasicBlock("noelements"); + llvm::BasicBlock *SetStartMutations = createBasicBlock("setstartmutations"); + + llvm::Value *Limit = Builder.CreateLoad(LimitPtr); + llvm::Value *Zero = llvm::Constant::getNullValue(UnsignedLongLTy); + + llvm::Value 
*IsZero = Builder.CreateICmpEQ(Limit, Zero, "iszero"); + Builder.CreateCondBr(IsZero, NoElements, SetStartMutations); + + EmitBlock(SetStartMutations); + + llvm::Value *StartMutationsPtr = + CreateTempAlloca(UnsignedLongLTy); + + llvm::Value *StateMutationsPtrPtr = + Builder.CreateStructGEP(StatePtr, 2, "mutationsptr.ptr"); + llvm::Value *StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, + "mutationsptr"); + + llvm::Value *StateMutations = Builder.CreateLoad(StateMutationsPtr, + "mutations"); + + Builder.CreateStore(StateMutations, StartMutationsPtr); + + llvm::BasicBlock *LoopStart = createBasicBlock("loopstart"); + EmitBlock(LoopStart); + + llvm::Value *CounterPtr = CreateTempAlloca(UnsignedLongLTy, "counter.ptr"); + Builder.CreateStore(Zero, CounterPtr); + + llvm::BasicBlock *LoopBody = createBasicBlock("loopbody"); + EmitBlock(LoopBody); + + StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr"); + StateMutations = Builder.CreateLoad(StateMutationsPtr, "statemutations"); + + llvm::Value *StartMutations = Builder.CreateLoad(StartMutationsPtr, + "mutations"); + llvm::Value *MutationsEqual = Builder.CreateICmpEQ(StateMutations, + StartMutations, + "tobool"); + + + llvm::BasicBlock *WasMutated = createBasicBlock("wasmutated"); + llvm::BasicBlock *WasNotMutated = createBasicBlock("wasnotmutated"); + + Builder.CreateCondBr(MutationsEqual, WasNotMutated, WasMutated); + + EmitBlock(WasMutated); + llvm::Value *V = + Builder.CreateBitCast(Collection, + ConvertType(getContext().getObjCIdType()), + "tmp"); + CallArgList Args2; + Args2.push_back(std::make_pair(RValue::get(V), + getContext().getObjCIdType())); + // FIXME: We shouldn't need to get the function info here, the runtime already + // should have computed it to build the function. 
+ EmitCall(CGM.getTypes().getFunctionInfo(getContext().VoidTy, Args2), + EnumerationMutationFn, Args2); + + EmitBlock(WasNotMutated); + + llvm::Value *StateItemsPtr = + Builder.CreateStructGEP(StatePtr, 1, "stateitems.ptr"); + + llvm::Value *Counter = Builder.CreateLoad(CounterPtr, "counter"); + + llvm::Value *EnumStateItems = Builder.CreateLoad(StateItemsPtr, + "stateitems"); + + llvm::Value *CurrentItemPtr = + Builder.CreateGEP(EnumStateItems, Counter, "currentitem.ptr"); + + llvm::Value *CurrentItem = Builder.CreateLoad(CurrentItemPtr, "currentitem"); + + // Cast the item to the right type. + CurrentItem = Builder.CreateBitCast(CurrentItem, + ConvertType(ElementTy), "tmp"); + + if (!DeclAddress) { + LValue LV = EmitLValue(cast<Expr>(S.getElement())); + + // Set the value to null. + Builder.CreateStore(CurrentItem, LV.getAddress()); + } else + Builder.CreateStore(CurrentItem, DeclAddress); + + // Increment the counter. + Counter = Builder.CreateAdd(Counter, + llvm::ConstantInt::get(UnsignedLongLTy, 1)); + Builder.CreateStore(Counter, CounterPtr); + + llvm::BasicBlock *LoopEnd = createBasicBlock("loopend"); + llvm::BasicBlock *AfterBody = createBasicBlock("afterbody"); + + BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody)); + + EmitStmt(S.getBody()); + + BreakContinueStack.pop_back(); + + EmitBlock(AfterBody); + + llvm::BasicBlock *FetchMore = createBasicBlock("fetchmore"); + + Counter = Builder.CreateLoad(CounterPtr); + Limit = Builder.CreateLoad(LimitPtr); + llvm::Value *IsLess = Builder.CreateICmpULT(Counter, Limit, "isless"); + Builder.CreateCondBr(IsLess, LoopBody, FetchMore); + + // Fetch more elements. 
+ EmitBlock(FetchMore); + + CountRV = + CGM.getObjCRuntime().GenerateMessageSend(*this, + getContext().UnsignedLongTy, + FastEnumSel, + Collection, false, Args); + Builder.CreateStore(CountRV.getScalarVal(), LimitPtr); + Limit = Builder.CreateLoad(LimitPtr); + + IsZero = Builder.CreateICmpEQ(Limit, Zero, "iszero"); + Builder.CreateCondBr(IsZero, NoElements, LoopStart); + + // No more elements. + EmitBlock(NoElements); + + if (!DeclAddress) { + // If the element was not a declaration, set it to be null. + + LValue LV = EmitLValue(cast<Expr>(S.getElement())); + + // Set the value to null. + Builder.CreateStore(llvm::Constant::getNullValue(ConvertType(ElementTy)), + LV.getAddress()); + } + + EmitBlock(LoopEnd); +} + +void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) +{ + CGM.getObjCRuntime().EmitTryOrSynchronizedStmt(*this, S); +} + +void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) +{ + CGM.getObjCRuntime().EmitThrowStmt(*this, S); +} + +void CodeGenFunction::EmitObjCAtSynchronizedStmt( + const ObjCAtSynchronizedStmt &S) +{ + CGM.getObjCRuntime().EmitTryOrSynchronizedStmt(*this, S); +} + +CGObjCRuntime::~CGObjCRuntime() {} diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp new file mode 100644 index 000000000000..5e7eec9819c8 --- /dev/null +++ b/lib/CodeGen/CGObjCGNU.cpp @@ -0,0 +1,1582 @@ +//===------- CGObjCGNU.cpp - Emit LLVM Code from ASTs for a Module --------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This provides Objective-C code generation targetting the GNU runtime. The +// class in this file generates structures used by the GNU Objective-C runtime +// library. These structures are defined in objc/objc.h and objc/objc-api.h in +// the GNU runtime distribution. 
//
//===----------------------------------------------------------------------===//

#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenFunction.h"

#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtObjC.h"

#include "llvm/Intrinsics.h"
#include "llvm/Module.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Target/TargetData.h"

#include <map>


using namespace clang;
using namespace CodeGen;
using llvm::dyn_cast;

// The version of the runtime that this class targets.  Must match the version
// in the runtime.
static const int RuntimeVersion = 8;
static const int NonFragileRuntimeVersion = 9;
static const int ProtocolVersion = 2;

namespace {
/// Implementation of the CGObjCRuntime interface that emits the metadata
/// structures expected by the GNU Objective-C runtime (objc/objc.h and
/// objc/objc-api.h layouts).
class CGObjCGNU : public CodeGen::CGObjCRuntime {
private:
  CodeGen::CodeGenModule &CGM;
  llvm::Module &TheModule;
  // Cached LLVM types derived once from the ASTContext in the constructor.
  const llvm::PointerType *SelectorTy;
  const llvm::PointerType *PtrToInt8Ty;
  const llvm::FunctionType *IMPTy;
  const llvm::PointerType *IdTy;
  const llvm::IntegerType *IntTy;
  const llvm::PointerType *PtrTy;
  const llvm::IntegerType *LongTy;
  const llvm::PointerType *PtrToIntTy;
  // Forward references to the class / metaclass structures, resolved when the
  // class structures are generated (see GenerateClass).
  llvm::GlobalAlias *ClassPtrAlias;
  llvm::GlobalAlias *MetaClassPtrAlias;
  // Accumulated metadata, presumably emitted by the module load function —
  // the emission point is outside this chunk; confirm in ModuleInitFunction.
  std::vector<llvm::Constant*> Classes;
  std::vector<llvm::Constant*> Categories;
  std::vector<llvm::Constant*> ConstantStrings;
  llvm::Function *LoadFunction;
  llvm::StringMap<llvm::Constant*> ExistingProtocols;
  // A selector paired with its type encoding string.
  typedef std::pair<std::string, std::string> TypedSelector;
  // Aliases that stand in for selectors until the runtime registers them.
  std::map<TypedSelector, llvm::GlobalAlias*> TypedSelectors;
  llvm::StringMap<llvm::GlobalAlias*> UntypedSelectors;
  // Some zeros used for GEPs in lots of places.
  llvm::Constant *Zeros[2];
  llvm::Constant *NULLPtr;
private:
  llvm::Constant *GenerateIvarList(
      const llvm::SmallVectorImpl<llvm::Constant *>  &IvarNames,
      const llvm::SmallVectorImpl<llvm::Constant *>  &IvarTypes,
      const llvm::SmallVectorImpl<llvm::Constant *>  &IvarOffsets);
  llvm::Constant *GenerateMethodList(const std::string &ClassName,
      const std::string &CategoryName,
      const llvm::SmallVectorImpl<Selector>  &MethodSels,
      const llvm::SmallVectorImpl<llvm::Constant *>  &MethodTypes,
      bool isClassMethodList);
  llvm::Constant *GenerateEmptyProtocol(const std::string &ProtocolName);
  llvm::Constant *GenerateProtocolList(
      const llvm::SmallVectorImpl<std::string> &Protocols);
  llvm::Constant *GenerateClassStructure(
      llvm::Constant *MetaClass,
      llvm::Constant *SuperClass,
      unsigned info,
      const char *Name,
      llvm::Constant *Version,
      llvm::Constant *InstanceSize,
      llvm::Constant *IVars,
      llvm::Constant *Methods,
      llvm::Constant *Protocols);
  llvm::Constant *GenerateProtocolMethodList(
      const llvm::SmallVectorImpl<llvm::Constant *>  &MethodNames,
      const llvm::SmallVectorImpl<llvm::Constant *>  &MethodTypes);
  llvm::Constant *MakeConstantString(const std::string &Str, const std::string
      &Name="");
  llvm::Constant *MakeGlobal(const llvm::StructType *Ty,
    std::vector<llvm::Constant*> &V, const std::string &Name="");
  llvm::Constant *MakeGlobal(const llvm::ArrayType *Ty,
    std::vector<llvm::Constant*> &V, const std::string &Name="");
  llvm::GlobalVariable *ObjCIvarOffsetVariable(const ObjCInterfaceDecl *ID,
                                               const ObjCIvarDecl *Ivar);
public:
  CGObjCGNU(CodeGen::CodeGenModule &cgm);
  virtual llvm::Constant *GenerateConstantString(const ObjCStringLiteral *);
  virtual CodeGen::RValue
  GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
                      QualType ResultType,
                      Selector Sel,
                      llvm::Value *Receiver,
                      bool IsClassMessage,
                      const CallArgList &CallArgs,
                      const ObjCMethodDecl *Method);
  virtual CodeGen::RValue
  GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
                           QualType ResultType,
                           Selector Sel,
                           const ObjCInterfaceDecl *Class,
                           bool isCategoryImpl,
                           llvm::Value *Receiver,
                           bool IsClassMessage,
                           const CallArgList &CallArgs);
  virtual llvm::Value *GetClass(CGBuilderTy &Builder,
                                const ObjCInterfaceDecl *OID);
  virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel);
  virtual llvm::Value *GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
      *Method);

  virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
                                         const ObjCContainerDecl *CD);
  virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
  virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
  virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
                                           const ObjCProtocolDecl *PD);
  virtual void GenerateProtocol(const ObjCProtocolDecl *PD);
  virtual llvm::Function *ModuleInitFunction();
  virtual llvm::Function *GetPropertyGetFunction();
  virtual llvm::Function *GetPropertySetFunction();
  virtual llvm::Function *EnumerationMutationFunction();

  virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
                                         const Stmt &S);
  virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
                             const ObjCAtThrowStmt &S);
  virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
                                         llvm::Value *AddrWeakObj);
  virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
                                  llvm::Value *src, llvm::Value *dst);
  virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
                                    llvm::Value *src, llvm::Value *dest);
  virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
                                  llvm::Value *src, llvm::Value *dest);
  virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
                                        llvm::Value *src, llvm::Value *dest);
  virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
                                      QualType ObjectTy,
                                      llvm::Value *BaseValue,
                                      const ObjCIvarDecl *Ivar,
                                      unsigned CVRQualifiers);
  virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
                                      const ObjCInterfaceDecl *Interface,
                                      const ObjCIvarDecl *Ivar);
};
} // end anonymous namespace



/// Mangles a class name into the symbol the GNU runtime expects for the
/// class structure global.
static std::string SymbolNameForClass(const std::string &ClassName) {
  return "___objc_class_name_" + ClassName;
}

/// Mangles a method into a unique symbol name of the form
/// "._objc_method_Class(Category)+/-selector" ('+' for class methods,
/// '-' for instance methods).
static std::string SymbolNameForMethod(const std::string &ClassName, const
  std::string &CategoryName, const std::string &MethodName, bool isClassMethod)
{
  return "._objc_method_" + ClassName +"("+CategoryName+")"+
    (isClassMethod ? "+" : "-") + MethodName;
}

/// Constructor: caches the LLVM translations of the AST-level types (int,
/// long, SEL, id) that the metadata emitters use throughout.
CGObjCGNU::CGObjCGNU(CodeGen::CodeGenModule &cgm)
  : CGM(cgm), TheModule(CGM.getModule()), ClassPtrAlias(0),
    MetaClassPtrAlias(0) {
  IntTy = cast<llvm::IntegerType>(
      CGM.getTypes().ConvertType(CGM.getContext().IntTy));
  LongTy = cast<llvm::IntegerType>(
      CGM.getTypes().ConvertType(CGM.getContext().LongTy));

  Zeros[0] = llvm::ConstantInt::get(LongTy, 0);
  Zeros[1] = Zeros[0];
  NULLPtr = llvm::ConstantPointerNull::get(
      llvm::PointerType::getUnqual(llvm::Type::Int8Ty));
  // C string type.  Used in lots of places.
  PtrToInt8Ty =
    llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
  // Get the selector Type.
  SelectorTy = cast<llvm::PointerType>(
      CGM.getTypes().ConvertType(CGM.getContext().getObjCSelType()));

  PtrToIntTy = llvm::PointerType::getUnqual(IntTy);
  PtrTy = PtrToInt8Ty;

  // Object type
  IdTy = cast<llvm::PointerType>(
      CGM.getTypes().ConvertType(CGM.getContext().getObjCIdType()));

  // IMP type
  std::vector<const llvm::Type*> IMPArgs;
  IMPArgs.push_back(IdTy);
  IMPArgs.push_back(SelectorTy);
  IMPTy = llvm::FunctionType::get(IdTy, IMPArgs, true);
}
// This has to perform the lookup every time, since posing and related
// techniques can modify the name -> class mapping.
+llvm::Value *CGObjCGNU::GetClass(CGBuilderTy &Builder, + const ObjCInterfaceDecl *OID) { + llvm::Value *ClassName = CGM.GetAddrOfConstantCString(OID->getNameAsString()); + ClassName = Builder.CreateStructGEP(ClassName, 0); + + std::vector<const llvm::Type*> Params(1, PtrToInt8Ty); + llvm::Constant *ClassLookupFn = + CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, + Params, + true), + "objc_lookup_class"); + return Builder.CreateCall(ClassLookupFn, ClassName); +} + +llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, Selector Sel) { + llvm::GlobalAlias *&US = UntypedSelectors[Sel.getAsString()]; + if (US == 0) + US = new llvm::GlobalAlias(llvm::PointerType::getUnqual(SelectorTy), + llvm::GlobalValue::InternalLinkage, + ".objc_untyped_selector_alias", + NULL, &TheModule); + + return Builder.CreateLoad(US); +} + +llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl + *Method) { + + std::string SelName = Method->getSelector().getAsString(); + std::string SelTypes; + CGM.getContext().getObjCEncodingForMethodDecl(Method, SelTypes); + // Typed selectors + TypedSelector Selector = TypedSelector(SelName, + SelTypes); + + // If it's already cached, return it. + if (TypedSelectors[Selector]) + { + return Builder.CreateLoad(TypedSelectors[Selector]); + } + + // If it isn't, cache it. 
+ llvm::GlobalAlias *Sel = new llvm::GlobalAlias( + llvm::PointerType::getUnqual(SelectorTy), + llvm::GlobalValue::InternalLinkage, SelName, + NULL, &TheModule); + TypedSelectors[Selector] = Sel; + + return Builder.CreateLoad(Sel); +} + +llvm::Constant *CGObjCGNU::MakeConstantString(const std::string &Str, + const std::string &Name) { + llvm::Constant * ConstStr = llvm::ConstantArray::get(Str); + ConstStr = new llvm::GlobalVariable(ConstStr->getType(), true, + llvm::GlobalValue::InternalLinkage, + ConstStr, Name, &TheModule); + return llvm::ConstantExpr::getGetElementPtr(ConstStr, Zeros, 2); +} +llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::StructType *Ty, + std::vector<llvm::Constant*> &V, const std::string &Name) { + llvm::Constant *C = llvm::ConstantStruct::get(Ty, V); + return new llvm::GlobalVariable(Ty, false, + llvm::GlobalValue::InternalLinkage, C, Name, &TheModule); +} +llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::ArrayType *Ty, + std::vector<llvm::Constant*> &V, const std::string &Name) { + llvm::Constant *C = llvm::ConstantArray::get(Ty, V); + return new llvm::GlobalVariable(Ty, false, + llvm::GlobalValue::InternalLinkage, C, Name, &TheModule); +} + +/// Generate an NSConstantString object. +//TODO: In case there are any crazy people still using the GNU runtime without +//an OpenStep implementation, this should let them select their own class for +//constant strings. 
+llvm::Constant *CGObjCGNU::GenerateConstantString(const ObjCStringLiteral *SL) { + std::string Str(SL->getString()->getStrData(), + SL->getString()->getByteLength()); + std::vector<llvm::Constant*> Ivars; + Ivars.push_back(NULLPtr); + Ivars.push_back(MakeConstantString(Str)); + Ivars.push_back(llvm::ConstantInt::get(IntTy, Str.size())); + llvm::Constant *ObjCStr = MakeGlobal( + llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, IntTy, NULL), + Ivars, ".objc_str"); + ConstantStrings.push_back( + llvm::ConstantExpr::getBitCast(ObjCStr, PtrToInt8Ty)); + return ObjCStr; +} + +///Generates a message send where the super is the receiver. This is a message +///send to self with special delivery semantics indicating which class's method +///should be called. +CodeGen::RValue +CGObjCGNU::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF, + QualType ResultType, + Selector Sel, + const ObjCInterfaceDecl *Class, + bool isCategoryImpl, + llvm::Value *Receiver, + bool IsClassMessage, + const CallArgList &CallArgs) { + llvm::Value *cmd = GetSelector(CGF.Builder, Sel); + + CallArgList ActualArgs; + + ActualArgs.push_back( + std::make_pair(RValue::get(CGF.Builder.CreateBitCast(Receiver, IdTy)), + CGF.getContext().getObjCIdType())); + ActualArgs.push_back(std::make_pair(RValue::get(cmd), + CGF.getContext().getObjCSelType())); + ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end()); + + CodeGenTypes &Types = CGM.getTypes(); + const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs); + const llvm::FunctionType *impType = Types.GetFunctionType(FnInfo, false); + + llvm::Value *ReceiverClass = 0; + if (isCategoryImpl) { + llvm::Constant *classLookupFunction = 0; + std::vector<const llvm::Type*> Params; + Params.push_back(PtrTy); + if (IsClassMessage) { + classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get( + IdTy, Params, true), "objc_get_meta_class"); + } else { + classLookupFunction = 
CGM.CreateRuntimeFunction(llvm::FunctionType::get( + IdTy, Params, true), "objc_get_class"); + } + ReceiverClass = CGF.Builder.CreateCall(classLookupFunction, + MakeConstantString(Class->getNameAsString())); + } else { + // Set up global aliases for the metaclass or class pointer if they do not + // already exist. These will are forward-references which will be set to + // pointers to the class and metaclass structure created for the runtime load + // function. To send a message to super, we look up the value of the + // super_class pointer from either the class or metaclass structure. + if (IsClassMessage) { + if (!MetaClassPtrAlias) { + MetaClassPtrAlias = new llvm::GlobalAlias(IdTy, + llvm::GlobalValue::InternalLinkage, ".objc_metaclass_ref" + + Class->getNameAsString(), NULL, &TheModule); + } + ReceiverClass = MetaClassPtrAlias; + } else { + if (!ClassPtrAlias) { + ClassPtrAlias = new llvm::GlobalAlias(IdTy, + llvm::GlobalValue::InternalLinkage, ".objc_class_ref" + + Class->getNameAsString(), NULL, &TheModule); + } + ReceiverClass = ClassPtrAlias; + } + } + // Cast the pointer to a simplified version of the class structure + ReceiverClass = CGF.Builder.CreateBitCast(ReceiverClass, + llvm::PointerType::getUnqual(llvm::StructType::get(IdTy, IdTy, NULL))); + // Get the superclass pointer + ReceiverClass = CGF.Builder.CreateStructGEP(ReceiverClass, 1); + // Load the superclass pointer + ReceiverClass = CGF.Builder.CreateLoad(ReceiverClass); + // Construct the structure used to look up the IMP + llvm::StructType *ObjCSuperTy = llvm::StructType::get(Receiver->getType(), + IdTy, NULL); + llvm::Value *ObjCSuper = CGF.Builder.CreateAlloca(ObjCSuperTy); + + CGF.Builder.CreateStore(Receiver, CGF.Builder.CreateStructGEP(ObjCSuper, 0)); + CGF.Builder.CreateStore(ReceiverClass, + CGF.Builder.CreateStructGEP(ObjCSuper, 1)); + + // Get the IMP + std::vector<const llvm::Type*> Params; + Params.push_back(llvm::PointerType::getUnqual(ObjCSuperTy)); + Params.push_back(SelectorTy); 
+ llvm::Constant *lookupFunction = + CGM.CreateRuntimeFunction(llvm::FunctionType::get( + llvm::PointerType::getUnqual(impType), Params, true), + "objc_msg_lookup_super"); + + llvm::Value *lookupArgs[] = {ObjCSuper, cmd}; + llvm::Value *imp = CGF.Builder.CreateCall(lookupFunction, lookupArgs, + lookupArgs+2); + + return CGF.EmitCall(FnInfo, imp, ActualArgs); +} + +/// Generate code for a message send expression. +CodeGen::RValue +CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF, + QualType ResultType, + Selector Sel, + llvm::Value *Receiver, + bool IsClassMessage, + const CallArgList &CallArgs, + const ObjCMethodDecl *Method) { + llvm::Value *cmd; + if (Method) + cmd = GetSelector(CGF.Builder, Method); + else + cmd = GetSelector(CGF.Builder, Sel); + CallArgList ActualArgs; + + ActualArgs.push_back( + std::make_pair(RValue::get(CGF.Builder.CreateBitCast(Receiver, IdTy)), + CGF.getContext().getObjCIdType())); + ActualArgs.push_back(std::make_pair(RValue::get(cmd), + CGF.getContext().getObjCSelType())); + ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end()); + + CodeGenTypes &Types = CGM.getTypes(); + const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs); + const llvm::FunctionType *impType = Types.GetFunctionType(FnInfo, false); + + llvm::Value *imp; + std::vector<const llvm::Type*> Params; + Params.push_back(Receiver->getType()); + Params.push_back(SelectorTy); + // For sender-aware dispatch, we pass the sender as the third argument to a + // lookup function. When sending messages from C code, the sender is nil. 
+ // objc_msg_lookup_sender(id receiver, SEL selector, id sender); + if (CGM.getContext().getLangOptions().ObjCSenderDispatch) { + llvm::Value *self; + + if (isa<ObjCMethodDecl>(CGF.CurFuncDecl)) { + self = CGF.LoadObjCSelf(); + } else { + self = llvm::ConstantPointerNull::get(IdTy); + } + Params.push_back(self->getType()); + llvm::Constant *lookupFunction = + CGM.CreateRuntimeFunction(llvm::FunctionType::get( + llvm::PointerType::getUnqual(impType), Params, true), + "objc_msg_lookup_sender"); + + imp = CGF.Builder.CreateCall3(lookupFunction, Receiver, cmd, self); + } else { + llvm::Constant *lookupFunction = + CGM.CreateRuntimeFunction(llvm::FunctionType::get( + llvm::PointerType::getUnqual(impType), Params, true), + "objc_msg_lookup"); + + imp = CGF.Builder.CreateCall2(lookupFunction, Receiver, cmd); + } + + return CGF.EmitCall(FnInfo, imp, ActualArgs); +} + +/// Generates a MethodList. Used in construction of a objc_class and +/// objc_category structures. +llvm::Constant *CGObjCGNU::GenerateMethodList(const std::string &ClassName, + const std::string &CategoryName, + const llvm::SmallVectorImpl<Selector> &MethodSels, + const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes, + bool isClassMethodList) { + // Get the method structure type. + llvm::StructType *ObjCMethodTy = llvm::StructType::get( + PtrToInt8Ty, // Really a selector, but the runtime creates it us. 
+ PtrToInt8Ty, // Method types + llvm::PointerType::getUnqual(IMPTy), //Method pointer + NULL); + std::vector<llvm::Constant*> Methods; + std::vector<llvm::Constant*> Elements; + for (unsigned int i = 0, e = MethodTypes.size(); i < e; ++i) { + Elements.clear(); + if (llvm::Constant *Method = + TheModule.getFunction(SymbolNameForMethod(ClassName, CategoryName, + MethodSels[i].getAsString(), + isClassMethodList))) { + llvm::Constant *C = + CGM.GetAddrOfConstantCString(MethodSels[i].getAsString()); + Elements.push_back(llvm::ConstantExpr::getGetElementPtr(C, Zeros, 2)); + Elements.push_back( + llvm::ConstantExpr::getGetElementPtr(MethodTypes[i], Zeros, 2)); + Method = llvm::ConstantExpr::getBitCast(Method, + llvm::PointerType::getUnqual(IMPTy)); + Elements.push_back(Method); + Methods.push_back(llvm::ConstantStruct::get(ObjCMethodTy, Elements)); + } + } + + // Array of method structures + llvm::ArrayType *ObjCMethodArrayTy = llvm::ArrayType::get(ObjCMethodTy, + Methods.size()); + llvm::Constant *MethodArray = llvm::ConstantArray::get(ObjCMethodArrayTy, + Methods); + + // Structure containing list pointer, array and array count + llvm::SmallVector<const llvm::Type*, 16> ObjCMethodListFields; + llvm::PATypeHolder OpaqueNextTy = llvm::OpaqueType::get(); + llvm::Type *NextPtrTy = llvm::PointerType::getUnqual(OpaqueNextTy); + llvm::StructType *ObjCMethodListTy = llvm::StructType::get(NextPtrTy, + IntTy, + ObjCMethodArrayTy, + NULL); + // Refine next pointer type to concrete type + llvm::cast<llvm::OpaqueType>( + OpaqueNextTy.get())->refineAbstractTypeTo(ObjCMethodListTy); + ObjCMethodListTy = llvm::cast<llvm::StructType>(OpaqueNextTy.get()); + + Methods.clear(); + Methods.push_back(llvm::ConstantPointerNull::get( + llvm::PointerType::getUnqual(ObjCMethodListTy))); + Methods.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, + MethodTypes.size())); + Methods.push_back(MethodArray); + + // Create an instance of the structure + return MakeGlobal(ObjCMethodListTy, Methods, 
".objc_method_list"); +} + +/// Generates an IvarList. Used in construction of a objc_class. +llvm::Constant *CGObjCGNU::GenerateIvarList( + const llvm::SmallVectorImpl<llvm::Constant *> &IvarNames, + const llvm::SmallVectorImpl<llvm::Constant *> &IvarTypes, + const llvm::SmallVectorImpl<llvm::Constant *> &IvarOffsets) { + // Get the method structure type. + llvm::StructType *ObjCIvarTy = llvm::StructType::get( + PtrToInt8Ty, + PtrToInt8Ty, + IntTy, + NULL); + std::vector<llvm::Constant*> Ivars; + std::vector<llvm::Constant*> Elements; + for (unsigned int i = 0, e = IvarNames.size() ; i < e ; i++) { + Elements.clear(); + Elements.push_back( llvm::ConstantExpr::getGetElementPtr(IvarNames[i], + Zeros, 2)); + Elements.push_back( llvm::ConstantExpr::getGetElementPtr(IvarTypes[i], + Zeros, 2)); + Elements.push_back(IvarOffsets[i]); + Ivars.push_back(llvm::ConstantStruct::get(ObjCIvarTy, Elements)); + } + + // Array of method structures + llvm::ArrayType *ObjCIvarArrayTy = llvm::ArrayType::get(ObjCIvarTy, + IvarNames.size()); + + + Elements.clear(); + Elements.push_back(llvm::ConstantInt::get(IntTy, (int)IvarNames.size())); + Elements.push_back(llvm::ConstantArray::get(ObjCIvarArrayTy, Ivars)); + // Structure containing array and array count + llvm::StructType *ObjCIvarListTy = llvm::StructType::get(IntTy, + ObjCIvarArrayTy, + NULL); + + // Create an instance of the structure + return MakeGlobal(ObjCIvarListTy, Elements, ".objc_ivar_list"); +} + +/// Generate a class structure +llvm::Constant *CGObjCGNU::GenerateClassStructure( + llvm::Constant *MetaClass, + llvm::Constant *SuperClass, + unsigned info, + const char *Name, + llvm::Constant *Version, + llvm::Constant *InstanceSize, + llvm::Constant *IVars, + llvm::Constant *Methods, + llvm::Constant *Protocols) { + // Set up the class structure + // Note: Several of these are char*s when they should be ids. This is + // because the runtime performs this translation on load. 
+ llvm::StructType *ClassTy = llvm::StructType::get( + PtrToInt8Ty, // class_pointer + PtrToInt8Ty, // super_class + PtrToInt8Ty, // name + LongTy, // version + LongTy, // info + LongTy, // instance_size + IVars->getType(), // ivars + Methods->getType(), // methods + // These are all filled in by the runtime, so we pretend + PtrTy, // dtable + PtrTy, // subclass_list + PtrTy, // sibling_class + PtrTy, // protocols + PtrTy, // gc_object_type + NULL); + llvm::Constant *Zero = llvm::ConstantInt::get(LongTy, 0); + llvm::Constant *NullP = + llvm::ConstantPointerNull::get(PtrTy); + // Fill in the structure + std::vector<llvm::Constant*> Elements; + Elements.push_back(llvm::ConstantExpr::getBitCast(MetaClass, PtrToInt8Ty)); + Elements.push_back(SuperClass); + Elements.push_back(MakeConstantString(Name, ".class_name")); + Elements.push_back(Zero); + Elements.push_back(llvm::ConstantInt::get(LongTy, info)); + Elements.push_back(InstanceSize); + Elements.push_back(IVars); + Elements.push_back(Methods); + Elements.push_back(NullP); + Elements.push_back(NullP); + Elements.push_back(NullP); + Elements.push_back(llvm::ConstantExpr::getBitCast(Protocols, PtrTy)); + Elements.push_back(NullP); + // Create an instance of the structure + return MakeGlobal(ClassTy, Elements, SymbolNameForClass(Name)); +} + +llvm::Constant *CGObjCGNU::GenerateProtocolMethodList( + const llvm::SmallVectorImpl<llvm::Constant *> &MethodNames, + const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes) { + // Get the method structure type. + llvm::StructType *ObjCMethodDescTy = llvm::StructType::get( + PtrToInt8Ty, // Really a selector, but the runtime does the casting for us. 
+ PtrToInt8Ty, + NULL); + std::vector<llvm::Constant*> Methods; + std::vector<llvm::Constant*> Elements; + for (unsigned int i = 0, e = MethodTypes.size() ; i < e ; i++) { + Elements.clear(); + Elements.push_back( llvm::ConstantExpr::getGetElementPtr(MethodNames[i], + Zeros, 2)); + Elements.push_back( + llvm::ConstantExpr::getGetElementPtr(MethodTypes[i], Zeros, 2)); + Methods.push_back(llvm::ConstantStruct::get(ObjCMethodDescTy, Elements)); + } + llvm::ArrayType *ObjCMethodArrayTy = llvm::ArrayType::get(ObjCMethodDescTy, + MethodNames.size()); + llvm::Constant *Array = llvm::ConstantArray::get(ObjCMethodArrayTy, Methods); + llvm::StructType *ObjCMethodDescListTy = llvm::StructType::get( + IntTy, ObjCMethodArrayTy, NULL); + Methods.clear(); + Methods.push_back(llvm::ConstantInt::get(IntTy, MethodNames.size())); + Methods.push_back(Array); + return MakeGlobal(ObjCMethodDescListTy, Methods, ".objc_method_list"); +} +// Create the protocol list structure used in classes, categories and so on +llvm::Constant *CGObjCGNU::GenerateProtocolList( + const llvm::SmallVectorImpl<std::string> &Protocols) { + llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrToInt8Ty, + Protocols.size()); + llvm::StructType *ProtocolListTy = llvm::StructType::get( + PtrTy, //Should be a recurisve pointer, but it's always NULL here. 
+ LongTy,//FIXME: Should be size_t + ProtocolArrayTy, + NULL); + std::vector<llvm::Constant*> Elements; + for (const std::string *iter = Protocols.begin(), *endIter = Protocols.end(); + iter != endIter ; iter++) { + llvm::Constant *protocol = ExistingProtocols[*iter]; + if (!protocol) + protocol = GenerateEmptyProtocol(*iter); + llvm::Constant *Ptr = + llvm::ConstantExpr::getBitCast(protocol, PtrToInt8Ty); + Elements.push_back(Ptr); + } + llvm::Constant * ProtocolArray = llvm::ConstantArray::get(ProtocolArrayTy, + Elements); + Elements.clear(); + Elements.push_back(NULLPtr); + Elements.push_back(llvm::ConstantInt::get(LongTy, Protocols.size())); + Elements.push_back(ProtocolArray); + return MakeGlobal(ProtocolListTy, Elements, ".objc_protocol_list"); +} + +llvm::Value *CGObjCGNU::GenerateProtocolRef(CGBuilderTy &Builder, + const ObjCProtocolDecl *PD) { + llvm::Value *protocol = ExistingProtocols[PD->getNameAsString()]; + const llvm::Type *T = + CGM.getTypes().ConvertType(CGM.getContext().getObjCProtoType()); + return Builder.CreateBitCast(protocol, llvm::PointerType::getUnqual(T)); +} + +llvm::Constant *CGObjCGNU::GenerateEmptyProtocol( + const std::string &ProtocolName) { + llvm::SmallVector<std::string, 0> EmptyStringVector; + llvm::SmallVector<llvm::Constant*, 0> EmptyConstantVector; + + llvm::Constant *ProtocolList = GenerateProtocolList(EmptyStringVector); + llvm::Constant *InstanceMethodList = + GenerateProtocolMethodList(EmptyConstantVector, EmptyConstantVector); + llvm::Constant *ClassMethodList = + GenerateProtocolMethodList(EmptyConstantVector, EmptyConstantVector); + // Protocols are objects containing lists of the methods implemented and + // protocols adopted. 
+ llvm::StructType *ProtocolTy = llvm::StructType::get(IdTy, + PtrToInt8Ty, + ProtocolList->getType(), + InstanceMethodList->getType(), + ClassMethodList->getType(), + NULL); + std::vector<llvm::Constant*> Elements; + // The isa pointer must be set to a magic number so the runtime knows it's + // the correct layout. + Elements.push_back(llvm::ConstantExpr::getIntToPtr( + llvm::ConstantInt::get(llvm::Type::Int32Ty, ProtocolVersion), IdTy)); + Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name")); + Elements.push_back(ProtocolList); + Elements.push_back(InstanceMethodList); + Elements.push_back(ClassMethodList); + return MakeGlobal(ProtocolTy, Elements, ".objc_protocol"); +} + +void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) { + ASTContext &Context = CGM.getContext(); + std::string ProtocolName = PD->getNameAsString(); + llvm::SmallVector<std::string, 16> Protocols; + for (ObjCProtocolDecl::protocol_iterator PI = PD->protocol_begin(), + E = PD->protocol_end(); PI != E; ++PI) + Protocols.push_back((*PI)->getNameAsString()); + llvm::SmallVector<llvm::Constant*, 16> InstanceMethodNames; + llvm::SmallVector<llvm::Constant*, 16> InstanceMethodTypes; + for (ObjCProtocolDecl::instmeth_iterator iter = PD->instmeth_begin(Context), + E = PD->instmeth_end(Context); iter != E; iter++) { + std::string TypeStr; + Context.getObjCEncodingForMethodDecl(*iter, TypeStr); + InstanceMethodNames.push_back( + CGM.GetAddrOfConstantCString((*iter)->getSelector().getAsString())); + InstanceMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr)); + } + // Collect information about class methods: + llvm::SmallVector<llvm::Constant*, 16> ClassMethodNames; + llvm::SmallVector<llvm::Constant*, 16> ClassMethodTypes; + for (ObjCProtocolDecl::classmeth_iterator + iter = PD->classmeth_begin(Context), + endIter = PD->classmeth_end(Context) ; iter != endIter ; iter++) { + std::string TypeStr; + Context.getObjCEncodingForMethodDecl((*iter),TypeStr); + 
ClassMethodNames.push_back( + CGM.GetAddrOfConstantCString((*iter)->getSelector().getAsString())); + ClassMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr)); + } + + llvm::Constant *ProtocolList = GenerateProtocolList(Protocols); + llvm::Constant *InstanceMethodList = + GenerateProtocolMethodList(InstanceMethodNames, InstanceMethodTypes); + llvm::Constant *ClassMethodList = + GenerateProtocolMethodList(ClassMethodNames, ClassMethodTypes); + // Protocols are objects containing lists of the methods implemented and + // protocols adopted. + llvm::StructType *ProtocolTy = llvm::StructType::get(IdTy, + PtrToInt8Ty, + ProtocolList->getType(), + InstanceMethodList->getType(), + ClassMethodList->getType(), + NULL); + std::vector<llvm::Constant*> Elements; + // The isa pointer must be set to a magic number so the runtime knows it's + // the correct layout. + Elements.push_back(llvm::ConstantExpr::getIntToPtr( + llvm::ConstantInt::get(llvm::Type::Int32Ty, ProtocolVersion), IdTy)); + Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name")); + Elements.push_back(ProtocolList); + Elements.push_back(InstanceMethodList); + Elements.push_back(ClassMethodList); + ExistingProtocols[ProtocolName] = + llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolTy, Elements, + ".objc_protocol"), IdTy); +} + +void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) { + std::string ClassName = OCD->getClassInterface()->getNameAsString(); + std::string CategoryName = OCD->getNameAsString(); + // Collect information about instance methods + llvm::SmallVector<Selector, 16> InstanceMethodSels; + llvm::SmallVector<llvm::Constant*, 16> InstanceMethodTypes; + for (ObjCCategoryImplDecl::instmeth_iterator + iter = OCD->instmeth_begin(CGM.getContext()), + endIter = OCD->instmeth_end(CGM.getContext()); + iter != endIter ; iter++) { + InstanceMethodSels.push_back((*iter)->getSelector()); + std::string TypeStr; + 
CGM.getContext().getObjCEncodingForMethodDecl(*iter,TypeStr); + InstanceMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr)); + } + + // Collect information about class methods + llvm::SmallVector<Selector, 16> ClassMethodSels; + llvm::SmallVector<llvm::Constant*, 16> ClassMethodTypes; + for (ObjCCategoryImplDecl::classmeth_iterator + iter = OCD->classmeth_begin(CGM.getContext()), + endIter = OCD->classmeth_end(CGM.getContext()); + iter != endIter ; iter++) { + ClassMethodSels.push_back((*iter)->getSelector()); + std::string TypeStr; + CGM.getContext().getObjCEncodingForMethodDecl(*iter,TypeStr); + ClassMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr)); + } + + // Collect the names of referenced protocols + llvm::SmallVector<std::string, 16> Protocols; + const ObjCInterfaceDecl *ClassDecl = OCD->getClassInterface(); + const ObjCList<ObjCProtocolDecl> &Protos =ClassDecl->getReferencedProtocols(); + for (ObjCList<ObjCProtocolDecl>::iterator I = Protos.begin(), + E = Protos.end(); I != E; ++I) + Protocols.push_back((*I)->getNameAsString()); + + std::vector<llvm::Constant*> Elements; + Elements.push_back(MakeConstantString(CategoryName)); + Elements.push_back(MakeConstantString(ClassName)); + // Instance method list + Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList( + ClassName, CategoryName, InstanceMethodSels, InstanceMethodTypes, + false), PtrTy)); + // Class method list + Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList( + ClassName, CategoryName, ClassMethodSels, ClassMethodTypes, true), + PtrTy)); + // Protocol list + Elements.push_back(llvm::ConstantExpr::getBitCast( + GenerateProtocolList(Protocols), PtrTy)); + Categories.push_back(llvm::ConstantExpr::getBitCast( + MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, PtrTy, + PtrTy, PtrTy, NULL), Elements), PtrTy)); +} + +void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) { + ASTContext &Context = CGM.getContext(); + + // Get the 
superclass name. + const ObjCInterfaceDecl * SuperClassDecl = + OID->getClassInterface()->getSuperClass(); + std::string SuperClassName; + if (SuperClassDecl) + SuperClassName = SuperClassDecl->getNameAsString(); + + // Get the class name + ObjCInterfaceDecl *ClassDecl = + const_cast<ObjCInterfaceDecl *>(OID->getClassInterface()); + std::string ClassName = ClassDecl->getNameAsString(); + + // Get the size of instances. + int instanceSize = Context.getASTObjCImplementationLayout(OID).getSize() / 8; + + // Collect information about instance variables. + llvm::SmallVector<llvm::Constant*, 16> IvarNames; + llvm::SmallVector<llvm::Constant*, 16> IvarTypes; + llvm::SmallVector<llvm::Constant*, 16> IvarOffsets; + + int superInstanceSize = !SuperClassDecl ? 0 : + Context.getASTObjCInterfaceLayout(SuperClassDecl).getSize() / 8; + // For non-fragile ivars, set the instance size to 0 - {the size of just this + // class}. The runtime will then set this to the correct value on load. + if (CGM.getContext().getLangOptions().ObjCNonFragileABI) { + instanceSize = 0 - (instanceSize - superInstanceSize); + } + for (ObjCInterfaceDecl::ivar_iterator iter = ClassDecl->ivar_begin(), + endIter = ClassDecl->ivar_end() ; iter != endIter ; iter++) { + // Store the name + IvarNames.push_back(CGM.GetAddrOfConstantCString((*iter) + ->getNameAsString())); + // Get the type encoding for this ivar + std::string TypeStr; + Context.getObjCEncodingForType((*iter)->getType(), TypeStr); + IvarTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr)); + // Get the offset + uint64_t Offset; + if (CGM.getContext().getLangOptions().ObjCNonFragileABI) { + Offset = ComputeIvarBaseOffset(CGM, ClassDecl, *iter) - + superInstanceSize; + ObjCIvarOffsetVariable(ClassDecl, *iter); + } else { + Offset = ComputeIvarBaseOffset(CGM, ClassDecl, *iter); + } + IvarOffsets.push_back( + llvm::ConstantInt::get(llvm::Type::Int32Ty, Offset)); + } + + // Collect information about instance methods + llvm::SmallVector<Selector, 16> 
InstanceMethodSels; + llvm::SmallVector<llvm::Constant*, 16> InstanceMethodTypes; + for (ObjCImplementationDecl::instmeth_iterator + iter = OID->instmeth_begin(CGM.getContext()), + endIter = OID->instmeth_end(CGM.getContext()); + iter != endIter ; iter++) { + InstanceMethodSels.push_back((*iter)->getSelector()); + std::string TypeStr; + Context.getObjCEncodingForMethodDecl((*iter),TypeStr); + InstanceMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr)); + } + for (ObjCImplDecl::propimpl_iterator + iter = OID->propimpl_begin(CGM.getContext()), + endIter = OID->propimpl_end(CGM.getContext()); + iter != endIter ; iter++) { + ObjCPropertyDecl *property = (*iter)->getPropertyDecl(); + if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) { + InstanceMethodSels.push_back(getter->getSelector()); + std::string TypeStr; + Context.getObjCEncodingForMethodDecl(getter,TypeStr); + InstanceMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr)); + } + if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) { + InstanceMethodSels.push_back(setter->getSelector()); + std::string TypeStr; + Context.getObjCEncodingForMethodDecl(setter,TypeStr); + InstanceMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr)); + } + } + + // Collect information about class methods + llvm::SmallVector<Selector, 16> ClassMethodSels; + llvm::SmallVector<llvm::Constant*, 16> ClassMethodTypes; + for (ObjCImplementationDecl::classmeth_iterator + iter = OID->classmeth_begin(CGM.getContext()), + endIter = OID->classmeth_end(CGM.getContext()); + iter != endIter ; iter++) { + ClassMethodSels.push_back((*iter)->getSelector()); + std::string TypeStr; + Context.getObjCEncodingForMethodDecl((*iter),TypeStr); + ClassMethodTypes.push_back(CGM.GetAddrOfConstantCString(TypeStr)); + } + // Collect the names of referenced protocols + llvm::SmallVector<std::string, 16> Protocols; + const ObjCList<ObjCProtocolDecl> &Protos =ClassDecl->getReferencedProtocols(); + for 
(ObjCList<ObjCProtocolDecl>::iterator I = Protos.begin(), + E = Protos.end(); I != E; ++I) + Protocols.push_back((*I)->getNameAsString()); + + + + // Get the superclass pointer. + llvm::Constant *SuperClass; + if (!SuperClassName.empty()) { + SuperClass = MakeConstantString(SuperClassName, ".super_class_name"); + } else { + SuperClass = llvm::ConstantPointerNull::get(PtrToInt8Ty); + } + // Empty vector used to construct empty method lists + llvm::SmallVector<llvm::Constant*, 1> empty; + // Generate the method and instance variable lists + llvm::Constant *MethodList = GenerateMethodList(ClassName, "", + InstanceMethodSels, InstanceMethodTypes, false); + llvm::Constant *ClassMethodList = GenerateMethodList(ClassName, "", + ClassMethodSels, ClassMethodTypes, true); + llvm::Constant *IvarList = GenerateIvarList(IvarNames, IvarTypes, + IvarOffsets); + //Generate metaclass for class methods + llvm::Constant *MetaClassStruct = GenerateClassStructure(NULLPtr, + NULLPtr, 0x2L, /*name*/"", 0, Zeros[0], GenerateIvarList( + empty, empty, empty), ClassMethodList, NULLPtr); + + // Generate the class structure + llvm::Constant *ClassStruct = + GenerateClassStructure(MetaClassStruct, SuperClass, 0x1L, + ClassName.c_str(), 0, + llvm::ConstantInt::get(LongTy, instanceSize), IvarList, + MethodList, GenerateProtocolList(Protocols)); + + // Resolve the class aliases, if they exist. 
+ if (ClassPtrAlias) { + ClassPtrAlias->setAliasee( + llvm::ConstantExpr::getBitCast(ClassStruct, IdTy)); + ClassPtrAlias = 0; + } + if (MetaClassPtrAlias) { + MetaClassPtrAlias->setAliasee( + llvm::ConstantExpr::getBitCast(MetaClassStruct, IdTy)); + MetaClassPtrAlias = 0; + } + + // Add class structure to list to be added to the symtab later + ClassStruct = llvm::ConstantExpr::getBitCast(ClassStruct, PtrToInt8Ty); + Classes.push_back(ClassStruct); +} +
/* Emits the GNU-runtime module metadata: the static-instance list, the typed and
   untyped selector tables, the class/category symbol table, and the module
   structure itself, plus an internal ".objc_load_function" that hands the module
   to the runtime via __objc_exec_class().  Returns NULL when this translation
   unit used no Objective-C constructs at all. */
+llvm::Function *CGObjCGNU::ModuleInitFunction() { + // Only emit an ObjC load function if no Objective-C stuff has been called + if (Classes.empty() && Categories.empty() && ConstantStrings.empty() && + ExistingProtocols.empty() && TypedSelectors.empty() && + UntypedSelectors.empty()) + return NULL; + + const llvm::StructType *SelStructTy = dyn_cast<llvm::StructType>( + SelectorTy->getElementType()); + const llvm::Type *SelStructPtrTy = SelectorTy; + bool isSelOpaque = false; + if (SelStructTy == 0) { + SelStructTy = llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, NULL); + SelStructPtrTy = llvm::PointerType::getUnqual(SelStructTy); + isSelOpaque = true; + } + + // Name the ObjC types to make the IR a bit easier to read + TheModule.addTypeName(".objc_selector", SelStructPtrTy); + TheModule.addTypeName(".objc_id", IdTy); + TheModule.addTypeName(".objc_imp", IMPTy); +
/* "Elements" is deliberately reused as a scratch vector for every aggregate
   built below; each use ends with Elements.clear(). */
+ std::vector<llvm::Constant*> Elements; + llvm::Constant *Statics = NULLPtr; + // Generate statics list: + if (ConstantStrings.size()) { + llvm::ArrayType *StaticsArrayTy = llvm::ArrayType::get(PtrToInt8Ty, + ConstantStrings.size() + 1); + ConstantStrings.push_back(NULLPtr); + Elements.push_back(MakeConstantString("NSConstantString", + ".objc_static_class_name")); + Elements.push_back(llvm::ConstantArray::get(StaticsArrayTy, + ConstantStrings)); + llvm::StructType *StaticsListTy = + llvm::StructType::get(PtrToInt8Ty, StaticsArrayTy, NULL); + llvm::Type *StaticsListPtrTy = llvm::PointerType::getUnqual(StaticsListTy); + Statics = MakeGlobal(StaticsListTy, 
Elements, ".objc_statics"); + llvm::ArrayType *StaticsListArrayTy = + llvm::ArrayType::get(StaticsListPtrTy, 2); + Elements.clear(); + Elements.push_back(Statics); + Elements.push_back(llvm::Constant::getNullValue(StaticsListPtrTy)); + Statics = MakeGlobal(StaticsListArrayTy, Elements, ".objc_statics_ptr"); + Statics = llvm::ConstantExpr::getBitCast(Statics, PtrTy); + } + // Array of classes, categories, and constant objects + llvm::ArrayType *ClassListTy = llvm::ArrayType::get(PtrToInt8Ty, + Classes.size() + Categories.size() + 2); + llvm::StructType *SymTabTy = llvm::StructType::get(LongTy, SelStructPtrTy, + llvm::Type::Int16Ty, + llvm::Type::Int16Ty, + ClassListTy, NULL); + + Elements.clear(); + // Pointer to an array of selectors used in this module. + std::vector<llvm::Constant*> Selectors; + for (std::map<TypedSelector, llvm::GlobalAlias*>::iterator + iter = TypedSelectors.begin(), iterEnd = TypedSelectors.end(); + iter != iterEnd ; ++iter) { + Elements.push_back(MakeConstantString(iter->first.first, ".objc_sel_name")); + Elements.push_back(MakeConstantString(iter->first.second, + ".objc_sel_types")); + Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements)); + Elements.clear(); + } + for (llvm::StringMap<llvm::GlobalAlias*>::iterator + iter = UntypedSelectors.begin(), iterEnd = UntypedSelectors.end(); + iter != iterEnd; ++iter) { + Elements.push_back( + MakeConstantString(iter->getKeyData(), ".objc_sel_name")); + Elements.push_back(NULLPtr); + Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements)); + Elements.clear(); + }
/* NULL-terminator entry for the selector table. */
+ Elements.push_back(NULLPtr); + Elements.push_back(NULLPtr); + Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements)); + Elements.clear(); + // Number of static selectors + Elements.push_back(llvm::ConstantInt::get(LongTy, Selectors.size() )); + llvm::Constant *SelectorList = MakeGlobal( + llvm::ArrayType::get(SelStructTy, Selectors.size()), Selectors, + ".objc_selector_list"); + 
Elements.push_back(llvm::ConstantExpr::getBitCast(SelectorList, + SelStructPtrTy)); + + // Now that all of the static selectors exist, create pointers to them. + int index = 0; + for (std::map<TypedSelector, llvm::GlobalAlias*>::iterator + iter=TypedSelectors.begin(), iterEnd =TypedSelectors.end(); + iter != iterEnd; ++iter) { + llvm::Constant *Idxs[] = {Zeros[0], + llvm::ConstantInt::get(llvm::Type::Int32Ty, index++), Zeros[0]}; + llvm::Constant *SelPtr = new llvm::GlobalVariable(SelStructPtrTy, + true, llvm::GlobalValue::InternalLinkage, + llvm::ConstantExpr::getGetElementPtr(SelectorList, Idxs, 2), + ".objc_sel_ptr", &TheModule); + // If selectors are defined as an opaque type, cast the pointer to this + // type. + if (isSelOpaque) { + SelPtr = llvm::ConstantExpr::getBitCast(SelPtr, + llvm::PointerType::getUnqual(SelectorTy)); + } + (*iter).second->setAliasee(SelPtr); + }
/* Same replacement pass for the untyped selectors; note "index" keeps counting
   across both loops, matching the single SelectorList layout above. */
+ for (llvm::StringMap<llvm::GlobalAlias*>::iterator + iter=UntypedSelectors.begin(), iterEnd = UntypedSelectors.end(); + iter != iterEnd; iter++) { + llvm::Constant *Idxs[] = {Zeros[0], + llvm::ConstantInt::get(llvm::Type::Int32Ty, index++), Zeros[0]}; + llvm::Constant *SelPtr = new llvm::GlobalVariable(SelStructPtrTy, true, + llvm::GlobalValue::InternalLinkage, + llvm::ConstantExpr::getGetElementPtr(SelectorList, Idxs, 2), + ".objc_sel_ptr", &TheModule); + // If selectors are defined as an opaque type, cast the pointer to this + // type. + if (isSelOpaque) { + SelPtr = llvm::ConstantExpr::getBitCast(SelPtr, + llvm::PointerType::getUnqual(SelectorTy)); + } + (*iter).second->setAliasee(SelPtr); + } + // Number of classes defined. 
+ Elements.push_back(llvm::ConstantInt::get(llvm::Type::Int16Ty, + Classes.size())); + // Number of categories defined + Elements.push_back(llvm::ConstantInt::get(llvm::Type::Int16Ty, + Categories.size())); + // Create an array of classes, then categories, then static object instances + Classes.insert(Classes.end(), Categories.begin(), Categories.end()); + // NULL-terminated list of static object instances (mainly constant strings) + Classes.push_back(Statics); + Classes.push_back(NULLPtr); + llvm::Constant *ClassList = llvm::ConstantArray::get(ClassListTy, Classes); + Elements.push_back(ClassList); + // Construct the symbol table + llvm::Constant *SymTab= MakeGlobal(SymTabTy, Elements); + + // The symbol table is contained in a module which has some version-checking + // constants + llvm::StructType * ModuleTy = llvm::StructType::get(LongTy, LongTy, + PtrToInt8Ty, llvm::PointerType::getUnqual(SymTabTy), NULL); + Elements.clear(); + // Runtime version used for compatibility checking. + if (CGM.getContext().getLangOptions().ObjCNonFragileABI) { + Elements.push_back(llvm::ConstantInt::get(LongTy, + NonFragileRuntimeVersion)); + } else { + Elements.push_back(llvm::ConstantInt::get(LongTy, RuntimeVersion)); + } + // sizeof(ModuleTy) + llvm::TargetData td = llvm::TargetData::TargetData(&TheModule); + Elements.push_back(llvm::ConstantInt::get(LongTy, td.getTypeSizeInBits(ModuleTy)/8)); + //FIXME: Should be the path to the file where this module was declared + Elements.push_back(NULLPtr); + Elements.push_back(SymTab); + llvm::Value *Module = MakeGlobal(ModuleTy, Elements); + + // Create the load function calling the runtime entry point with the module + // structure + std::vector<const llvm::Type*> VoidArgs; + llvm::Function * LoadFunction = llvm::Function::Create( + llvm::FunctionType::get(llvm::Type::VoidTy, VoidArgs, false), + llvm::GlobalValue::InternalLinkage, ".objc_load_function", + &TheModule); + llvm::BasicBlock *EntryBB = llvm::BasicBlock::Create("entry", 
LoadFunction); + CGBuilderTy Builder; + Builder.SetInsertPoint(EntryBB); + + std::vector<const llvm::Type*> Params(1, + llvm::PointerType::getUnqual(ModuleTy)); + llvm::Value *Register = CGM.CreateRuntimeFunction(llvm::FunctionType::get( + llvm::Type::VoidTy, Params, true), "__objc_exec_class"); + Builder.CreateCall(Register, Module); + Builder.CreateRetVoid(); + + return LoadFunction; +} +
/* Creates (but does not fill in) the internal-linkage llvm::Function that will
   hold the body of one Objective-C method implementation; the mangled symbol
   name encodes class, optional category, selector and instance/class-ness. */
+llvm::Function *CGObjCGNU::GenerateMethod(const ObjCMethodDecl *OMD, + const ObjCContainerDecl *CD) { + const ObjCCategoryImplDecl *OCD = + dyn_cast<ObjCCategoryImplDecl>(OMD->getDeclContext()); + std::string CategoryName = OCD ? OCD->getNameAsString() : ""; + std::string ClassName = OMD->getClassInterface()->getNameAsString(); + std::string MethodName = OMD->getSelector().getAsString(); + bool isClassMethod = !OMD->isInstanceMethod(); + + CodeGenTypes &Types = CGM.getTypes(); + const llvm::FunctionType *MethodTy = + Types.GetFunctionType(Types.getFunctionInfo(OMD), OMD->isVariadic()); + std::string FunctionName = SymbolNameForMethod(ClassName, CategoryName, + MethodName, isClassMethod); + + llvm::Function *Method = llvm::Function::Create(MethodTy, + llvm::GlobalValue::InternalLinkage, + FunctionName, + &TheModule); + return Method; +} +
/* Returns the runtime helper used for synthesized property getters. */
+llvm::Function *CGObjCGNU::GetPropertyGetFunction() { + std::vector<const llvm::Type*> Params; + const llvm::Type *BoolTy = + CGM.getTypes().ConvertType(CGM.getContext().BoolTy); + Params.push_back(IdTy); + Params.push_back(SelectorTy); + // FIXME: Using LongTy for ptrdiff_t is probably broken on Win64 + Params.push_back(LongTy); + Params.push_back(BoolTy); + // id objc_getProperty (id, SEL, ptrdiff_t, bool) + const llvm::FunctionType *FTy = + llvm::FunctionType::get(IdTy, Params, false); + return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy, + "objc_getProperty")); +} +
/* Returns the runtime helper used for synthesized property setters. */
+llvm::Function *CGObjCGNU::GetPropertySetFunction() { + std::vector<const llvm::Type*> Params; + const llvm::Type *BoolTy = + 
CGM.getTypes().ConvertType(CGM.getContext().BoolTy); + Params.push_back(IdTy); + Params.push_back(SelectorTy); + // FIXME: Using LongTy for ptrdiff_t is probably broken on Win64 + Params.push_back(LongTy); + Params.push_back(IdTy); + Params.push_back(BoolTy); + Params.push_back(BoolTy); + // void objc_setProperty (id, SEL, ptrdiff_t, id, bool, bool) + const llvm::FunctionType *FTy = + llvm::FunctionType::get(llvm::Type::VoidTy, Params, false); + return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy, + "objc_setProperty")); +} +
/* Runtime helper called when a collection is mutated during fast enumeration. */
+llvm::Function *CGObjCGNU::EnumerationMutationFunction() { + std::vector<const llvm::Type*> Params(1, IdTy); + return cast<llvm::Function>(CGM.CreateRuntimeFunction( + llvm::FunctionType::get(llvm::Type::VoidTy, Params, true), + "objc_enumerationMutation")); +} +
/* Emits @try/@catch/@finally and @synchronized using the DWARF-style
   llvm.eh.exception / llvm.eh.selector / llvm.eh.typeid.for intrinsics with the
   __gnu_objc_personality_v0 personality.  @synchronized is lowered as a @try
   whose "finally" calls objc_sync_exit. */
+void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, + const Stmt &S) { + // Pointer to the personality function + llvm::Constant *Personality = + CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::Int32Ty, + std::vector<const llvm::Type*>(), true), + "__gnu_objc_personality_v0"); + Personality = llvm::ConstantExpr::getBitCast(Personality, PtrTy); + std::vector<const llvm::Type*> Params; + Params.push_back(PtrTy); + llvm::Value *RethrowFn = + CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy, + Params, false), "_Unwind_Resume_or_Rethrow"); + + bool isTry = isa<ObjCAtTryStmt>(S); + llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try"); + llvm::BasicBlock *PrevLandingPad = CGF.getInvokeDest(); + llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler"); + llvm::BasicBlock *CatchInCatch = CGF.createBasicBlock("catch.rethrow"); + llvm::BasicBlock *FinallyBlock = CGF.createBasicBlock("finally"); + llvm::BasicBlock *FinallyRethrow = CGF.createBasicBlock("finally.throw"); + llvm::BasicBlock *FinallyEnd = CGF.createBasicBlock("finally.end"); + + // GNU runtime does not currently support @synchronized() + if (!isTry) { + 
std::vector<const llvm::Type*> Args(1, IdTy); + llvm::FunctionType *FTy = + llvm::FunctionType::get(llvm::Type::VoidTy, Args, false); + llvm::Value *SyncEnter = CGM.CreateRuntimeFunction(FTy, "objc_sync_enter"); + llvm::Value *SyncArg = + CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr()); + SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy); + CGF.Builder.CreateCall(SyncEnter, SyncArg); + } + + + // Push an EH context entry, used for handling rethrows and jumps + // through finally. + CGF.PushCleanupBlock(FinallyBlock); + + // Emit the statements in the @try {} block + CGF.setInvokeDest(TryHandler); + + CGF.EmitBlock(TryBlock); + CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody() + : cast<ObjCAtSynchronizedStmt>(S).getSynchBody()); + + // Jump to @finally if there is no exception + CGF.EmitBranchThroughCleanup(FinallyEnd); + + // Emit the handlers + CGF.EmitBlock(TryHandler); + + // Get the correct versions of the exception handling intrinsics + llvm::TargetData td = llvm::TargetData::TargetData(&TheModule); + int PointerWidth = td.getTypeSizeInBits(PtrTy); + assert((PointerWidth == 32 || PointerWidth == 64) && + "Can't yet handle exceptions if pointers are not 32 or 64 bits"); + llvm::Value *llvm_eh_exception = + CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_exception); + llvm::Value *llvm_eh_selector = PointerWidth == 32 ? + CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector_i32) : + CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector_i64); + llvm::Value *llvm_eh_typeid_for = PointerWidth == 32 ? 
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for_i32) : + CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for_i64); + + // Exception object + llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc"); + llvm::Value *RethrowPtr = CGF.CreateTempAlloca(Exc->getType(), "_rethrow"); + + llvm::SmallVector<llvm::Value*, 8> ESelArgs; + llvm::SmallVector<std::pair<const ParmVarDecl*, const Stmt*>, 8> Handlers; + + ESelArgs.push_back(Exc); + ESelArgs.push_back(Personality); + + bool HasCatchAll = false; + // Only @try blocks are allowed @catch blocks, but both can have @finally + if (isTry) { + if (const ObjCAtCatchStmt* CatchStmt = + cast<ObjCAtTryStmt>(S).getCatchStmts()) { + CGF.setInvokeDest(CatchInCatch); + + for (; CatchStmt; CatchStmt = CatchStmt->getNextCatchStmt()) { + const ParmVarDecl *CatchDecl = CatchStmt->getCatchParamDecl(); + Handlers.push_back(std::make_pair(CatchDecl, CatchStmt->getCatchBody())); + + // @catch() and @catch(id) both catch any ObjC exception + if (!CatchDecl || CGF.getContext().isObjCIdType(CatchDecl->getType()) + || CatchDecl->getType()->isObjCQualifiedIdType()) { + // Use i8* null here to signal this is a catch all, not a cleanup. + ESelArgs.push_back(NULLPtr); + HasCatchAll = true; + // No further catches after this one will ever be reached + break; + } + + // All other types should be Objective-C interface pointer types. + const PointerType *PT = CatchDecl->getType()->getAsPointerType(); + assert(PT && "Invalid @catch type."); + const ObjCInterfaceType *IT = + PT->getPointeeType()->getAsObjCInterfaceType(); + assert(IT && "Invalid @catch type."); + llvm::Value *EHType = + MakeConstantString(IT->getDecl()->getNameAsString()); + ESelArgs.push_back(EHType); + } + } + } + + // We use a cleanup unless there was already a catch all. 
+ if (!HasCatchAll) { + ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0)); + Handlers.push_back(std::make_pair((const ParmVarDecl*) 0, (const Stmt*) 0)); + } + + // Find which handler was matched. + llvm::Value *ESelector = CGF.Builder.CreateCall(llvm_eh_selector, + ESelArgs.begin(), ESelArgs.end(), "selector"); + + for (unsigned i = 0, e = Handlers.size(); i != e; ++i) { + const ParmVarDecl *CatchParam = Handlers[i].first; + const Stmt *CatchBody = Handlers[i].second; + + llvm::BasicBlock *Next = 0; + + // The last handler always matches. + if (i + 1 != e) { + assert(CatchParam && "Only last handler can be a catch all."); + + // Test whether this block matches the type for the selector and branch + // to Match if it does, or to the next BB if it doesn't. + llvm::BasicBlock *Match = CGF.createBasicBlock("match"); + Next = CGF.createBasicBlock("catch.next"); + llvm::Value *Id = CGF.Builder.CreateCall(llvm_eh_typeid_for, + CGF.Builder.CreateBitCast(ESelArgs[i+2], PtrTy)); + CGF.Builder.CreateCondBr(CGF.Builder.CreateICmpEQ(ESelector, Id), Match, + Next); + + CGF.EmitBlock(Match); + } +
/* NOTE(review): if the last handler is a body-bearing catch-all, i.e. "@catch()"
   with a NULL CatchParamDecl (pushed to Handlers above with HasCatchAll=true),
   then CatchBody is non-null while CatchParam is null, and the
   CatchParam->getType() below is evaluated BEFORE the "if (CatchParam)" null
   check — a null dereference.  TODO: confirm against a "@catch() {...}" test
   case and hoist the bitcast inside the guard. */
+ if (CatchBody) { + llvm::Value *ExcObject = CGF.Builder.CreateBitCast(Exc, + CGF.ConvertType(CatchParam->getType())); + + // Bind the catch parameter if it exists. + if (CatchParam) { + // CatchParam is a ParmVarDecl because of the grammar + // construction used to handle this, but for codegen purposes + // we treat this as a local decl. 
+ CGF.EmitLocalBlockVarDecl(*CatchParam); + CGF.Builder.CreateStore(ExcObject, CGF.GetAddrOfLocalVar(CatchParam)); + } + + CGF.ObjCEHValueStack.push_back(ExcObject); + CGF.EmitStmt(CatchBody); + CGF.ObjCEHValueStack.pop_back(); + + CGF.EmitBranchThroughCleanup(FinallyEnd); + + if (Next) + CGF.EmitBlock(Next); + } else { + assert(!Next && "catchup should be last handler."); + + CGF.Builder.CreateStore(Exc, RethrowPtr); + CGF.EmitBranchThroughCleanup(FinallyRethrow); + } + } + // The @finally block is a secondary landing pad for any exceptions thrown in + // @catch() blocks + CGF.EmitBlock(CatchInCatch); + Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc"); + ESelArgs.clear(); + ESelArgs.push_back(Exc); + ESelArgs.push_back(Personality); + ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, 0)); + CGF.Builder.CreateCall(llvm_eh_selector, ESelArgs.begin(), ESelArgs.end(), + "selector"); + CGF.Builder.CreateCall(llvm_eh_typeid_for, + CGF.Builder.CreateIntToPtr(ESelArgs[2], PtrTy)); + CGF.Builder.CreateStore(Exc, RethrowPtr); + CGF.EmitBranchThroughCleanup(FinallyRethrow); + + CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock(); + + CGF.setInvokeDest(PrevLandingPad); + + CGF.EmitBlock(FinallyBlock); + + + if (isTry) { + if (const ObjCAtFinallyStmt* FinallyStmt = + cast<ObjCAtTryStmt>(S).getFinallyStmt()) + CGF.EmitStmt(FinallyStmt->getFinallyBody()); + } else { + // Emit 'objc_sync_exit(expr)' as finally's sole statement for + // @synchronized. 
+ std::vector<const llvm::Type*> Args(1, IdTy); + llvm::FunctionType *FTy = + llvm::FunctionType::get(llvm::Type::VoidTy, Args, false); + llvm::Value *SyncExit = CGM.CreateRuntimeFunction(FTy, "objc_sync_exit"); + llvm::Value *SyncArg = + CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr()); + SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy); + CGF.Builder.CreateCall(SyncExit, SyncArg); + } + + if (Info.SwitchBlock) + CGF.EmitBlock(Info.SwitchBlock); + if (Info.EndBlock) + CGF.EmitBlock(Info.EndBlock); + + // Branch around the rethrow code. + CGF.EmitBranch(FinallyEnd); + + CGF.EmitBlock(FinallyRethrow); + CGF.Builder.CreateCall(RethrowFn, CGF.Builder.CreateLoad(RethrowPtr)); + CGF.Builder.CreateUnreachable(); + + CGF.EmitBlock(FinallyEnd); + +} +
/* Emits @throw (with an expression) or a rethrow of the innermost in-flight
   @catch exception (without one) via objc_exception_throw; uses invoke when a
   landing pad is active so the throw itself can be caught by an enclosing
   handler in this function. */
+void CGObjCGNU::EmitThrowStmt(CodeGen::CodeGenFunction &CGF, + const ObjCAtThrowStmt &S) { + llvm::Value *ExceptionAsObject; + + std::vector<const llvm::Type*> Args(1, IdTy); + llvm::FunctionType *FTy = + llvm::FunctionType::get(llvm::Type::VoidTy, Args, false); + llvm::Value *ThrowFn = + CGM.CreateRuntimeFunction(FTy, "objc_exception_throw"); + + if (const Expr *ThrowExpr = S.getThrowExpr()) { + llvm::Value *Exception = CGF.EmitScalarExpr(ThrowExpr); + ExceptionAsObject = Exception; + } else { + assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) && + "Unexpected rethrow outside @catch block."); + ExceptionAsObject = CGF.ObjCEHValueStack.back(); + } + ExceptionAsObject = + CGF.Builder.CreateBitCast(ExceptionAsObject, IdTy, "tmp"); + + // Note: This may have to be an invoke, if we want to support constructs like: + // @try { + // @throw(obj); + // } + // @catch(id) ... + // + // This is effectively turning @throw into an incredibly-expensive goto, but + // it may happen as a result of inlining followed by missed optimizations, or + // as a result of stupidity. 
+ llvm::BasicBlock *UnwindBB = CGF.getInvokeDest(); + if (!UnwindBB) { + CGF.Builder.CreateCall(ThrowFn, ExceptionAsObject); + CGF.Builder.CreateUnreachable(); + } else { + CGF.Builder.CreateInvoke(ThrowFn, UnwindBB, UnwindBB, &ExceptionAsObject, + &ExceptionAsObject+1); + } + // Clear the insertion point to indicate we are in unreachable code. + CGF.Builder.ClearInsertionPoint(); +} +
/* GC read/write-barrier entry points: the GNU-runtime codegen implements no
   garbage-collection support here, so all five are deliberate no-ops
   (EmitObjCWeakRead signals this by returning 0). */
+llvm::Value * CGObjCGNU::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF, + llvm::Value *AddrWeakObj) +{ + return 0; +} + +void CGObjCGNU::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF, + llvm::Value *src, llvm::Value *dst) +{ + return; +} + +void CGObjCGNU::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF, + llvm::Value *src, llvm::Value *dst) +{ + return; +} + +void CGObjCGNU::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF, + llvm::Value *src, llvm::Value *dst) +{ + return; +} + +void CGObjCGNU::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF, + llvm::Value *src, llvm::Value *dst) +{ + return; +} +
/* Returns (creating on first use) the per-ivar offset variable used by the
   non-fragile ABI; it is given common linkage and a compile-time "best guess"
   initializer so the runtime/linker can override it with the real offset. */
+llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable( + const ObjCInterfaceDecl *ID, + const ObjCIvarDecl *Ivar) { + const std::string Name = "__objc_ivar_offset_" + ID->getNameAsString() + + '.' + Ivar->getNameAsString(); + // Emit the variable and initialize it with what we think the correct value + // is. This allows code compiled with non-fragile ivars to work correctly + // when linked against code which isn't (most of the time). 
+ llvm::GlobalVariable *IvarOffsetGV = CGM.getModule().getGlobalVariable(Name); + if (!IvarOffsetGV) { + uint64_t Offset = ComputeIvarBaseOffset(CGM, ID, Ivar); + llvm::ConstantInt *OffsetGuess = + llvm::ConstantInt::get(LongTy, Offset, "ivar"); + IvarOffsetGV = new llvm::GlobalVariable(LongTy, false, + llvm::GlobalValue::CommonLinkage, OffsetGuess, Name, &TheModule); + } + return IvarOffsetGV; +} +
/* Builds an LValue for an ivar access by delegating to the shared
   EmitValueForIvarAtOffset with this runtime's notion of the ivar offset. */
+LValue CGObjCGNU::EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF, + QualType ObjectTy, + llvm::Value *BaseValue, + const ObjCIvarDecl *Ivar, + unsigned CVRQualifiers) { + const ObjCInterfaceDecl *ID = ObjectTy->getAsObjCInterfaceType()->getDecl(); + return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers, + EmitIvarOffset(CGF, ID, Ivar)); +}
/* Walks the class hierarchy (declared and synthesized ivars) to find the
   interface that actually declares OIVD; returns 0 if none does. */
+static const ObjCInterfaceDecl *FindIvarInterface(ASTContext &Context, + const ObjCInterfaceDecl *OID, + const ObjCIvarDecl *OIVD) { + for (ObjCInterfaceDecl::ivar_iterator IVI = OID->ivar_begin(), + IVE = OID->ivar_end(); IVI != IVE; ++IVI) + if (OIVD == *IVI) + return OID; + + // Also look in synthesized ivars. + llvm::SmallVector<ObjCIvarDecl*, 16> Ivars; + Context.CollectSynthesizedIvars(OID, Ivars); + for (unsigned k = 0, e = Ivars.size(); k != e; ++k) { + if (OIVD == Ivars[k]) + return OID; + } + + // Otherwise check in the super class. 
+ if (const ObjCInterfaceDecl *Super = OID->getSuperClass()) + return FindIvarInterface(Context, Super, OIVD); + + return 0; +} +
/* Non-fragile ABI: load the offset from the runtime-patched offset variable at
   run time; fragile ABI: fold the statically computed offset to a constant. */
+llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGen::CodeGenFunction &CGF, + const ObjCInterfaceDecl *Interface, + const ObjCIvarDecl *Ivar) { + if (CGF.getContext().getLangOptions().ObjCNonFragileABI) + { + Interface = FindIvarInterface(CGM.getContext(), Interface, Ivar); + return CGF.Builder.CreateLoad(ObjCIvarOffsetVariable(Interface, Ivar), + false, "ivar"); + } + uint64_t Offset = ComputeIvarBaseOffset(CGF.CGM, Interface, Ivar); + return llvm::ConstantInt::get(LongTy, Offset, "ivar"); +} +
/* Factory entry point: the caller owns the returned runtime object. */
+CodeGen::CGObjCRuntime *CodeGen::CreateGNUObjCRuntime(CodeGen::CodeGenModule &CGM){ + return new CGObjCGNU(CGM); +} diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp new file mode 100644 index 000000000000..8f1404da65dd --- /dev/null +++ b/lib/CodeGen/CGObjCMac.cpp @@ -0,0 +1,5780 @@ +//===------- CGObjCMac.cpp - Interface to Apple Objective-C Runtime -------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This provides Objective-C code generation targeting the Apple runtime. 
+// +//===----------------------------------------------------------------------===// + +#include "CGObjCRuntime.h" + +#include "CodeGenModule.h" +#include "CodeGenFunction.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/RecordLayout.h" +#include "clang/AST/StmtObjC.h" +#include "clang/Basic/LangOptions.h" + +#include "llvm/Intrinsics.h" +#include "llvm/Module.h" +#include "llvm/ADT/DenseSet.h" +#include "llvm/Target/TargetData.h" +#include <sstream> + +using namespace clang; +using namespace CodeGen; + +// Common CGObjCRuntime functions, these don't belong here, but they +// don't belong in CGObjCRuntime either so we will live with it for +// now. + +/// FindIvarInterface - Find the interface containing the ivar. +/// +/// FIXME: We shouldn't need to do this, the containing context should +/// be fixed. +static const ObjCInterfaceDecl *FindIvarInterface(ASTContext &Context, + const ObjCInterfaceDecl *OID, + const ObjCIvarDecl *OIVD, + unsigned &Index) { + // FIXME: The index here is closely tied to how + // ASTContext::getObjCLayout is implemented. This should be fixed to + // get the information from the layout directly. + Index = 0; + for (ObjCInterfaceDecl::ivar_iterator IVI = OID->ivar_begin(), + IVE = OID->ivar_end(); IVI != IVE; ++IVI, ++Index) + if (OIVD == *IVI) + return OID; + + // Also look in synthesized ivars. + llvm::SmallVector<ObjCIvarDecl*, 16> Ivars; + Context.CollectSynthesizedIvars(OID, Ivars); + for (unsigned k = 0, e = Ivars.size(); k != e; ++k) { + if (OIVD == Ivars[k]) + return OID; + ++Index; + } + + // Otherwise check in the super class. 
+ if (const ObjCInterfaceDecl *Super = OID->getSuperClass()) + return FindIvarInterface(Context, Super, OIVD, Index); + + return 0; +} +
/* Returns the bit offset of Ivar within its containing class's layout; uses the
   implementation layout when ID covers the container, else the interface
   layout.  The Index out-parameter of FindIvarInterface is what ties this to
   ASTContext::getObjCLayout (see FIXME above). */
+static uint64_t LookupFieldBitOffset(CodeGen::CodeGenModule &CGM, + const ObjCInterfaceDecl *OID, + const ObjCImplementationDecl *ID, + const ObjCIvarDecl *Ivar) { + unsigned Index; + const ObjCInterfaceDecl *Container = + FindIvarInterface(CGM.getContext(), OID, Ivar, Index); + assert(Container && "Unable to find ivar container"); + + // If we now have an implementation (and the ivar is in it) then + // look up in the implementation layout. + const ASTRecordLayout *RL; + if (ID && ID->getClassInterface() == Container) + RL = &CGM.getContext().getASTObjCImplementationLayout(ID); + else + RL = &CGM.getContext().getASTObjCInterfaceLayout(Container); + return RL->getFieldOffset(Index); +} +
/* Byte offset of an ivar given only the interface (fragile/interface layout). */
+uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM, + const ObjCInterfaceDecl *OID, + const ObjCIvarDecl *Ivar) { + return LookupFieldBitOffset(CGM, OID, 0, Ivar) / 8; +} +
/* Byte offset of an ivar given the implementation (implementation layout). */
+uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM, + const ObjCImplementationDecl *OID, + const ObjCIvarDecl *Ivar) { + return LookupFieldBitOffset(CGM, OID->getClassInterface(), OID, Ivar) / 8; +} +
/* Shared ivar-access lowering: byte-GEPs BaseValue by Offset, bitcasts to the
   ivar's memory type, and wraps the result in an LValue (a bitfield LValue for
   bit-field ivars), merging the ivar's CVR qualifiers with the caller's. */
+LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF, + const ObjCInterfaceDecl *OID, + llvm::Value *BaseValue, + const ObjCIvarDecl *Ivar, + unsigned CVRQualifiers, + llvm::Value *Offset) { + // Compute (type*) ( (char *) BaseValue + Offset) + llvm::Type *I8Ptr = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + QualType IvarTy = Ivar->getType(); + const llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy); + llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, I8Ptr); + V = CGF.Builder.CreateGEP(V, Offset, "add.ptr"); + V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy)); + + if (Ivar->isBitField()) { + // We need to compute the bit offset for the bit-field, the offset + // is 
to the byte. Note, there is a subtle invariant here: we can + // only call this routine on non-synthesized ivars but we may be + // called for synthesized ivars. However, a synthesized ivar can + // never be a bit-field so this is safe. + uint64_t BitOffset = LookupFieldBitOffset(CGF.CGM, OID, 0, Ivar) % 8; + + uint64_t BitFieldSize = + Ivar->getBitWidth()->EvaluateAsInt(CGF.getContext()).getZExtValue(); + return LValue::MakeBitfield(V, BitOffset, BitFieldSize, + IvarTy->isSignedIntegerType(), + IvarTy.getCVRQualifiers()|CVRQualifiers); + } + + LValue LV = LValue::MakeAddr(V, IvarTy.getCVRQualifiers()|CVRQualifiers, + CGF.CGM.getContext().getObjCGCAttrKind(IvarTy)); + LValue::SetObjCIvar(LV, true); + return LV; +} + +/// + +namespace { + + typedef std::vector<llvm::Constant*> ConstantVector; + + // FIXME: We should find a nicer way to make the labels for metadata, string + // concatenation is lame. +
/* NOTE(review): ObjCCommonTypesHelper continues past this excerpt; the
   message-send accessors below declare the Apple runtime dispatch entry
   points with the IR-level signatures codegen expects. */
+class ObjCCommonTypesHelper { +private: + llvm::Constant *getMessageSendFn() const { + // id objc_msgSend (id, SEL, ...) + std::vector<const llvm::Type*> Params; + Params.push_back(ObjectPtrTy); + Params.push_back(SelectorPtrTy); + return + CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, + Params, true), + "objc_msgSend"); + } +
/* NOTE(review): declared comment says "id ... _stret" but the IR return type
   used here is void — the struct return goes through the hidden sret slot. */
+ llvm::Constant *getMessageSendStretFn() const { + // id objc_msgSend_stret (id, SEL, ...) + std::vector<const llvm::Type*> Params; + Params.push_back(ObjectPtrTy); + Params.push_back(SelectorPtrTy); + return + CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy, + Params, true), + "objc_msgSend_stret"); + + } + + llvm::Constant *getMessageSendFpretFn() const { + // FIXME: This should be long double on x86_64? + // [double | long double] objc_msgSend_fpret(id self, SEL op, ...) 
+ std::vector<const llvm::Type*> Params; + Params.push_back(ObjectPtrTy); + Params.push_back(SelectorPtrTy); + return + CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::DoubleTy, + Params, + true), + "objc_msgSend_fpret"); + + } + + llvm::Constant *getMessageSendSuperFn() const { + // id objc_msgSendSuper(struct objc_super *super, SEL op, ...) + const char *SuperName = "objc_msgSendSuper"; + std::vector<const llvm::Type*> Params; + Params.push_back(SuperPtrTy); + Params.push_back(SelectorPtrTy); + return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, + Params, true), + SuperName); + } + + llvm::Constant *getMessageSendSuperFn2() const { + // id objc_msgSendSuper2(struct objc_super *super, SEL op, ...) + const char *SuperName = "objc_msgSendSuper2"; + std::vector<const llvm::Type*> Params; + Params.push_back(SuperPtrTy); + Params.push_back(SelectorPtrTy); + return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, + Params, true), + SuperName); + } + + llvm::Constant *getMessageSendSuperStretFn() const { + // void objc_msgSendSuper_stret(void * stretAddr, struct objc_super *super, + // SEL op, ...) + std::vector<const llvm::Type*> Params; + Params.push_back(Int8PtrTy); + Params.push_back(SuperPtrTy); + Params.push_back(SelectorPtrTy); + return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy, + Params, true), + "objc_msgSendSuper_stret"); + } + + llvm::Constant *getMessageSendSuperStretFn2() const { + // void objc_msgSendSuper2_stret(void * stretAddr, struct objc_super *super, + // SEL op, ...) + std::vector<const llvm::Type*> Params; + Params.push_back(Int8PtrTy); + Params.push_back(SuperPtrTy); + Params.push_back(SelectorPtrTy); + return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy, + Params, true), + "objc_msgSendSuper2_stret"); + } + + llvm::Constant *getMessageSendSuperFpretFn() const { + // There is no objc_msgSendSuper_fpret? How can that work? 
+ return getMessageSendSuperFn(); + } + + llvm::Constant *getMessageSendSuperFpretFn2() const { + // There is no objc_msgSendSuper_fpret? How can that work? + return getMessageSendSuperFn2(); + } + +protected: + CodeGen::CodeGenModule &CGM; + +public: + const llvm::Type *ShortTy, *IntTy, *LongTy, *LongLongTy; + const llvm::Type *Int8PtrTy; + + /// ObjectPtrTy - LLVM type for object handles (typeof(id)) + const llvm::Type *ObjectPtrTy; + + /// PtrObjectPtrTy - LLVM type for id * + const llvm::Type *PtrObjectPtrTy; + + /// SelectorPtrTy - LLVM type for selector handles (typeof(SEL)) + const llvm::Type *SelectorPtrTy; + /// ProtocolPtrTy - LLVM type for external protocol handles + /// (typeof(Protocol)) + const llvm::Type *ExternalProtocolPtrTy; + + // SuperCTy - clang type for struct objc_super. + QualType SuperCTy; + // SuperPtrCTy - clang type for struct objc_super *. + QualType SuperPtrCTy; + + /// SuperTy - LLVM type for struct objc_super. + const llvm::StructType *SuperTy; + /// SuperPtrTy - LLVM type for struct objc_super *. + const llvm::Type *SuperPtrTy; + + /// PropertyTy - LLVM type for struct objc_property (struct _prop_t + /// in GCC parlance). + const llvm::StructType *PropertyTy; + + /// PropertyListTy - LLVM type for struct objc_property_list + /// (_prop_list_t in GCC parlance). + const llvm::StructType *PropertyListTy; + /// PropertyListPtrTy - LLVM type for struct objc_property_list*. + const llvm::Type *PropertyListPtrTy; + + // MethodTy - LLVM type for struct objc_method. + const llvm::StructType *MethodTy; + + /// CacheTy - LLVM type for struct objc_cache. + const llvm::Type *CacheTy; + /// CachePtrTy - LLVM type for struct objc_cache *. 
+ const llvm::Type *CachePtrTy; + + llvm::Constant *getGetPropertyFn() { + CodeGen::CodeGenTypes &Types = CGM.getTypes(); + ASTContext &Ctx = CGM.getContext(); + // id objc_getProperty (id, SEL, ptrdiff_t, bool) + llvm::SmallVector<QualType,16> Params; + QualType IdType = Ctx.getObjCIdType(); + QualType SelType = Ctx.getObjCSelType(); + Params.push_back(IdType); + Params.push_back(SelType); + Params.push_back(Ctx.LongTy); + Params.push_back(Ctx.BoolTy); + const llvm::FunctionType *FTy = + Types.GetFunctionType(Types.getFunctionInfo(IdType, Params), false); + return CGM.CreateRuntimeFunction(FTy, "objc_getProperty"); + } + + llvm::Constant *getSetPropertyFn() { + CodeGen::CodeGenTypes &Types = CGM.getTypes(); + ASTContext &Ctx = CGM.getContext(); + // void objc_setProperty (id, SEL, ptrdiff_t, id, bool, bool) + llvm::SmallVector<QualType,16> Params; + QualType IdType = Ctx.getObjCIdType(); + QualType SelType = Ctx.getObjCSelType(); + Params.push_back(IdType); + Params.push_back(SelType); + Params.push_back(Ctx.LongTy); + Params.push_back(IdType); + Params.push_back(Ctx.BoolTy); + Params.push_back(Ctx.BoolTy); + const llvm::FunctionType *FTy = + Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params), false); + return CGM.CreateRuntimeFunction(FTy, "objc_setProperty"); + } + + llvm::Constant *getEnumerationMutationFn() { + // void objc_enumerationMutation (id) + std::vector<const llvm::Type*> Args; + Args.push_back(ObjectPtrTy); + llvm::FunctionType *FTy = + llvm::FunctionType::get(llvm::Type::VoidTy, Args, false); + return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation"); + } + + /// GcReadWeakFn -- LLVM objc_read_weak (id *src) function. 
+ llvm::Constant *getGcReadWeakFn() { + // id objc_read_weak (id *) + std::vector<const llvm::Type*> Args; + Args.push_back(ObjectPtrTy->getPointerTo()); + llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, Args, false); + return CGM.CreateRuntimeFunction(FTy, "objc_read_weak"); + } + + /// GcAssignWeakFn -- LLVM objc_assign_weak function. + llvm::Constant *getGcAssignWeakFn() { + // id objc_assign_weak (id, id *) + std::vector<const llvm::Type*> Args(1, ObjectPtrTy); + Args.push_back(ObjectPtrTy->getPointerTo()); + llvm::FunctionType *FTy = + llvm::FunctionType::get(ObjectPtrTy, Args, false); + return CGM.CreateRuntimeFunction(FTy, "objc_assign_weak"); + } + + /// GcAssignGlobalFn -- LLVM objc_assign_global function. + llvm::Constant *getGcAssignGlobalFn() { + // id objc_assign_global(id, id *) + std::vector<const llvm::Type*> Args(1, ObjectPtrTy); + Args.push_back(ObjectPtrTy->getPointerTo()); + llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, Args, false); + return CGM.CreateRuntimeFunction(FTy, "objc_assign_global"); + } + + /// GcAssignIvarFn -- LLVM objc_assign_ivar function. + llvm::Constant *getGcAssignIvarFn() { + // id objc_assign_ivar(id, id *) + std::vector<const llvm::Type*> Args(1, ObjectPtrTy); + Args.push_back(ObjectPtrTy->getPointerTo()); + llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, Args, false); + return CGM.CreateRuntimeFunction(FTy, "objc_assign_ivar"); + } + + /// GcAssignStrongCastFn -- LLVM objc_assign_strongCast function. + llvm::Constant *getGcAssignStrongCastFn() { + // id objc_assign_global(id, id *) + std::vector<const llvm::Type*> Args(1, ObjectPtrTy); + Args.push_back(ObjectPtrTy->getPointerTo()); + llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, Args, false); + return CGM.CreateRuntimeFunction(FTy, "objc_assign_strongCast"); + } + + /// ExceptionThrowFn - LLVM objc_exception_throw function. 
+ llvm::Constant *getExceptionThrowFn() { + // void objc_exception_throw(id) + std::vector<const llvm::Type*> Args(1, ObjectPtrTy); + llvm::FunctionType *FTy = + llvm::FunctionType::get(llvm::Type::VoidTy, Args, false); + return CGM.CreateRuntimeFunction(FTy, "objc_exception_throw"); + } + + /// SyncEnterFn - LLVM objc_sync_enter function. + llvm::Constant *getSyncEnterFn() { + // void objc_sync_enter (id) + std::vector<const llvm::Type*> Args(1, ObjectPtrTy); + llvm::FunctionType *FTy = + llvm::FunctionType::get(llvm::Type::VoidTy, Args, false); + return CGM.CreateRuntimeFunction(FTy, "objc_sync_enter"); + } + + /// SyncExitFn - LLVM objc_sync_exit function. + llvm::Constant *getSyncExitFn() { + // void objc_sync_exit (id) + std::vector<const llvm::Type*> Args(1, ObjectPtrTy); + llvm::FunctionType *FTy = + llvm::FunctionType::get(llvm::Type::VoidTy, Args, false); + return CGM.CreateRuntimeFunction(FTy, "objc_sync_exit"); + } + + llvm::Constant *getSendFn(bool IsSuper) const { + return IsSuper ? getMessageSendSuperFn() : getMessageSendFn(); + } + + llvm::Constant *getSendFn2(bool IsSuper) const { + return IsSuper ? getMessageSendSuperFn2() : getMessageSendFn(); + } + + llvm::Constant *getSendStretFn(bool IsSuper) const { + return IsSuper ? getMessageSendSuperStretFn() : getMessageSendStretFn(); + } + + llvm::Constant *getSendStretFn2(bool IsSuper) const { + return IsSuper ? getMessageSendSuperStretFn2() : getMessageSendStretFn(); + } + + llvm::Constant *getSendFpretFn(bool IsSuper) const { + return IsSuper ? getMessageSendSuperFpretFn() : getMessageSendFpretFn(); + } + + llvm::Constant *getSendFpretFn2(bool IsSuper) const { + return IsSuper ? getMessageSendSuperFpretFn2() : getMessageSendFpretFn(); + } + + ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm); + ~ObjCCommonTypesHelper(){} +}; + +/// ObjCTypesHelper - Helper class that encapsulates lazy +/// construction of various types used during ObjC generation. 
+class ObjCTypesHelper : public ObjCCommonTypesHelper { +public: + /// SymtabTy - LLVM type for struct objc_symtab. + const llvm::StructType *SymtabTy; + /// SymtabPtrTy - LLVM type for struct objc_symtab *. + const llvm::Type *SymtabPtrTy; + /// ModuleTy - LLVM type for struct objc_module. + const llvm::StructType *ModuleTy; + + /// ProtocolTy - LLVM type for struct objc_protocol. + const llvm::StructType *ProtocolTy; + /// ProtocolPtrTy - LLVM type for struct objc_protocol *. + const llvm::Type *ProtocolPtrTy; + /// ProtocolExtensionTy - LLVM type for struct + /// objc_protocol_extension. + const llvm::StructType *ProtocolExtensionTy; + /// ProtocolExtensionTy - LLVM type for struct + /// objc_protocol_extension *. + const llvm::Type *ProtocolExtensionPtrTy; + /// MethodDescriptionTy - LLVM type for struct + /// objc_method_description. + const llvm::StructType *MethodDescriptionTy; + /// MethodDescriptionListTy - LLVM type for struct + /// objc_method_description_list. + const llvm::StructType *MethodDescriptionListTy; + /// MethodDescriptionListPtrTy - LLVM type for struct + /// objc_method_description_list *. + const llvm::Type *MethodDescriptionListPtrTy; + /// ProtocolListTy - LLVM type for struct objc_property_list. + const llvm::Type *ProtocolListTy; + /// ProtocolListPtrTy - LLVM type for struct objc_property_list*. + const llvm::Type *ProtocolListPtrTy; + /// CategoryTy - LLVM type for struct objc_category. + const llvm::StructType *CategoryTy; + /// ClassTy - LLVM type for struct objc_class. + const llvm::StructType *ClassTy; + /// ClassPtrTy - LLVM type for struct objc_class *. + const llvm::Type *ClassPtrTy; + /// ClassExtensionTy - LLVM type for struct objc_class_ext. + const llvm::StructType *ClassExtensionTy; + /// ClassExtensionPtrTy - LLVM type for struct objc_class_ext *. + const llvm::Type *ClassExtensionPtrTy; + // IvarTy - LLVM type for struct objc_ivar. + const llvm::StructType *IvarTy; + /// IvarListTy - LLVM type for struct objc_ivar_list. 
+ const llvm::Type *IvarListTy; + /// IvarListPtrTy - LLVM type for struct objc_ivar_list *. + const llvm::Type *IvarListPtrTy; + /// MethodListTy - LLVM type for struct objc_method_list. + const llvm::Type *MethodListTy; + /// MethodListPtrTy - LLVM type for struct objc_method_list *. + const llvm::Type *MethodListPtrTy; + + /// ExceptionDataTy - LLVM type for struct _objc_exception_data. + const llvm::Type *ExceptionDataTy; + + /// ExceptionTryEnterFn - LLVM objc_exception_try_enter function. + llvm::Constant *getExceptionTryEnterFn() { + std::vector<const llvm::Type*> Params; + Params.push_back(llvm::PointerType::getUnqual(ExceptionDataTy)); + return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy, + Params, false), + "objc_exception_try_enter"); + } + + /// ExceptionTryExitFn - LLVM objc_exception_try_exit function. + llvm::Constant *getExceptionTryExitFn() { + std::vector<const llvm::Type*> Params; + Params.push_back(llvm::PointerType::getUnqual(ExceptionDataTy)); + return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy, + Params, false), + "objc_exception_try_exit"); + } + + /// ExceptionExtractFn - LLVM objc_exception_extract function. + llvm::Constant *getExceptionExtractFn() { + std::vector<const llvm::Type*> Params; + Params.push_back(llvm::PointerType::getUnqual(ExceptionDataTy)); + return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, + Params, false), + "objc_exception_extract"); + + } + + /// ExceptionMatchFn - LLVM objc_exception_match function. + llvm::Constant *getExceptionMatchFn() { + std::vector<const llvm::Type*> Params; + Params.push_back(ClassPtrTy); + Params.push_back(ObjectPtrTy); + return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::Int32Ty, + Params, false), + "objc_exception_match"); + + } + + /// SetJmpFn - LLVM _setjmp function. 
+ llvm::Constant *getSetJmpFn() { + std::vector<const llvm::Type*> Params; + Params.push_back(llvm::PointerType::getUnqual(llvm::Type::Int32Ty)); + return + CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::Int32Ty, + Params, false), + "_setjmp"); + + } + +public: + ObjCTypesHelper(CodeGen::CodeGenModule &cgm); + ~ObjCTypesHelper() {} +}; + +/// ObjCNonFragileABITypesHelper - will have all types needed by objective-c's +/// modern abi +class ObjCNonFragileABITypesHelper : public ObjCCommonTypesHelper { +public: + + // MethodListnfABITy - LLVM for struct _method_list_t + const llvm::StructType *MethodListnfABITy; + + // MethodListnfABIPtrTy - LLVM for struct _method_list_t* + const llvm::Type *MethodListnfABIPtrTy; + + // ProtocolnfABITy = LLVM for struct _protocol_t + const llvm::StructType *ProtocolnfABITy; + + // ProtocolnfABIPtrTy = LLVM for struct _protocol_t* + const llvm::Type *ProtocolnfABIPtrTy; + + // ProtocolListnfABITy - LLVM for struct _objc_protocol_list + const llvm::StructType *ProtocolListnfABITy; + + // ProtocolListnfABIPtrTy - LLVM for struct _objc_protocol_list* + const llvm::Type *ProtocolListnfABIPtrTy; + + // ClassnfABITy - LLVM for struct _class_t + const llvm::StructType *ClassnfABITy; + + // ClassnfABIPtrTy - LLVM for struct _class_t* + const llvm::Type *ClassnfABIPtrTy; + + // IvarnfABITy - LLVM for struct _ivar_t + const llvm::StructType *IvarnfABITy; + + // IvarListnfABITy - LLVM for struct _ivar_list_t + const llvm::StructType *IvarListnfABITy; + + // IvarListnfABIPtrTy = LLVM for struct _ivar_list_t* + const llvm::Type *IvarListnfABIPtrTy; + + // ClassRonfABITy - LLVM for struct _class_ro_t + const llvm::StructType *ClassRonfABITy; + + // ImpnfABITy - LLVM for id (*)(id, SEL, ...) + const llvm::Type *ImpnfABITy; + + // CategorynfABITy - LLVM for struct _category_t + const llvm::StructType *CategorynfABITy; + + // New types for nonfragile abi messaging. 
+ + // MessageRefTy - LLVM for: + // struct _message_ref_t { + // IMP messenger; + // SEL name; + // }; + const llvm::StructType *MessageRefTy; + // MessageRefCTy - clang type for struct _message_ref_t + QualType MessageRefCTy; + + // MessageRefPtrTy - LLVM for struct _message_ref_t* + const llvm::Type *MessageRefPtrTy; + // MessageRefCPtrTy - clang type for struct _message_ref_t* + QualType MessageRefCPtrTy; + + // MessengerTy - Type of the messenger (shown as IMP above) + const llvm::FunctionType *MessengerTy; + + // SuperMessageRefTy - LLVM for: + // struct _super_message_ref_t { + // SUPER_IMP messenger; + // SEL name; + // }; + const llvm::StructType *SuperMessageRefTy; + + // SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t* + const llvm::Type *SuperMessageRefPtrTy; + + llvm::Constant *getMessageSendFixupFn() { + // id objc_msgSend_fixup(id, struct message_ref_t*, ...) + std::vector<const llvm::Type*> Params; + Params.push_back(ObjectPtrTy); + Params.push_back(MessageRefPtrTy); + return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, + Params, true), + "objc_msgSend_fixup"); + } + + llvm::Constant *getMessageSendFpretFixupFn() { + // id objc_msgSend_fpret_fixup(id, struct message_ref_t*, ...) + std::vector<const llvm::Type*> Params; + Params.push_back(ObjectPtrTy); + Params.push_back(MessageRefPtrTy); + return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, + Params, true), + "objc_msgSend_fpret_fixup"); + } + + llvm::Constant *getMessageSendStretFixupFn() { + // id objc_msgSend_stret_fixup(id, struct message_ref_t*, ...) + std::vector<const llvm::Type*> Params; + Params.push_back(ObjectPtrTy); + Params.push_back(MessageRefPtrTy); + return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, + Params, true), + "objc_msgSend_stret_fixup"); + } + + llvm::Constant *getMessageSendIdFixupFn() { + // id objc_msgSendId_fixup(id, struct message_ref_t*, ...) 
+ std::vector<const llvm::Type*> Params; + Params.push_back(ObjectPtrTy); + Params.push_back(MessageRefPtrTy); + return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, + Params, true), + "objc_msgSendId_fixup"); + } + + llvm::Constant *getMessageSendIdStretFixupFn() { + // id objc_msgSendId_stret_fixup(id, struct message_ref_t*, ...) + std::vector<const llvm::Type*> Params; + Params.push_back(ObjectPtrTy); + Params.push_back(MessageRefPtrTy); + return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, + Params, true), + "objc_msgSendId_stret_fixup"); + } + llvm::Constant *getMessageSendSuper2FixupFn() { + // id objc_msgSendSuper2_fixup (struct objc_super *, + // struct _super_message_ref_t*, ...) + std::vector<const llvm::Type*> Params; + Params.push_back(SuperPtrTy); + Params.push_back(SuperMessageRefPtrTy); + return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, + Params, true), + "objc_msgSendSuper2_fixup"); + } + + llvm::Constant *getMessageSendSuper2StretFixupFn() { + // id objc_msgSendSuper2_stret_fixup(struct objc_super *, + // struct _super_message_ref_t*, ...) + std::vector<const llvm::Type*> Params; + Params.push_back(SuperPtrTy); + Params.push_back(SuperMessageRefPtrTy); + return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, + Params, true), + "objc_msgSendSuper2_stret_fixup"); + } + + + + /// EHPersonalityPtr - LLVM value for an i8* to the Objective-C + /// exception personality function. 
+ llvm::Value *getEHPersonalityPtr() { + llvm::Constant *Personality = + CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::Int32Ty, + std::vector<const llvm::Type*>(), + true), + "__objc_personality_v0"); + return llvm::ConstantExpr::getBitCast(Personality, Int8PtrTy); + } + + llvm::Constant *getUnwindResumeOrRethrowFn() { + std::vector<const llvm::Type*> Params; + Params.push_back(Int8PtrTy); + return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy, + Params, false), + "_Unwind_Resume_or_Rethrow"); + } + + llvm::Constant *getObjCEndCatchFn() { + std::vector<const llvm::Type*> Params; + return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::VoidTy, + Params, false), + "objc_end_catch"); + + } + + llvm::Constant *getObjCBeginCatchFn() { + std::vector<const llvm::Type*> Params; + Params.push_back(Int8PtrTy); + return CGM.CreateRuntimeFunction(llvm::FunctionType::get(Int8PtrTy, + Params, false), + "objc_begin_catch"); + } + + const llvm::StructType *EHTypeTy; + const llvm::Type *EHTypePtrTy; + + ObjCNonFragileABITypesHelper(CodeGen::CodeGenModule &cgm); + ~ObjCNonFragileABITypesHelper(){} +}; + +class CGObjCCommonMac : public CodeGen::CGObjCRuntime { +public: + // FIXME - accessibility + class GC_IVAR { + public: + unsigned ivar_bytepos; + unsigned ivar_size; + GC_IVAR(unsigned bytepos = 0, unsigned size = 0) + : ivar_bytepos(bytepos), ivar_size(size) {} + + // Allow sorting based on byte pos. + bool operator<(const GC_IVAR &b) const { + return ivar_bytepos < b.ivar_bytepos; + } + }; + + class SKIP_SCAN { + public: + unsigned skip; + unsigned scan; + SKIP_SCAN(unsigned _skip = 0, unsigned _scan = 0) + : skip(_skip), scan(_scan) {} + }; + +protected: + CodeGen::CodeGenModule &CGM; + // FIXME! May not be needing this after all. + unsigned ObjCABI; + + // gc ivar layout bitmap calculation helper caches. 
+ llvm::SmallVector<GC_IVAR, 16> SkipIvars; + llvm::SmallVector<GC_IVAR, 16> IvarsInfo; + + /// LazySymbols - Symbols to generate a lazy reference for. See + /// DefinedSymbols and FinishModule(). + std::set<IdentifierInfo*> LazySymbols; + + /// DefinedSymbols - External symbols which are defined by this + /// module. The symbols in this list and LazySymbols are used to add + /// special linker symbols which ensure that Objective-C modules are + /// linked properly. + std::set<IdentifierInfo*> DefinedSymbols; + + /// ClassNames - uniqued class names. + llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> ClassNames; + + /// MethodVarNames - uniqued method variable names. + llvm::DenseMap<Selector, llvm::GlobalVariable*> MethodVarNames; + + /// MethodVarTypes - uniqued method type signatures. We have to use + /// a StringMap here because have no other unique reference. + llvm::StringMap<llvm::GlobalVariable*> MethodVarTypes; + + /// MethodDefinitions - map of methods which have been defined in + /// this translation unit. + llvm::DenseMap<const ObjCMethodDecl*, llvm::Function*> MethodDefinitions; + + /// PropertyNames - uniqued method variable names. + llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> PropertyNames; + + /// ClassReferences - uniqued class references. + llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> ClassReferences; + + /// SelectorReferences - uniqued selector references. + llvm::DenseMap<Selector, llvm::GlobalVariable*> SelectorReferences; + + /// Protocols - Protocols for which an objc_protocol structure has + /// been emitted. Forward declarations are handled by creating an + /// empty structure whose initializer is filled in when/if defined. + llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> Protocols; + + /// DefinedProtocols - Protocols which have actually been + /// defined. We should not need this, see FIXME in GenerateProtocol. 
+ llvm::DenseSet<IdentifierInfo*> DefinedProtocols; + + /// DefinedClasses - List of defined classes. + std::vector<llvm::GlobalValue*> DefinedClasses; + + /// DefinedNonLazyClasses - List of defined "non-lazy" classes. + std::vector<llvm::GlobalValue*> DefinedNonLazyClasses; + + /// DefinedCategories - List of defined categories. + std::vector<llvm::GlobalValue*> DefinedCategories; + + /// DefinedNonLazyCategories - List of defined "non-lazy" categories. + std::vector<llvm::GlobalValue*> DefinedNonLazyCategories; + + /// UsedGlobals - List of globals to pack into the llvm.used metadata + /// to prevent them from being clobbered. + std::vector<llvm::GlobalVariable*> UsedGlobals; + + /// GetNameForMethod - Return a name for the given method. + /// \param[out] NameOut - The return value. + void GetNameForMethod(const ObjCMethodDecl *OMD, + const ObjCContainerDecl *CD, + std::string &NameOut); + + /// GetMethodVarName - Return a unique constant for the given + /// selector's name. The return value has type char *. + llvm::Constant *GetMethodVarName(Selector Sel); + llvm::Constant *GetMethodVarName(IdentifierInfo *Ident); + llvm::Constant *GetMethodVarName(const std::string &Name); + + /// GetMethodVarType - Return a unique constant for the given + /// selector's name. The return value has type char *. + + // FIXME: This is a horrible name. + llvm::Constant *GetMethodVarType(const ObjCMethodDecl *D); + llvm::Constant *GetMethodVarType(const FieldDecl *D); + + /// GetPropertyName - Return a unique constant for the given + /// name. The return value has type char *. + llvm::Constant *GetPropertyName(IdentifierInfo *Ident); + + // FIXME: This can be dropped once string functions are unified. + llvm::Constant *GetPropertyTypeString(const ObjCPropertyDecl *PD, + const Decl *Container); + + /// GetClassName - Return a unique constant for the given selector's + /// name. The return value has type char *. 
+ llvm::Constant *GetClassName(IdentifierInfo *Ident); + + /// BuildIvarLayout - Builds ivar layout bitmap for the class + /// implementation for the __strong or __weak case. + /// + llvm::Constant *BuildIvarLayout(const ObjCImplementationDecl *OI, + bool ForStrongLayout); + + void BuildAggrIvarRecordLayout(const RecordType *RT, + unsigned int BytePos, bool ForStrongLayout, + bool &HasUnion); + void BuildAggrIvarLayout(const ObjCImplementationDecl *OI, + const llvm::StructLayout *Layout, + const RecordDecl *RD, + const llvm::SmallVectorImpl<FieldDecl*> &RecFields, + unsigned int BytePos, bool ForStrongLayout, + bool &HasUnion); + + /// GetIvarLayoutName - Returns a unique constant for the given + /// ivar layout bitmap. + llvm::Constant *GetIvarLayoutName(IdentifierInfo *Ident, + const ObjCCommonTypesHelper &ObjCTypes); + + /// EmitPropertyList - Emit the given property list. The return + /// value has type PropertyListPtrTy. + llvm::Constant *EmitPropertyList(const std::string &Name, + const Decl *Container, + const ObjCContainerDecl *OCD, + const ObjCCommonTypesHelper &ObjCTypes); + + /// GetProtocolRef - Return a reference to the internal protocol + /// description, creating an empty one if it has not been + /// defined. The return value has type ProtocolPtrTy. + llvm::Constant *GetProtocolRef(const ObjCProtocolDecl *PD); + + /// CreateMetadataVar - Create a global variable with internal + /// linkage for use by the Objective-C runtime. + /// + /// This is a convenience wrapper which not only creates the + /// variable, but also sets the section and alignment and adds the + /// global to the UsedGlobals list. + /// + /// \param Name - The variable name. + /// \param Init - The variable initializer; this is also used to + /// define the type of the variable. + /// \param Section - The section the variable should go into, or 0. + /// \param Align - The alignment for the variable, or 0. 
+ /// \param AddToUsed - Whether the variable should be added to + /// "llvm.used". + llvm::GlobalVariable *CreateMetadataVar(const std::string &Name, + llvm::Constant *Init, + const char *Section, + unsigned Align, + bool AddToUsed); + + /// GetNamedIvarList - Return the list of ivars in the interface + /// itself (not including super classes and not including unnamed + /// bitfields). + /// + /// For the non-fragile ABI, this also includes synthesized property + /// ivars. + void GetNamedIvarList(const ObjCInterfaceDecl *OID, + llvm::SmallVector<ObjCIvarDecl*, 16> &Res) const; + + CodeGen::RValue EmitLegacyMessageSend(CodeGen::CodeGenFunction &CGF, + QualType ResultType, + llvm::Value *Sel, + llvm::Value *Arg0, + QualType Arg0Ty, + bool IsSuper, + const CallArgList &CallArgs, + const ObjCCommonTypesHelper &ObjCTypes); + +public: + CGObjCCommonMac(CodeGen::CodeGenModule &cgm) : CGM(cgm) + { } + + virtual llvm::Constant *GenerateConstantString(const ObjCStringLiteral *SL); + + virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD, + const ObjCContainerDecl *CD=0); + + virtual void GenerateProtocol(const ObjCProtocolDecl *PD); + + /// GetOrEmitProtocol - Get the protocol object for the given + /// declaration, emitting it if necessary. The return value has type + /// ProtocolPtrTy. + virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD)=0; + + /// GetOrEmitProtocolRef - Get a forward reference to the protocol + /// object for the given declaration, emitting it if needed. These + /// forward references will be filled in with empty bodies if no + /// definition is seen. The return value has type ProtocolPtrTy. + virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD)=0; +}; + +class CGObjCMac : public CGObjCCommonMac { +private: + ObjCTypesHelper ObjCTypes; + /// EmitImageInfo - Emit the image info marker used to encode some module + /// level information. 
+ void EmitImageInfo(); + + /// EmitModuleInfo - Another marker encoding module level + /// information. + void EmitModuleInfo(); + + /// EmitModuleSymbols - Emit module symbols, the list of defined + /// classes and categories. The result has type SymtabPtrTy. + llvm::Constant *EmitModuleSymbols(); + + /// FinishModule - Write out global data structures at the end of + /// processing a translation unit. + void FinishModule(); + + /// EmitClassExtension - Generate the class extension structure used + /// to store the weak ivar layout and properties. The return value + /// has type ClassExtensionPtrTy. + llvm::Constant *EmitClassExtension(const ObjCImplementationDecl *ID); + + /// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy, + /// for the given class. + llvm::Value *EmitClassRef(CGBuilderTy &Builder, + const ObjCInterfaceDecl *ID); + + CodeGen::RValue EmitMessageSend(CodeGen::CodeGenFunction &CGF, + QualType ResultType, + Selector Sel, + llvm::Value *Arg0, + QualType Arg0Ty, + bool IsSuper, + const CallArgList &CallArgs); + + /// EmitIvarList - Emit the ivar list for the given + /// implementation. If ForClass is true the list of class ivars + /// (i.e. metaclass ivars) is emitted, otherwise the list of + /// interface ivars will be emitted. The return value has type + /// IvarListPtrTy. + llvm::Constant *EmitIvarList(const ObjCImplementationDecl *ID, + bool ForClass); + + /// EmitMetaClassRef - Emit a forward reference to the class structure + /// for the metaclass of the given interface. The return value has + /// type ClassPtrTy. + llvm::Constant *EmitMetaClassRef(const ObjCInterfaceDecl *ID); + + /// EmitMetaClass - Emit a class structure for the metaclass of the + /// given implementation. The return value has type ClassPtrTy. 
+ llvm::Constant *EmitMetaClass(const ObjCImplementationDecl *ID, + llvm::Constant *Protocols, + const ConstantVector &Methods); + + llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD); + + llvm::Constant *GetMethodDescriptionConstant(const ObjCMethodDecl *MD); + + /// EmitMethodList - Emit the method list for the given + /// implementation. The return value has type MethodListPtrTy. + llvm::Constant *EmitMethodList(const std::string &Name, + const char *Section, + const ConstantVector &Methods); + + /// EmitMethodDescList - Emit a method description list for a list of + /// method declarations. + /// - TypeName: The name for the type containing the methods. + /// - IsProtocol: True iff these methods are for a protocol. + /// - ClassMethods: True iff these are class methods. + /// - Required: When true, only "required" methods are + /// listed. Similarly, when false only "optional" methods are + /// listed. For classes this should always be true. + /// - begin, end: The method list to output. + /// + /// The return value has type MethodDescriptionListPtrTy. + llvm::Constant *EmitMethodDescList(const std::string &Name, + const char *Section, + const ConstantVector &Methods); + + /// GetOrEmitProtocol - Get the protocol object for the given + /// declaration, emitting it if necessary. The return value has type + /// ProtocolPtrTy. + virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD); + + /// GetOrEmitProtocolRef - Get a forward reference to the protocol + /// object for the given declaration, emitting it if needed. These + /// forward references will be filled in with empty bodies if no + /// definition is seen. The return value has type ProtocolPtrTy. + virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD); + + /// EmitProtocolExtension - Generate the protocol extension + /// structure used to store optional instance and class methods, and + /// protocol properties. The return value has type + /// ProtocolExtensionPtrTy. 
+ llvm::Constant * + EmitProtocolExtension(const ObjCProtocolDecl *PD, + const ConstantVector &OptInstanceMethods, + const ConstantVector &OptClassMethods); + + /// EmitProtocolList - Generate the list of referenced + /// protocols. The return value has type ProtocolListPtrTy. + llvm::Constant *EmitProtocolList(const std::string &Name, + ObjCProtocolDecl::protocol_iterator begin, + ObjCProtocolDecl::protocol_iterator end); + + /// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy, + /// for the given selector. + llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel); + + public: + CGObjCMac(CodeGen::CodeGenModule &cgm); + + virtual llvm::Function *ModuleInitFunction(); + + virtual CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF, + QualType ResultType, + Selector Sel, + llvm::Value *Receiver, + bool IsClassMessage, + const CallArgList &CallArgs, + const ObjCMethodDecl *Method); + + virtual CodeGen::RValue + GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF, + QualType ResultType, + Selector Sel, + const ObjCInterfaceDecl *Class, + bool isCategoryImpl, + llvm::Value *Receiver, + bool IsClassMessage, + const CallArgList &CallArgs); + + virtual llvm::Value *GetClass(CGBuilderTy &Builder, + const ObjCInterfaceDecl *ID); + + virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel); + + /// The NeXT/Apple runtimes do not support typed selectors; just emit an + /// untyped one. 
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, + const ObjCMethodDecl *Method); + + virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD); + + virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl); + + virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder, + const ObjCProtocolDecl *PD); + + virtual llvm::Constant *GetPropertyGetFunction(); + virtual llvm::Constant *GetPropertySetFunction(); + virtual llvm::Constant *EnumerationMutationFunction(); + + virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, + const Stmt &S); + virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF, + const ObjCAtThrowStmt &S); + virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF, + llvm::Value *AddrWeakObj); + virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF, + llvm::Value *src, llvm::Value *dst); + virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF, + llvm::Value *src, llvm::Value *dest); + virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF, + llvm::Value *src, llvm::Value *dest); + virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF, + llvm::Value *src, llvm::Value *dest); + + virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF, + QualType ObjectTy, + llvm::Value *BaseValue, + const ObjCIvarDecl *Ivar, + unsigned CVRQualifiers); + virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF, + const ObjCInterfaceDecl *Interface, + const ObjCIvarDecl *Ivar); +}; + +class CGObjCNonFragileABIMac : public CGObjCCommonMac { +private: + ObjCNonFragileABITypesHelper ObjCTypes; + llvm::GlobalVariable* ObjCEmptyCacheVar; + llvm::GlobalVariable* ObjCEmptyVtableVar; + + /// SuperClassReferences - uniqued super class references. + llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> SuperClassReferences; + + /// MetaClassReferences - uniqued meta class references. 
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> MetaClassReferences; + + /// EHTypeReferences - uniqued class ehtype references. + llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> EHTypeReferences; + + /// NonLegacyDispatchMethods - List of methods for which we do *not* generate + /// legacy messaging dispatch. + llvm::DenseSet<Selector> NonLegacyDispatchMethods; + + /// LegacyDispatchedSelector - Returns true if SEL is not in the list of + /// NonLegacyDispatchMethods; false otherwise. + bool LegacyDispatchedSelector(Selector Sel); + + /// FinishNonFragileABIModule - Write out global data structures at the end of + /// processing a translation unit. + void FinishNonFragileABIModule(); + + /// AddModuleClassList - Add the given list of class pointers to the + /// module with the provided symbol and section names. + void AddModuleClassList(const std::vector<llvm::GlobalValue*> &Container, + const char *SymbolName, + const char *SectionName); + + llvm::GlobalVariable * BuildClassRoTInitializer(unsigned flags, + unsigned InstanceStart, + unsigned InstanceSize, + const ObjCImplementationDecl *ID); + llvm::GlobalVariable * BuildClassMetaData(std::string &ClassName, + llvm::Constant *IsAGV, + llvm::Constant *SuperClassGV, + llvm::Constant *ClassRoGV, + bool HiddenVisibility); + + llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD); + + llvm::Constant *GetMethodDescriptionConstant(const ObjCMethodDecl *MD); + + /// EmitMethodList - Emit the method list for the given + /// implementation. The return value has type MethodListnfABITy. + llvm::Constant *EmitMethodList(const std::string &Name, + const char *Section, + const ConstantVector &Methods); + /// EmitIvarList - Emit the ivar list for the given + /// implementation. If ForClass is true the list of class ivars + /// (i.e. metaclass ivars) is emitted, otherwise the list of + /// interface ivars will be emitted. The return value has type + /// IvarListnfABIPtrTy. 
+ llvm::Constant *EmitIvarList(const ObjCImplementationDecl *ID); + + llvm::Constant *EmitIvarOffsetVar(const ObjCInterfaceDecl *ID, + const ObjCIvarDecl *Ivar, + unsigned long int offset); + + /// GetOrEmitProtocol - Get the protocol object for the given + /// declaration, emitting it if necessary. The return value has type + /// ProtocolPtrTy. + virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD); + + /// GetOrEmitProtocolRef - Get a forward reference to the protocol + /// object for the given declaration, emitting it if needed. These + /// forward references will be filled in with empty bodies if no + /// definition is seen. The return value has type ProtocolPtrTy. + virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD); + + /// EmitProtocolList - Generate the list of referenced + /// protocols. The return value has type ProtocolListPtrTy. + llvm::Constant *EmitProtocolList(const std::string &Name, + ObjCProtocolDecl::protocol_iterator begin, + ObjCProtocolDecl::protocol_iterator end); + + CodeGen::RValue EmitMessageSend(CodeGen::CodeGenFunction &CGF, + QualType ResultType, + Selector Sel, + llvm::Value *Receiver, + QualType Arg0Ty, + bool IsSuper, + const CallArgList &CallArgs); + + /// GetClassGlobal - Return the global variable for the Objective-C + /// class of the given name. + llvm::GlobalVariable *GetClassGlobal(const std::string &Name); + + /// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy, + /// for the given class reference. + llvm::Value *EmitClassRef(CGBuilderTy &Builder, + const ObjCInterfaceDecl *ID); + + /// EmitSuperClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy, + /// for the given super class reference. 
+ llvm::Value *EmitSuperClassRef(CGBuilderTy &Builder, + const ObjCInterfaceDecl *ID); + + /// EmitMetaClassRef - Return a Value * of the address of _class_t + /// meta-data + llvm::Value *EmitMetaClassRef(CGBuilderTy &Builder, + const ObjCInterfaceDecl *ID); + + /// ObjCIvarOffsetVariable - Returns the ivar offset variable for + /// the given ivar. + /// + llvm::GlobalVariable * ObjCIvarOffsetVariable( + const ObjCInterfaceDecl *ID, + const ObjCIvarDecl *Ivar); + + /// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy, + /// for the given selector. + llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel); + + /// GetInterfaceEHType - Get the cached ehtype for the given Objective-C + /// interface. The return value has type EHTypePtrTy. + llvm::Value *GetInterfaceEHType(const ObjCInterfaceDecl *ID, + bool ForDefinition); + + const char *getMetaclassSymbolPrefix() const { + return "OBJC_METACLASS_$_"; + } + + const char *getClassSymbolPrefix() const { + return "OBJC_CLASS_$_"; + } + + void GetClassSizeInfo(const ObjCImplementationDecl *OID, + uint32_t &InstanceStart, + uint32_t &InstanceSize); + + // Shamelessly stolen from Analysis/CFRefCount.cpp + Selector GetNullarySelector(const char* name) const { + IdentifierInfo* II = &CGM.getContext().Idents.get(name); + return CGM.getContext().Selectors.getSelector(0, &II); + } + + Selector GetUnarySelector(const char* name) const { + IdentifierInfo* II = &CGM.getContext().Idents.get(name); + return CGM.getContext().Selectors.getSelector(1, &II); + } + + /// ImplementationIsNonLazy - Check whether the given category or + /// class implementation is "non-lazy". + bool ImplementationIsNonLazy(const ObjCImplDecl *OD) const; + +public: + CGObjCNonFragileABIMac(CodeGen::CodeGenModule &cgm); + // FIXME. All stubs for now! 
+ virtual llvm::Function *ModuleInitFunction(); + + virtual CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF, + QualType ResultType, + Selector Sel, + llvm::Value *Receiver, + bool IsClassMessage, + const CallArgList &CallArgs, + const ObjCMethodDecl *Method); + + virtual CodeGen::RValue + GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF, + QualType ResultType, + Selector Sel, + const ObjCInterfaceDecl *Class, + bool isCategoryImpl, + llvm::Value *Receiver, + bool IsClassMessage, + const CallArgList &CallArgs); + + virtual llvm::Value *GetClass(CGBuilderTy &Builder, + const ObjCInterfaceDecl *ID); + + virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel) + { return EmitSelector(Builder, Sel); } + + /// The NeXT/Apple runtimes do not support typed selectors; just emit an + /// untyped one. + virtual llvm::Value *GetSelector(CGBuilderTy &Builder, + const ObjCMethodDecl *Method) + { return EmitSelector(Builder, Method->getSelector()); } + + virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD); + + virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl); + virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder, + const ObjCProtocolDecl *PD); + + virtual llvm::Constant *GetPropertyGetFunction() { + return ObjCTypes.getGetPropertyFn(); + } + virtual llvm::Constant *GetPropertySetFunction() { + return ObjCTypes.getSetPropertyFn(); + } + virtual llvm::Constant *EnumerationMutationFunction() { + return ObjCTypes.getEnumerationMutationFn(); + } + + virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, + const Stmt &S); + virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF, + const ObjCAtThrowStmt &S); + virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF, + llvm::Value *AddrWeakObj); + virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF, + llvm::Value *src, llvm::Value *dst); + virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF, + llvm::Value *src, 
llvm::Value *dest); + virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF, + llvm::Value *src, llvm::Value *dest); + virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF, + llvm::Value *src, llvm::Value *dest); + virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF, + QualType ObjectTy, + llvm::Value *BaseValue, + const ObjCIvarDecl *Ivar, + unsigned CVRQualifiers); + virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF, + const ObjCInterfaceDecl *Interface, + const ObjCIvarDecl *Ivar); +}; + +} // end anonymous namespace + +/* *** Helper Functions *** */ + +/// getConstantGEP() - Help routine to construct simple GEPs. +static llvm::Constant *getConstantGEP(llvm::Constant *C, + unsigned idx0, + unsigned idx1) { + llvm::Value *Idxs[] = { + llvm::ConstantInt::get(llvm::Type::Int32Ty, idx0), + llvm::ConstantInt::get(llvm::Type::Int32Ty, idx1) + }; + return llvm::ConstantExpr::getGetElementPtr(C, Idxs, 2); +} + +/// hasObjCExceptionAttribute - Return true if this class or any super +/// class has the __objc_exception__ attribute. +static bool hasObjCExceptionAttribute(const ObjCInterfaceDecl *OID) { + if (OID->hasAttr<ObjCExceptionAttr>()) + return true; + if (const ObjCInterfaceDecl *Super = OID->getSuperClass()) + return hasObjCExceptionAttribute(Super); + return false; +} + +/* *** CGObjCMac Public Interface *** */ + +CGObjCMac::CGObjCMac(CodeGen::CodeGenModule &cgm) : CGObjCCommonMac(cgm), + ObjCTypes(cgm) +{ + ObjCABI = 1; + EmitImageInfo(); +} + +/// GetClass - Return a reference to the class for the given interface +/// decl. +llvm::Value *CGObjCMac::GetClass(CGBuilderTy &Builder, + const ObjCInterfaceDecl *ID) { + return EmitClassRef(Builder, ID); +} + +/// GetSelector - Return the pointer to the unique'd string for this selector. 
+llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, Selector Sel) {
+  return EmitSelector(Builder, Sel);
+}
+/// GetSelector - Return the selector for the given method declaration. The
+/// fragile Mac runtime has no typed selectors, so this simply emits the
+/// untyped selector of the method.
+llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
+                                    *Method) {
+  return EmitSelector(Builder, Method->getSelector());
+}
+
+/// Generate a constant CFString object.
+/*
+   struct __builtin_CFString {
+     const int *isa; // point to __CFConstantStringClassReference
+     int flags;
+     const char *str;
+     long length;
+   };
+*/
+
+llvm::Constant *CGObjCCommonMac::GenerateConstantString(
+                                                  const ObjCStringLiteral *SL) {
+  // The actual layout above is produced by CodeGenModule; this just forwards.
+  return CGM.GetAddrOfConstantCFString(SL->getString());
+}
+
+/// Generates a message send where the super is the receiver.  This is
+/// a message send to self with special delivery semantics indicating
+/// which class's method should be called.
+CodeGen::RValue
+CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+                                    QualType ResultType,
+                                    Selector Sel,
+                                    const ObjCInterfaceDecl *Class,
+                                    bool isCategoryImpl,
+                                    llvm::Value *Receiver,
+                                    bool IsClassMessage,
+                                    const CodeGen::CallArgList &CallArgs) {
+  // Create and init a super structure; this is a (receiver, class)
+  // pair we will pass to objc_msgSendSuper.
+  llvm::Value *ObjCSuper =
+    CGF.Builder.CreateAlloca(ObjCTypes.SuperTy, 0, "objc_super");
+  llvm::Value *ReceiverAsObject =
+    CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
+  CGF.Builder.CreateStore(ReceiverAsObject,
+                          CGF.Builder.CreateStructGEP(ObjCSuper, 0));
+
+  // If this is a class message the metaclass is passed as the target.
+  llvm::Value *Target;
+  if (IsClassMessage) {
+    if (isCategoryImpl) {
+      // Message sent to 'super' in a class method defined in a category
+      // implementation requires an odd treatment.
+      // If we are in a class method, we must retrieve the
+      // _metaclass_ for the current class, pointed at by
+      // the class's "isa" pointer.  The following assumes that
+      // "isa" is the first ivar in a class (which it must be).
+      Target = EmitClassRef(CGF.Builder, Class->getSuperClass());
+      Target = CGF.Builder.CreateStructGEP(Target, 0);
+      Target = CGF.Builder.CreateLoad(Target);
+    }
+    else {
+      // Outside a category: load the superclass slot of the metaclass.
+      llvm::Value *MetaClassPtr = EmitMetaClassRef(Class);
+      llvm::Value *SuperPtr = CGF.Builder.CreateStructGEP(MetaClassPtr, 1);
+      llvm::Value *Super = CGF.Builder.CreateLoad(SuperPtr);
+      Target = Super;
+    }
+  } else {
+    Target = EmitClassRef(CGF.Builder, Class->getSuperClass());
+  }
+  // FIXME: We shouldn't need to do this cast, rectify the ASTContext and
+  // ObjCTypes types.
+  const llvm::Type *ClassTy =
+    CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType());
+  Target = CGF.Builder.CreateBitCast(Target, ClassTy);
+  CGF.Builder.CreateStore(Target,
+                          CGF.Builder.CreateStructGEP(ObjCSuper, 1));
+  return EmitLegacyMessageSend(CGF, ResultType,
+                               EmitSelector(CGF.Builder, Sel),
+                               ObjCSuper, ObjCTypes.SuperPtrCTy,
+                               true, CallArgs, ObjCTypes);
+}
+
+/// Generate code for a message send expression.
+CodeGen::RValue CGObjCMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+                                               QualType ResultType,
+                                               Selector Sel,
+                                               llvm::Value *Receiver,
+                                               bool IsClassMessage,
+                                               const CallArgList &CallArgs,
+                                               const ObjCMethodDecl *Method) {
+  return EmitLegacyMessageSend(CGF, ResultType,
+                               EmitSelector(CGF.Builder, Sel),
+                               Receiver, CGF.getContext().getObjCIdType(),
+                               false, CallArgs, ObjCTypes);
+}
+
+/// EmitLegacyMessageSend - Emit a call to the appropriate objc_msgSend
+/// variant. The variant is selected from ObjCTypes based on whether the
+/// result is returned indirectly (stret), is a floating-point type (fpret),
+/// whether this is a send to super, and the ABI in use (ObjCABI == 2 selects
+/// the non-fragile entry points).
+CodeGen::RValue CGObjCCommonMac::EmitLegacyMessageSend(
+                                      CodeGen::CodeGenFunction &CGF,
+                                      QualType ResultType,
+                                      llvm::Value *Sel,
+                                      llvm::Value *Arg0,
+                                      QualType Arg0Ty,
+                                      bool IsSuper,
+                                      const CallArgList &CallArgs,
+                                      const ObjCCommonTypesHelper &ObjCTypes) {
+  // Prepend the implicit (receiver/super, selector) arguments.
+  CallArgList ActualArgs;
+  if (!IsSuper)
+    Arg0 = CGF.Builder.CreateBitCast(Arg0, ObjCTypes.ObjectPtrTy, "tmp");
+  ActualArgs.push_back(std::make_pair(RValue::get(Arg0), Arg0Ty));
+  ActualArgs.push_back(std::make_pair(RValue::get(Sel),
+                                      CGF.getContext().getObjCSelType()));
+  ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
+
+  CodeGenTypes &Types = CGM.getTypes();
+  const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs);
+  // In 64bit ABI, type must be assumed VARARG. In 32bit abi,
+  // it seems not to matter.
+  const llvm::FunctionType *FTy = Types.GetFunctionType(FnInfo, (ObjCABI == 2));
+
+  llvm::Constant *Fn = NULL;
+  if (CGM.ReturnTypeUsesSret(FnInfo)) {
+    Fn = (ObjCABI == 2) ? ObjCTypes.getSendStretFn2(IsSuper)
+      : ObjCTypes.getSendStretFn(IsSuper);
+  } else if (ResultType->isFloatingType()) {
+    if (ObjCABI == 2) {
+      // In the non-fragile ABI only long double needs the fpret variant.
+      if (const BuiltinType *BT = ResultType->getAsBuiltinType()) {
+        BuiltinType::Kind k = BT->getKind();
+        Fn = (k == BuiltinType::LongDouble) ? ObjCTypes.getSendFpretFn2(IsSuper)
+          : ObjCTypes.getSendFn2(IsSuper);
+      }
+    }
+    else
+      // FIXME. This currently matches gcc's API for x86-32. May need to change
+      // for others if we have their API.
+      Fn = ObjCTypes.getSendFpretFn(IsSuper);
+  } else {
+    Fn = (ObjCABI == 2) ? ObjCTypes.getSendFn2(IsSuper)
+      : ObjCTypes.getSendFn(IsSuper);
+  }
+  assert(Fn && "EmitLegacyMessageSend - unknown API");
+  // Cast the message function to the type implied by the actual arguments.
+  Fn = llvm::ConstantExpr::getBitCast(Fn, llvm::PointerType::getUnqual(FTy));
+  return CGF.EmitCall(FnInfo, Fn, ActualArgs);
+}
+
+llvm::Value *CGObjCMac::GenerateProtocolRef(CGBuilderTy &Builder,
+                                            const ObjCProtocolDecl *PD) {
+  // FIXME: I don't understand why gcc generates this, or where it is
+  // resolved. Investigate. It's also wasteful to look this up over and over.
+  LazySymbols.insert(&CGM.getContext().Idents.get("Protocol"));
+
+  return llvm::ConstantExpr::getBitCast(GetProtocolRef(PD),
+                                        ObjCTypes.ExternalProtocolPtrTy);
+}
+
+void CGObjCCommonMac::GenerateProtocol(const ObjCProtocolDecl *PD) {
+  // FIXME: We shouldn't need this, the protocol decl should contain enough
+  // information to tell us whether this was a declaration or a definition.
+  DefinedProtocols.insert(PD->getIdentifier());
+
+  // If we have generated a forward reference to this protocol, emit
+  // it now. Otherwise do nothing, the protocol objects are lazily
+  // emitted.
+  if (Protocols.count(PD->getIdentifier()))
+    GetOrEmitProtocol(PD);
+}
+
+/// GetProtocolRef - Return a reference to the given protocol: the full
+/// protocol object if it is defined in this translation unit, otherwise a
+/// forward reference to be filled in at module finalization.
+llvm::Constant *CGObjCCommonMac::GetProtocolRef(const ObjCProtocolDecl *PD) {
+  if (DefinedProtocols.count(PD->getIdentifier()))
+    return GetOrEmitProtocol(PD);
+  return GetOrEmitProtocolRef(PD);
+}
+
+/*
+     // APPLE LOCAL radar 4585769 - Objective-C 1.0 extensions
+  struct _objc_protocol {
+    struct _objc_protocol_extension *isa;
+    char *protocol_name;
+    struct _objc_protocol_list *protocol_list;
+    struct _objc__method_prototype_list *instance_methods;
+    struct _objc__method_prototype_list *class_methods
+  };
+
+  See EmitProtocolExtension().
+*/
+llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
+  llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+
+  // Early exit if a defining object has already been generated.
+  if (Entry && Entry->hasInitializer())
+    return Entry;
+
+  // FIXME: I don't understand why gcc generates this, or where it is
+  // resolved. Investigate. It's also wasteful to look this up over and over.
+  LazySymbols.insert(&CGM.getContext().Idents.get("Protocol"));
+
+  const char *ProtocolName = PD->getNameAsCString();
+
+  // Construct method lists, splitting @optional methods from @required ones.
+  std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+  std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods;
+  for (ObjCProtocolDecl::instmeth_iterator
+         i = PD->instmeth_begin(CGM.getContext()),
+         e = PD->instmeth_end(CGM.getContext()); i != e; ++i) {
+    ObjCMethodDecl *MD = *i;
+    llvm::Constant *C = GetMethodDescriptionConstant(MD);
+    if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+      OptInstanceMethods.push_back(C);
+    } else {
+      InstanceMethods.push_back(C);
+    }
+  }
+
+  for (ObjCProtocolDecl::classmeth_iterator
+         i = PD->classmeth_begin(CGM.getContext()),
+         e = PD->classmeth_end(CGM.getContext()); i != e; ++i) {
+    ObjCMethodDecl *MD = *i;
+    llvm::Constant *C = GetMethodDescriptionConstant(MD);
+    if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+      OptClassMethods.push_back(C);
+    } else {
+      ClassMethods.push_back(C);
+    }
+  }
+
+  // Fill in the _objc_protocol fields (see the struct comment above);
+  // optional methods and properties go in the protocol extension (isa slot).
+  std::vector<llvm::Constant*> Values(5);
+  Values[0] = EmitProtocolExtension(PD, OptInstanceMethods, OptClassMethods);
+  Values[1] = GetClassName(PD->getIdentifier());
+  Values[2] =
+    EmitProtocolList("\01L_OBJC_PROTOCOL_REFS_" + PD->getNameAsString(),
+                     PD->protocol_begin(),
+                     PD->protocol_end());
+  Values[3] =
+    EmitMethodDescList("\01L_OBJC_PROTOCOL_INSTANCE_METHODS_" +
+                       PD->getNameAsString(),
+                       "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+                       InstanceMethods);
+  Values[4] =
+    EmitMethodDescList("\01L_OBJC_PROTOCOL_CLASS_METHODS_" +
+                       PD->getNameAsString(),
+                       "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+                       ClassMethods);
+  llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
+                                                   Values);
+
+  if (Entry) {
+    // Already created, fix the linkage and update the initializer.
+    Entry->setLinkage(llvm::GlobalValue::InternalLinkage);
+    Entry->setInitializer(Init);
+  } else {
+    Entry =
+      new llvm::GlobalVariable(ObjCTypes.ProtocolTy, false,
+                               llvm::GlobalValue::InternalLinkage,
+                               Init,
+                               std::string("\01L_OBJC_PROTOCOL_")+ProtocolName,
+                               &CGM.getModule());
+    Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
+    Entry->setAlignment(4);
+    UsedGlobals.push_back(Entry);
+    // FIXME: Is this necessary? Why only for protocol?
+    Entry->setAlignment(4);
+  }
+
+  return Entry;
+}
+
+/// GetOrEmitProtocolRef - Emit (once) an uninitialized protocol global to
+/// serve as a forward reference; module finalization fills in empty contents
+/// for protocols that were referenced but never defined.
+llvm::Constant *CGObjCMac::GetOrEmitProtocolRef(const ObjCProtocolDecl *PD) {
+  llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+
+  if (!Entry) {
+    // We use the initializer as a marker of whether this is a forward
+    // reference or not. At module finalization we add the empty
+    // contents for protocols which were referenced but never defined.
+    Entry =
+      new llvm::GlobalVariable(ObjCTypes.ProtocolTy, false,
+                               llvm::GlobalValue::ExternalLinkage,
+                               0,
+                               "\01L_OBJC_PROTOCOL_" + PD->getNameAsString(),
+                               &CGM.getModule());
+    Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
+    Entry->setAlignment(4);
+    UsedGlobals.push_back(Entry);
+    // FIXME: Is this necessary? Why only for protocol?
+    Entry->setAlignment(4);
+  }
+
+  return Entry;
+}
+
+/*
+  struct _objc_protocol_extension {
+    uint32_t size;
+    struct objc_method_description_list *optional_instance_methods;
+    struct objc_method_description_list *optional_class_methods;
+    struct objc_property_list *instance_properties;
+  };
+*/
+llvm::Constant *
+CGObjCMac::EmitProtocolExtension(const ObjCProtocolDecl *PD,
+                                 const ConstantVector &OptInstanceMethods,
+                                 const ConstantVector &OptClassMethods) {
+  uint64_t Size =
+    CGM.getTargetData().getTypeAllocSize(ObjCTypes.ProtocolExtensionTy);
+  std::vector<llvm::Constant*> Values(4);
+  Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+  Values[1] =
+    EmitMethodDescList("\01L_OBJC_PROTOCOL_INSTANCE_METHODS_OPT_" +
+                       PD->getNameAsString(),
+                       "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+                       OptInstanceMethods);
+  Values[2] =
+    EmitMethodDescList("\01L_OBJC_PROTOCOL_CLASS_METHODS_OPT_" +
+                       PD->getNameAsString(),
+                       "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+                       OptClassMethods);
+  Values[3] = EmitPropertyList("\01L_OBJC_$_PROP_PROTO_LIST_" +
+                               PD->getNameAsString(),
+                               0, PD, ObjCTypes);
+
+  // Return null if no extension bits are used.
+  if (Values[1]->isNullValue() && Values[2]->isNullValue() &&
+      Values[3]->isNullValue())
+    return llvm::Constant::getNullValue(ObjCTypes.ProtocolExtensionPtrTy);
+
+  llvm::Constant *Init =
+    llvm::ConstantStruct::get(ObjCTypes.ProtocolExtensionTy, Values);
+
+  // No special section, but goes in llvm.used
+  return CreateMetadataVar("\01L_OBJC_PROTOCOLEXT_" + PD->getNameAsString(),
+                           Init,
+                           0, 0, true);
+}
+
+/*
+  struct objc_protocol_list {
+    struct objc_protocol_list *next;
+    long count;
+    Protocol *list[];
+  };
+*/
+llvm::Constant *
+CGObjCMac::EmitProtocolList(const std::string &Name,
+                            ObjCProtocolDecl::protocol_iterator begin,
+                            ObjCProtocolDecl::protocol_iterator end) {
+  std::vector<llvm::Constant*> ProtocolRefs;
+
+  for (; begin != end; ++begin)
+    ProtocolRefs.push_back(GetProtocolRef(*begin));
+
+  // Just return null for empty protocol lists
+  if (ProtocolRefs.empty())
+    return llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+
+  // This list is null terminated.
+  ProtocolRefs.push_back(llvm::Constant::getNullValue(ObjCTypes.ProtocolPtrTy));
+
+  std::vector<llvm::Constant*> Values(3);
+  // This field is only used by the runtime.
+  Values[0] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+  // Count excludes the null terminator pushed above.
+  Values[1] = llvm::ConstantInt::get(ObjCTypes.LongTy, ProtocolRefs.size() - 1);
+  Values[2] =
+    llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.ProtocolPtrTy,
+                                                  ProtocolRefs.size()),
+                             ProtocolRefs);
+
+  llvm::Constant *Init = llvm::ConstantStruct::get(Values);
+  llvm::GlobalVariable *GV =
+    CreateMetadataVar(Name, Init, "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+                      4, false);
+  return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListPtrTy);
+}
+
+/*
+  struct _objc_property {
+    const char * const name;
+    const char * const attributes;
+  };
+
+  struct _objc_property_list {
+    uint32_t entsize; // sizeof (struct _objc_property)
+    uint32_t prop_count;
+    struct _objc_property[prop_count];
+  };
+*/
+llvm::Constant *CGObjCCommonMac::EmitPropertyList(const std::string &Name,
+                                      const Decl *Container,
+                                      const ObjCContainerDecl *OCD,
+                                      const ObjCCommonTypesHelper &ObjCTypes) {
+  std::vector<llvm::Constant*> Properties, Prop(2);
+  for (ObjCContainerDecl::prop_iterator I = OCD->prop_begin(CGM.getContext()),
+         E = OCD->prop_end(CGM.getContext()); I != E; ++I) {
+    const ObjCPropertyDecl *PD = *I;
+    Prop[0] = GetPropertyName(PD->getIdentifier());
+    Prop[1] = GetPropertyTypeString(PD, Container);
+    Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy,
+                                                   Prop));
+  }
+
+  // Return null for empty list.
+  if (Properties.empty())
+    return llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+
+  unsigned PropertySize =
+    CGM.getTargetData().getTypeAllocSize(ObjCTypes.PropertyTy);
+  std::vector<llvm::Constant*> Values(3);
+  Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, PropertySize);
+  Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Properties.size());
+  llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.PropertyTy,
+                                             Properties.size());
+  Values[2] = llvm::ConstantArray::get(AT, Properties);
+  llvm::Constant *Init = llvm::ConstantStruct::get(Values);
+
+  // Section and alignment differ between the fragile and non-fragile ABIs.
+  llvm::GlobalVariable *GV =
+    CreateMetadataVar(Name, Init,
+                      (ObjCABI == 2) ? "__DATA, __objc_const" :
+                      "__OBJC,__property,regular,no_dead_strip",
+                      (ObjCABI == 2) ? 8 : 4,
+                      true);
+  return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.PropertyListPtrTy);
+}
+
+/*
+  struct objc_method_description_list {
+    int count;
+    struct objc_method_description list[];
+  };
+*/
+llvm::Constant *
+CGObjCMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
+  // A method description is a (selector name, type encoding) pair.
+  std::vector<llvm::Constant*> Desc(2);
+  Desc[0] = llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+                                           ObjCTypes.SelectorPtrTy);
+  Desc[1] = GetMethodVarType(MD);
+  return llvm::ConstantStruct::get(ObjCTypes.MethodDescriptionTy,
+                                   Desc);
+}
+
+llvm::Constant *CGObjCMac::EmitMethodDescList(const std::string &Name,
+                                              const char *Section,
+                                              const ConstantVector &Methods) {
+  // Return null for empty list.
+  if (Methods.empty())
+    return llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
+
+  std::vector<llvm::Constant*> Values(2);
+  Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
+  llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodDescriptionTy,
+                                             Methods.size());
+  Values[1] = llvm::ConstantArray::get(AT, Methods);
+  llvm::Constant *Init = llvm::ConstantStruct::get(Values);
+
+  llvm::GlobalVariable *GV = CreateMetadataVar(Name, Init, Section, 4, true);
+  return llvm::ConstantExpr::getBitCast(GV,
+                                        ObjCTypes.MethodDescriptionListPtrTy);
+}
+
+/*
+  struct _objc_category {
+    char *category_name;
+    char *class_name;
+    struct _objc_method_list *instance_methods;
+    struct _objc_method_list *class_methods;
+    struct _objc_protocol_list *protocols;
+    uint32_t size; // <rdar://4585769>
+    struct _objc_property_list *instance_properties;
+  };
+ */
+void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
+  unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.CategoryTy);
+
+  // FIXME: This is poor design, the OCD should have a pointer to the category
+  // decl. Additionally, note that Category can be null for the @implementation
+  // w/o an @interface case. Sema should just create one for us as it does for
+  // @implementation so everyone else can live life under a clear blue sky.
+  const ObjCInterfaceDecl *Interface = OCD->getClassInterface();
+  const ObjCCategoryDecl *Category =
+    Interface->FindCategoryDeclaration(OCD->getIdentifier());
+  std::string ExtName(Interface->getNameAsString() + "_" +
+                      OCD->getNameAsString());
+
+  std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+  for (ObjCCategoryImplDecl::instmeth_iterator
+         i = OCD->instmeth_begin(CGM.getContext()),
+         e = OCD->instmeth_end(CGM.getContext()); i != e; ++i) {
+    // Instance methods should always be defined.
+    InstanceMethods.push_back(GetMethodConstant(*i));
+  }
+  for (ObjCCategoryImplDecl::classmeth_iterator
+         i = OCD->classmeth_begin(CGM.getContext()),
+         e = OCD->classmeth_end(CGM.getContext()); i != e; ++i) {
+    // Class methods should always be defined.
+    ClassMethods.push_back(GetMethodConstant(*i));
+  }
+
+  // Fill in the _objc_category fields (see the struct comment above).
+  std::vector<llvm::Constant*> Values(7);
+  Values[0] = GetClassName(OCD->getIdentifier());
+  Values[1] = GetClassName(Interface->getIdentifier());
+  LazySymbols.insert(Interface->getIdentifier());
+  Values[2] =
+    EmitMethodList(std::string("\01L_OBJC_CATEGORY_INSTANCE_METHODS_") +
+                   ExtName,
+                   "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+                   InstanceMethods);
+  Values[3] =
+    EmitMethodList(std::string("\01L_OBJC_CATEGORY_CLASS_METHODS_") + ExtName,
+                   "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+                   ClassMethods);
+  if (Category) {
+    Values[4] =
+      EmitProtocolList(std::string("\01L_OBJC_CATEGORY_PROTOCOLS_") + ExtName,
+                       Category->protocol_begin(),
+                       Category->protocol_end());
+  } else {
+    Values[4] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+  }
+  Values[5] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+
+  // If there is no category @interface then there can be no properties.
+  if (Category) {
+    Values[6] = EmitPropertyList(std::string("\01l_OBJC_$_PROP_LIST_") + ExtName,
+                                 OCD, Category, ObjCTypes);
+  } else {
+    Values[6] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+  }
+
+  llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.CategoryTy,
+                                                   Values);
+
+  llvm::GlobalVariable *GV =
+    CreateMetadataVar(std::string("\01L_OBJC_CATEGORY_")+ExtName, Init,
+                      "__OBJC,__category,regular,no_dead_strip",
+                      4, true);
+  DefinedCategories.push_back(GV);
+}
+
+// FIXME: Get from somewhere?
// Flag bits emitted into the 'info' field of struct _objc_class (see the
// layout comment before GenerateClass below).  These values must agree with
// what the Objective-C runtime expects; the eClassFlags_ABI2_* entries are
// presumably the corresponding bits under the non-fragile (v2) ABI — confirm
// against the runtime headers.
// FIXME: Get from somewhere?
enum ClassFlags {
  eClassFlags_Factory              = 0x00001,   // Set on ordinary (instance) classes.
  eClassFlags_Meta                 = 0x00002,   // Set on metaclasses.
  // <rdr://5142207>
  eClassFlags_HasCXXStructors      = 0x02000,   // Class has C++ ctors/dtors to run
                                                // (currently never set; see FIXME below).
  eClassFlags_Hidden               = 0x20000,   // Class has hidden visibility.
  eClassFlags_ABI2_Hidden          = 0x00010,
  eClassFlags_ABI2_HasCXXStructors = 0x00004    // <rdr://4923634>
};

/*
  struct _objc_class {
  Class isa;
  Class super_class;
  const char *name;
  long version;
  long info;
  long instance_size;
  struct _objc_ivar_list *ivars;
  struct _objc_method_list *methods;
  struct _objc_cache *cache;
  struct _objc_protocol_list *protocols;
  // Objective-C 1.0 extensions (<rdr://4585769>)
  const char *ivar_layout;
  struct _objc_class_ext *ext;
  };

  See EmitClassExtension();
*/
/// GenerateClass - Emit the fragile-ABI metadata for an @implementation:
/// the metaclass object (via EmitMetaClass) and the class object itself,
/// as an internal global in the __OBJC,__class section.  The Values[0..11]
/// initializer below mirrors, field for field, the struct _objc_class
/// layout in the comment above.
void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
  // Mark this class as defined in this module so it is not emitted as a
  // lazy/external symbol.
  DefinedSymbols.insert(ID->getIdentifier());

  std::string ClassName = ID->getNameAsString();
  // FIXME: Gross
  ObjCInterfaceDecl *Interface =
    const_cast<ObjCInterfaceDecl*>(ID->getClassInterface());
  llvm::Constant *Protocols =
    EmitProtocolList("\01L_OBJC_CLASS_PROTOCOLS_" + ID->getNameAsString(),
                     Interface->protocol_begin(),
                     Interface->protocol_end());
  unsigned Flags = eClassFlags_Factory;
  // Instance size: the implementation layout reports bits; divide by the
  // char width to get bytes for the runtime's instance_size field.
  unsigned Size =
    CGM.getContext().getASTObjCImplementationLayout(ID).getSize() / 8;

  // FIXME: Set CXX-structors flag.
  if (CGM.getDeclVisibilityMode(ID->getClassInterface()) == LangOptions::Hidden)
    Flags |= eClassFlags_Hidden;

  // Collect the method constants; class methods are forwarded to the
  // metaclass below, instance methods go on the class object itself.
  std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
  for (ObjCImplementationDecl::instmeth_iterator
         i = ID->instmeth_begin(CGM.getContext()),
         e = ID->instmeth_end(CGM.getContext()); i != e; ++i) {
    // Instance methods should always be defined.
    InstanceMethods.push_back(GetMethodConstant(*i));
  }
  for (ObjCImplementationDecl::classmeth_iterator
         i = ID->classmeth_begin(CGM.getContext()),
         e = ID->classmeth_end(CGM.getContext()); i != e; ++i) {
    // Class methods should always be defined.
    ClassMethods.push_back(GetMethodConstant(*i));
  }

  // Synthesized property accessors are not in the instmeth list; add any
  // getter/setter that actually has a definition (GetMethodConstant may
  // return null for those that do not).
  for (ObjCImplementationDecl::propimpl_iterator
         i = ID->propimpl_begin(CGM.getContext()),
         e = ID->propimpl_end(CGM.getContext()); i != e; ++i) {
    ObjCPropertyImplDecl *PID = *i;

    if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
      ObjCPropertyDecl *PD = PID->getPropertyDecl();

      if (ObjCMethodDecl *MD = PD->getGetterMethodDecl())
        if (llvm::Constant *C = GetMethodConstant(MD))
          InstanceMethods.push_back(C);
      if (ObjCMethodDecl *MD = PD->getSetterMethodDecl())
        if (llvm::Constant *C = GetMethodConstant(MD))
          InstanceMethods.push_back(C);
    }
  }

  std::vector<llvm::Constant*> Values(12);
  // isa: the metaclass object for this class.
  Values[ 0] = EmitMetaClass(ID, Protocols, ClassMethods);
  if (ObjCInterfaceDecl *Super = Interface->getSuperClass()) {
    // Record a reference to the super class.
    LazySymbols.insert(Super->getIdentifier());

    // super_class is emitted as the class *name*; the runtime patches it
    // to the real class pointer at load time.
    Values[ 1] =
      llvm::ConstantExpr::getBitCast(GetClassName(Super->getIdentifier()),
                                     ObjCTypes.ClassPtrTy);
  } else {
    // Root class: no superclass.
    Values[ 1] = llvm::Constant::getNullValue(ObjCTypes.ClassPtrTy);
  }
  Values[ 2] = GetClassName(ID->getIdentifier());
  // Version is always 0.
  Values[ 3] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
  Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags);
  Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
  Values[ 6] = EmitIvarList(ID, false);
  Values[ 7] =
    EmitMethodList("\01L_OBJC_INSTANCE_METHODS_" + ID->getNameAsString(),
                   "__OBJC,__inst_meth,regular,no_dead_strip",
                   InstanceMethods);
  // cache is always NULL.
  Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy);
  Values[ 9] = Protocols;
  // ivar_layout: GC layout bitmap for strong ivars.
  Values[10] = BuildIvarLayout(ID, true);
  // ext: weak-ivar layout and property list (may be null; see
  // EmitClassExtension).
  Values[11] = EmitClassExtension(ID);
  llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy,
                                                   Values);

  llvm::GlobalVariable *GV =
    CreateMetadataVar(std::string("\01L_OBJC_CLASS_")+ClassName, Init,
                      "__OBJC,__class,regular,no_dead_strip",
                      4, true);
  DefinedClasses.push_back(GV);
}

/// EmitMetaClass - Emit the metaclass object for \p ID.  A metaclass is
/// itself a struct _objc_class (hence 12 Values below, matching the layout
/// comment above GenerateClass): its isa is the hierarchy root, its method
/// list holds the class methods, and the ivar_layout/ext fields are unused.
/// Supports patching up an earlier forward reference created by
/// EmitMetaClassRef.
llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
                                         llvm::Constant *Protocols,
                                         const ConstantVector &Methods) {
  unsigned Flags = eClassFlags_Meta;
  // instance_size of a metaclass is the size of the class structure itself.
  unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassTy);

  if (CGM.getDeclVisibilityMode(ID->getClassInterface()) == LangOptions::Hidden)
    Flags |= eClassFlags_Hidden;

  std::vector<llvm::Constant*> Values(12);
  // The isa for the metaclass is the root of the hierarchy.
  const ObjCInterfaceDecl *Root = ID->getClassInterface();
  while (const ObjCInterfaceDecl *Super = Root->getSuperClass())
    Root = Super;
  // Emitted as the root class *name*; the runtime resolves it.
  Values[ 0] =
    llvm::ConstantExpr::getBitCast(GetClassName(Root->getIdentifier()),
                                   ObjCTypes.ClassPtrTy);
  // The super class for the metaclass is emitted as the name of the
  // super class. The runtime fixes this up to point to the
  // *metaclass* for the super class.
  if (ObjCInterfaceDecl *Super = ID->getClassInterface()->getSuperClass()) {
    Values[ 1] =
      llvm::ConstantExpr::getBitCast(GetClassName(Super->getIdentifier()),
                                     ObjCTypes.ClassPtrTy);
  } else {
    Values[ 1] = llvm::Constant::getNullValue(ObjCTypes.ClassPtrTy);
  }
  Values[ 2] = GetClassName(ID->getIdentifier());
  // Version is always 0.
  Values[ 3] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
  Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags);
  Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
  // 'true' selects the class (metaclass) ivar list.
  Values[ 6] = EmitIvarList(ID, true);
  Values[ 7] =
    EmitMethodList("\01L_OBJC_CLASS_METHODS_" + ID->getNameAsString(),
                   "__OBJC,__cls_meth,regular,no_dead_strip",
                   Methods);
  // cache is always NULL.
  Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy);
  Values[ 9] = Protocols;
  // ivar_layout for metaclass is always NULL.
  Values[10] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
  // The class extension is always unused for metaclasses.
  Values[11] = llvm::Constant::getNullValue(ObjCTypes.ClassExtensionPtrTy);
  llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy,
                                                   Values);

  std::string Name("\01L_OBJC_METACLASS_");
  Name += ID->getNameAsCString();

  // Check for a forward reference.
  // EmitMetaClassRef may already have created this global as an external
  // declaration; if so, reuse it and fill in the real definition.
  llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
  if (GV) {
    assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
           "Forward metaclass reference has incorrect type.");
    GV->setLinkage(llvm::GlobalValue::InternalLinkage);
    GV->setInitializer(Init);
  } else {
    GV = new llvm::GlobalVariable(ObjCTypes.ClassTy, false,
                                  llvm::GlobalValue::InternalLinkage,
                                  Init, Name,
                                  &CGM.getModule());
  }
  GV->setSection("__OBJC,__meta_class,regular,no_dead_strip");
  GV->setAlignment(4);
  // Keep the global alive across dead-stripping via llvm.used.
  UsedGlobals.push_back(GV);

  return GV;
}

/// EmitMetaClassRef - Return a reference to the metaclass global for \p ID,
/// creating an external forward declaration if it has not been emitted yet
/// (EmitMetaClass patches such declarations into real definitions).
llvm::Constant *CGObjCMac::EmitMetaClassRef(const ObjCInterfaceDecl *ID) {
  std::string Name = "\01L_OBJC_METACLASS_" + ID->getNameAsString();

  // FIXME: Should we look these up somewhere other than the module. Its a bit
  // silly since we only generate these while processing an implementation, so
  // exactly one pointer would work if know when we entered/exitted an
  // implementation block.

  // Check for an existing forward reference.
  // Previously, metaclass with internal linkage may have been defined.
  // pass 'true' as 2nd argument so it is returned.
  if (llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true)) {
    assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
           "Forward metaclass reference has incorrect type.");
    return GV;
  } else {
    // Generate as an external reference to keep a consistent
    // module. This will be patched up when we emit the metaclass.
    return new llvm::GlobalVariable(ObjCTypes.ClassTy, false,
                                    llvm::GlobalValue::ExternalLinkage,
                                    0,
                                    Name,
                                    &CGM.getModule());
  }
}

/*
  struct objc_class_ext {
  uint32_t size;
  const char *weak_ivar_layout;
  struct _objc_property_list *properties;
  };
*/
/// EmitClassExtension - Emit the struct objc_class_ext for \p ID (weak ivar
/// layout plus property list), or a null pointer when both are empty so no
/// extension structure needs to be emitted at all.
llvm::Constant *
CGObjCMac::EmitClassExtension(const ObjCImplementationDecl *ID) {
  uint64_t Size =
    CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassExtensionTy);

  std::vector<llvm::Constant*> Values(3);
  Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
  // 'false' selects the weak-ivar layout variant of the bitmap.
  Values[1] = BuildIvarLayout(ID, false);
  Values[2] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ID->getNameAsString(),
                               ID, ID->getClassInterface(), ObjCTypes);

  // Return null if no extension bits are used.
  if (Values[1]->isNullValue() && Values[2]->isNullValue())
    return llvm::Constant::getNullValue(ObjCTypes.ClassExtensionPtrTy);

  llvm::Constant *Init =
    llvm::ConstantStruct::get(ObjCTypes.ClassExtensionTy, Values);
  return CreateMetadataVar("\01L_OBJC_CLASSEXT_" + ID->getNameAsString(),
                           Init, "__OBJC,__class_ext,regular,no_dead_strip",
                           4, true);
}

/*
  struct objc_ivar {
  char *ivar_name;
  char *ivar_type;
  int ivar_offset;
  };

  struct objc_ivar_list {
  int ivar_count;
  struct objc_ivar list[count];
  };
*/
llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,