LLVM API Documentation

Unix/Memory.inc
Go to the documentation of this file.
00001 //===- Unix/Memory.inc - Generic UNIX System Configuration ------*- C++ -*-===//
00002 //
00003 //                     The LLVM Compiler Infrastructure
00004 //
00005 // This file is distributed under the University of Illinois Open Source
00006 // License. See LICENSE.TXT for details.
00007 //
00008 //===----------------------------------------------------------------------===//
00009 //
00010 // This file defines some functions for various memory management utilities.
00011 //
00012 //===----------------------------------------------------------------------===//
00013 
00014 #include "Unix.h"
00015 #include "llvm/Support/DataTypes.h"
00016 #include "llvm/Support/ErrorHandling.h"
00017 #include "llvm/Support/Process.h"
00018 
00019 #ifdef HAVE_SYS_MMAN_H
00020 #include <sys/mman.h>
00021 #endif
00022 
00023 #ifdef __APPLE__
00024 #include <mach/mach.h>
00025 #endif
00026 
00027 #if defined(__mips__)
00028 #  if defined(__OpenBSD__)
00029 #    include <mips64/sysarch.h>
00030 #  else
00031 #    include <sys/cachectl.h>
00032 #  endif
00033 #endif
00034 
00035 #ifdef __APPLE__
00036 extern "C" void sys_icache_invalidate(const void *Addr, size_t len);
00037 #else
00038 extern "C" void __clear_cache(void *, void*);
00039 #endif
00040 
00041 namespace {
00042 
00043 int getPosixProtectionFlags(unsigned Flags) {
00044   switch (Flags) {
00045   case llvm::sys::Memory::MF_READ:
00046     return PROT_READ;
00047   case llvm::sys::Memory::MF_WRITE:
00048     return PROT_WRITE;
00049   case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_WRITE:
00050     return PROT_READ | PROT_WRITE;
00051   case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_EXEC:
00052     return PROT_READ | PROT_EXEC;
00053   case llvm::sys::Memory::MF_READ |
00054    llvm::sys::Memory::MF_WRITE |
00055    llvm::sys::Memory::MF_EXEC:
00056     return PROT_READ | PROT_WRITE | PROT_EXEC;
00057   case llvm::sys::Memory::MF_EXEC:
00058 #if defined(__FreeBSD__)
00059     // On PowerPC, having an executable page that has no read permission
00060     // can have unintended consequences.  The function InvalidateInstruction-
00061     // Cache uses instructions dcbf and icbi, both of which are treated by
00062     // the processor as loads.  If the page has no read permissions,
00063     // executing these instructions will result in a segmentation fault.
00064     // Somehow, this problem is not present on Linux, but it does happen
00065     // on FreeBSD.
00066     return PROT_READ | PROT_EXEC;
00067 #else
00068     return PROT_EXEC;
00069 #endif
00070   default:
00071     llvm_unreachable("Illegal memory protection flag specified!");
00072   }
00073   // Provide a default return value as required by some compilers.
00074   return PROT_NONE;
00075 }
00076 
00077 } // namespace
00078 
00079 namespace llvm {
00080 namespace sys {
00081 
00082 MemoryBlock
00083 Memory::allocateMappedMemory(size_t NumBytes,
00084                              const MemoryBlock *const NearBlock,
00085                              unsigned PFlags,
00086                              std::error_code &EC) {
00087   EC = std::error_code();
00088   if (NumBytes == 0)
00089     return MemoryBlock();
00090 
00091   static const size_t PageSize = process::get_self()->page_size();
00092   const size_t NumPages = (NumBytes+PageSize-1)/PageSize;
00093 
00094   int fd = -1;
00095 #ifdef NEED_DEV_ZERO_FOR_MMAP
00096   static int zero_fd = open("/dev/zero", O_RDWR);
00097   if (zero_fd == -1) {
00098     EC = std::error_code(errno, std::generic_category());
00099     return MemoryBlock();
00100   }
00101   fd = zero_fd;
00102 #endif
00103 
00104   int MMFlags = MAP_PRIVATE |
00105 #ifdef HAVE_MMAP_ANONYMOUS
00106   MAP_ANONYMOUS
00107 #else
00108   MAP_ANON
00109 #endif
00110   ; // Ends statement above
00111 
00112   int Protect = getPosixProtectionFlags(PFlags);
00113 
00114   // Use any near hint and the page size to set a page-aligned starting address
00115   uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
00116                                       NearBlock->size() : 0;
00117   if (Start && Start % PageSize)
00118     Start += PageSize - Start % PageSize;
00119 
00120   void *Addr = ::mmap(reinterpret_cast<void*>(Start), PageSize*NumPages,
00121                       Protect, MMFlags, fd, 0);
00122   if (Addr == MAP_FAILED) {
00123     if (NearBlock) //Try again without a near hint
00124       return allocateMappedMemory(NumBytes, nullptr, PFlags, EC);
00125 
00126     EC = std::error_code(errno, std::generic_category());
00127     return MemoryBlock();
00128   }
00129 
00130   MemoryBlock Result;
00131   Result.Address = Addr;
00132   Result.Size = NumPages*PageSize;
00133 
00134   if (PFlags & MF_EXEC)
00135     Memory::InvalidateInstructionCache(Result.Address, Result.Size);
00136 
00137   return Result;
00138 }
00139 
00140 std::error_code
00141 Memory::releaseMappedMemory(MemoryBlock &M) {
00142   if (M.Address == nullptr || M.Size == 0)
00143     return std::error_code();
00144 
00145   if (0 != ::munmap(M.Address, M.Size))
00146     return std::error_code(errno, std::generic_category());
00147 
00148   M.Address = nullptr;
00149   M.Size = 0;
00150 
00151   return std::error_code();
00152 }
00153 
00154 std::error_code
00155 Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {
00156   if (M.Address == nullptr || M.Size == 0)
00157     return std::error_code();
00158 
00159   if (!Flags)
00160     return std::error_code(EINVAL, std::generic_category());
00161 
00162   int Protect = getPosixProtectionFlags(Flags);
00163 
00164   int Result = ::mprotect(M.Address, M.Size, Protect);
00165   if (Result != 0)
00166     return std::error_code(errno, std::generic_category());
00167 
00168   if (Flags & MF_EXEC)
00169     Memory::InvalidateInstructionCache(M.Address, M.Size);
00170 
00171   return std::error_code();
00172 }
00173 
00174 /// AllocateRWX - Allocate a slab of memory with read/write/execute
00175 /// permissions.  This is typically used for JIT applications where we want
00176 /// to emit code to the memory then jump to it.  Getting this type of memory
00177 /// is very OS specific.
00178 ///
00179 MemoryBlock
00180 Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock,
00181                     std::string *ErrMsg) {
00182   if (NumBytes == 0) return MemoryBlock();
00183 
00184   size_t PageSize = process::get_self()->page_size();
00185   size_t NumPages = (NumBytes+PageSize-1)/PageSize;
00186 
00187   int fd = -1;
00188 #ifdef NEED_DEV_ZERO_FOR_MMAP
00189   static int zero_fd = open("/dev/zero", O_RDWR);
00190   if (zero_fd == -1) {
00191     MakeErrMsg(ErrMsg, "Can't open /dev/zero device");
00192     return MemoryBlock();
00193   }
00194   fd = zero_fd;
00195 #endif
00196 
00197   int flags = MAP_PRIVATE |
00198 #ifdef HAVE_MMAP_ANONYMOUS
00199   MAP_ANONYMOUS
00200 #else
00201   MAP_ANON
00202 #endif
00203   ;
00204 
00205   void* start = NearBlock ? (unsigned char*)NearBlock->base() +
00206                             NearBlock->size() : nullptr;
00207 
00208 #if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
00209   void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_EXEC,
00210                     flags, fd, 0);
00211 #else
00212   void *pa = ::mmap(start, PageSize*NumPages, PROT_READ|PROT_WRITE|PROT_EXEC,
00213                     flags, fd, 0);
00214 #endif
00215   if (pa == MAP_FAILED) {
00216     if (NearBlock) //Try again without a near hint
00217       return AllocateRWX(NumBytes, nullptr);
00218 
00219     MakeErrMsg(ErrMsg, "Can't allocate RWX Memory");
00220     return MemoryBlock();
00221   }
00222 
00223 #if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
00224   kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)pa,
00225                                 (vm_size_t)(PageSize*NumPages), 0,
00226                                 VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
00227   if (KERN_SUCCESS != kr) {
00228     MakeErrMsg(ErrMsg, "vm_protect max RX failed");
00229     return MemoryBlock();
00230   }
00231 
00232   kr = vm_protect(mach_task_self(), (vm_address_t)pa,
00233                   (vm_size_t)(PageSize*NumPages), 0,
00234                   VM_PROT_READ | VM_PROT_WRITE);
00235   if (KERN_SUCCESS != kr) {
00236     MakeErrMsg(ErrMsg, "vm_protect RW failed");
00237     return MemoryBlock();
00238   }
00239 #endif
00240 
00241   MemoryBlock result;
00242   result.Address = pa;
00243   result.Size = NumPages*PageSize;
00244 
00245   return result;
00246 }
00247 
00248 bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) {
00249   if (M.Address == nullptr || M.Size == 0) return false;
00250   if (0 != ::munmap(M.Address, M.Size))
00251     return MakeErrMsg(ErrMsg, "Can't release RWX Memory");
00252   return false;
00253 }
00254 
00255 bool Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) {
00256 #if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
00257   if (M.Address == 0 || M.Size == 0) return false;
00258   Memory::InvalidateInstructionCache(M.Address, M.Size);
00259   kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
00260     (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_WRITE);
00261   return KERN_SUCCESS == kr;
00262 #else
00263   return true;
00264 #endif
00265 }
00266 
00267 bool Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) {
00268 #if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
00269   if (M.Address == 0 || M.Size == 0) return false;
00270   Memory::InvalidateInstructionCache(M.Address, M.Size);
00271   kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address,
00272     (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
00273   return KERN_SUCCESS == kr;
00274 #elif defined(__arm__) || defined(__aarch64__)
00275   Memory::InvalidateInstructionCache(M.Address, M.Size);
00276   return true;
00277 #else
00278   return true;
00279 #endif
00280 }
00281 
00282 bool Memory::setRangeWritable(const void *Addr, size_t Size) {
00283 #if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
00284   kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
00285                                 (vm_size_t)Size, 0,
00286                                 VM_PROT_READ | VM_PROT_WRITE);
00287   return KERN_SUCCESS == kr;
00288 #else
00289   return true;
00290 #endif
00291 }
00292 
00293 bool Memory::setRangeExecutable(const void *Addr, size_t Size) {
00294 #if defined(__APPLE__) && (defined(__arm__) || defined(__arm64__))
00295   kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr,
00296                                 (vm_size_t)Size, 0,
00297                                 VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY);
00298   return KERN_SUCCESS == kr;
00299 #else
00300   return true;
00301 #endif
00302 }
00303 
/// InvalidateInstructionCache - Before the JIT can run a block of code
/// that has been emitted it must invalidate the instruction cache on some
/// platforms.
///
/// On architectures with coherent instruction caches relative to this
/// path (e.g. x86), every branch below compiles away and only the
/// Valgrind notification remains.
void Memory::InvalidateInstructionCache(const void *Addr,
                                        size_t Len) {

// icache invalidation for PPC and ARM.
#if defined(__APPLE__)

#  if (defined(__POWERPC__) || defined (__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC) || defined(__arm__) || \
       defined(__arm64__))
  // Darwin provides a single libkernel entry point for all of these.
  sys_icache_invalidate(const_cast<void *>(Addr), Len);
#  endif

#else

#  if (defined(__POWERPC__) || defined (__ppc__) || \
       defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__)
  // NOTE(review): assumes a 32-byte cache line; some PPC implementations
  // use larger lines (flushing at a finer granularity than the real line
  // size is still correct, just redundant).
  const size_t LineSize = 32;

  const intptr_t Mask = ~(LineSize - 1);
  const intptr_t StartLine = ((intptr_t) Addr) & Mask;
  const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask;

  // First pass: flush the data cache lines to memory (dcbf), then wait
  // for completion (sync) before touching the instruction cache.
  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("dcbf 0, %0" : : "r"(Line));
  asm volatile("sync");

  // Second pass: invalidate the corresponding icache lines (icbi) and
  // discard any prefetched instructions (isync).
  for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
    asm volatile("icbi 0, %0" : : "r"(Line));
  asm volatile("isync");
#  elif (defined(__arm__) || defined(__aarch64__)) && defined(__GNUC__)
  // FIXME: Can we safely always call this for __GNUC__ everywhere?
  const char *Start = static_cast<const char *>(Addr);
  const char *End = Start + Len;
  // GCC/Clang builtin; takes a [start, end) byte range.
  __clear_cache(const_cast<char *>(Start), const_cast<char *>(End));
#  elif defined(__mips__)
  const char *Start = static_cast<const char *>(Addr);
#    if defined(ANDROID)
  // The declaration of "cacheflush" in Android bionic:
  // extern int cacheflush(long start, long end, long flags);
  const char *End = Start + Len;
  long LStart = reinterpret_cast<long>(const_cast<char *>(Start));
  long LEnd = reinterpret_cast<long>(const_cast<char *>(End));
  cacheflush(LStart, LEnd, BCACHE);
#    else
  // Non-Android MIPS libc takes (addr, nbytes, cache) instead.
  cacheflush(const_cast<char *>(Start), Len, BCACHE);
#    endif
#  endif

#endif  // end apple

  // Tell Valgrind to drop any JIT translations it cached for this range.
  ValgrindDiscardTranslations(Addr, Len);
}
00359 
00360 } // namespace sys
00361 } // namespace llvm