mapped_region.hpp 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922
  1. //////////////////////////////////////////////////////////////////////////////
  2. //
  3. // (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
  4. // Software License, Version 1.0. (See accompanying file
  5. // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
  6. //
  7. // See http://www.boost.org/libs/interprocess for documentation.
  8. //
  9. //////////////////////////////////////////////////////////////////////////////
  10. #ifndef BOOST_INTERPROCESS_MAPPED_REGION_HPP
  11. #define BOOST_INTERPROCESS_MAPPED_REGION_HPP
  12. #ifndef BOOST_CONFIG_HPP
  13. # include <boost/config.hpp>
  14. #endif
  15. #
  16. #if defined(BOOST_HAS_PRAGMA_ONCE)
  17. # pragma once
  18. #endif
  19. #include <boost/interprocess/detail/config_begin.hpp>
  20. #include <boost/interprocess/detail/workaround.hpp>
  21. #include <boost/interprocess/interprocess_fwd.hpp>
  22. #include <boost/interprocess/exceptions.hpp>
  23. #include <boost/move/utility_core.hpp>
  24. #include <boost/interprocess/detail/utilities.hpp>
  25. #include <boost/interprocess/detail/os_file_functions.hpp>
  26. #include <string>
  27. #include <boost/cstdint.hpp>
  28. #include <boost/assert.hpp>
  29. #include <boost/move/adl_move_swap.hpp>
  30. //Some Unixes use caddr_t instead of void * in madvise
  31. // SunOS Tru64 HP-UX AIX
  32. #if defined(sun) || defined(__sun) || defined(__osf__) || defined(__osf) || defined(_hpux) || defined(hpux) || defined(_AIX)
  33. #define BOOST_INTERPROCESS_MADVISE_USES_CADDR_T
  34. #include <sys/types.h>
  35. #endif
  36. //A lot of UNIXes have destructive semantics for MADV_DONTNEED, so
  37. //we need to be careful to allow it.
  38. #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__APPLE__)
  39. #define BOOST_INTERPROCESS_MADV_DONTNEED_HAS_NONDESTRUCTIVE_SEMANTICS
  40. #endif
  41. #if defined (BOOST_INTERPROCESS_WINDOWS)
  42. # include <boost/interprocess/detail/win32_api.hpp>
  43. #else
  44. # ifdef BOOST_HAS_UNISTD_H
  45. # include <fcntl.h>
  46. # include <sys/mman.h> //mmap
  47. # include <unistd.h>
  48. # include <sys/stat.h>
  49. # include <sys/types.h>
  50. # if defined(BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS)
  51. # include <sys/shm.h> //System V shared memory...
  52. # endif
  53. # include <boost/assert.hpp>
  54. # else
  55. # error Unknown platform
  56. # endif
  57. #endif //#if defined (BOOST_INTERPROCESS_WINDOWS)
  58. //!\file
  59. //!Describes mapped region class
  60. namespace boost {
  61. namespace interprocess {
  62. #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
  63. //Solaris declares madvise only in some configurations but defines MADV_XXX, a bit confusing.
  64. //Predeclare it here to avoid any compilation error
  65. #if (defined(sun) || defined(__sun)) && defined(MADV_NORMAL)
  66. extern "C" int madvise(caddr_t, size_t, int);
  67. #endif
  68. namespace ipcdetail{ class interprocess_tester; }
  69. namespace ipcdetail{ class raw_mapped_region_creator; }
  70. #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
  71. //!The mapped_region class represents a portion or region created from a
  72. //!memory_mappable object.
  73. //!
  74. //!The OS can map a region bigger than the requested one, as region must
  75. //!be multiple of the page size, but mapped_region will always refer to
  76. //!the region specified by the user.
//!The mapped_region class represents a portion or region created from a
//!memory_mappable object.
//!
//!The OS can map a region bigger than the requested one, as region must
//!be multiple of the page size, but mapped_region will always refer to
//!the region specified by the user.
class mapped_region
{
   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   //Non-copyable
   BOOST_MOVABLE_BUT_NOT_COPYABLE(mapped_region)
   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED

   public:

   //!Creates a mapping region of the mapped memory "mapping", starting in
   //!offset "offset", and the mapping's size will be "size". The mapping
   //!can be opened for read only, read-write or copy-on-write.
   //!
   //!If an address is specified, both the offset and the address must be
   //!multiples of the page size.
   //!
   //!The map is created using "default_map_options". This flag is OS
   //!dependant and it should not be changed unless the user needs to
   //!specify special options.
   //!
   //!In Windows systems "map_options" is a DWORD value passed as
   //!"dwDesiredAccess" to "MapViewOfFileEx". If "default_map_options" is passed
   //!it's initialized to zero. "map_options" is XORed with FILE_MAP_[COPY|READ|WRITE].
   //!
   //!In UNIX systems and POSIX mappings "map_options" is an int value passed as "flags"
   //!to "mmap". If "default_map_options" is specified it's initialized to MAP_NOSYNC
   //!if that option exists and to zero otherwise. "map_options" XORed with MAP_PRIVATE or MAP_SHARED.
   //!
   //!In UNIX systems and XSI mappings "map_options" is an int value passed as "shmflg"
   //!to "shmat". If "default_map_options" is specified it's initialized to zero.
   //!"map_options" is XORed with SHM_RDONLY if needed.
   //!
   //!The OS could allocate more pages than size/page_size(), but get_address()
   //!will always return the address passed in this function (if not null) and
   //!get_size() will return the specified size.
   template<class MemoryMappable>
   mapped_region(const MemoryMappable& mapping
                ,mode_t mode
                ,offset_t offset = 0
                ,std::size_t size = 0
                ,const void *address = 0
                ,map_options_t map_options = default_map_options);

   //!Default constructor. Address will be 0 (nullptr).
   //!Size will be 0.
   //!Does not throw
   mapped_region() BOOST_NOEXCEPT;

   //!Move constructor. *this will be constructed taking ownership of "other"'s
   //!region and "other" will be left in default constructor state.
   mapped_region(BOOST_RV_REF(mapped_region) other) BOOST_NOEXCEPT
   #if defined (BOOST_INTERPROCESS_WINDOWS)
      : m_base(0), m_size(0)
      , m_page_offset(0)
      , m_mode(read_only)
      , m_file_or_mapping_hnd(ipcdetail::invalid_file())
   #else
      : m_base(0), m_size(0), m_page_offset(0), m_mode(read_only), m_is_xsi(false)
   #endif
   //Default-construct, then swap with "other": *this steals the mapping,
   //"other" is left in the default-constructed state.
   {  this->swap(other);   }

   //!Destroys the mapped region.
   //!Does not throw
   ~mapped_region();

   //!Move assignment. If *this owns a memory mapped region, it will be
   //!destroyed and it will take ownership of "other"'s memory mapped region.
   mapped_region &operator=(BOOST_RV_REF(mapped_region) other) BOOST_NOEXCEPT
   {
      //Move-construct a temporary from "other" and swap: the previous
      //resources of *this are released when tmp is destroyed.
      mapped_region tmp(boost::move(other));
      this->swap(tmp);
      return *this;
   }

   //!Swaps the mapped_region with another
   //!mapped region
   void swap(mapped_region &other) BOOST_NOEXCEPT;

   //!Returns the size of the mapping. Never throws.
   std::size_t get_size() const BOOST_NOEXCEPT;

   //!Returns the base address of the mapping.
   //!Never throws.
   void* get_address() const BOOST_NOEXCEPT;

   //!Returns the mode of the mapping used to construct the mapped region.
   //!Never throws.
   mode_t get_mode() const BOOST_NOEXCEPT;

   //!Flushes to the disk a byte range within the mapped memory.
   //!If 'async' is true, the function will return before flushing operation is completed
   //!If 'async' is false, function will return once data has been written into the underlying
   //!device (i.e., in mapped files OS cached information is written to disk).
   //!Never throws. Returns false if operation could not be performed.
   bool flush(std::size_t mapping_offset = 0, std::size_t numbytes = 0, bool async = true);

   //!Shrinks current mapped region. If after shrinking there is no longer need for a previously
   //!mapped memory page, accessing that page can trigger a segmentation fault.
   //!Depending on the OS, this operation might fail (XSI shared memory), it can decommit storage
   //!and free a portion of the virtual address space (e.g.POSIX) or this
   //!function can release some physical memory without freeing any virtual address space(Windows).
   //!Returns true on success. Never throws.
   bool shrink_by(std::size_t bytes, bool from_back = true);

   //!This enum specifies region usage behaviors that an application can specify
   //!to the mapped region implementation.
   enum advice_types{
      //!Specifies that the application has no advice to give on its behavior with respect to
      //!the region. It is the default characteristic if no advice is given for a range of memory.
      advice_normal,
      //!Specifies that the application expects to access the region sequentially from
      //!lower addresses to higher addresses. The implementation can lower the priority of
      //!preceding pages within the region once a page have been accessed.
      advice_sequential,
      //!Specifies that the application expects to access the region in a random order,
      //!and prefetching is likely not advantageous.
      advice_random,
      //!Specifies that the application expects to access the region in the near future.
      //!The implementation can prefetch pages of the region.
      advice_willneed,
      //!Specifies that the application expects that it will not access the region in the near future.
      //!The implementation can unload pages within the range to save system resources.
      advice_dontneed
   };

   //!Advises the implementation on the expected behavior of the application with respect to the data
   //!in the region. The implementation may use this information to optimize handling of the region data.
   //!This function has no effect on the semantics of access to memory in the region, although it may affect
   //!the performance of access.
   //!If the advise type is not known to the implementation, the function returns false. True otherwise.
   bool advise(advice_types advise);

   //!Returns the size of the page. This size is the minimum memory that
   //!will be used by the system when mapping a memory mappable source and
   //!will restrict the address and the offset to map.
   static std::size_t get_page_size() BOOST_NOEXCEPT;

   #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
   private:
   //!Closes a previously opened memory mapping. Never throws
   void priv_close();

   //Address where the OS mapping really starts (user base minus page offset)
   void* priv_map_address() const;
   //Byte count of the OS mapping (user size plus page offset)
   std::size_t priv_map_size() const;
   //Validates/normalizes a flush request; outputs the page-aligned range
   bool priv_flush_param_check(std::size_t mapping_offset, void *&addr, std::size_t &numbytes) const;
   //Validates a shrink request and computes the page range to release
   bool priv_shrink_param_check(std::size_t bytes, bool from_back, void *&shrink_page_start, std::size_t &shrink_page_bytes);
   //Computes the user-visible size from the mappable's total size; throws on overflow
   static void priv_size_from_mapping_size
      (offset_t mapping_size, offset_t offset, offset_t page_offset, std::size_t &size);
   //Returns offset's distance to the previous page boundary and rebases "addr" accordingly
   static offset_t priv_page_offset_addr_fixup(offset_t page_offset, const void *&addr);

   //Template trick to initialize the page size once per process at static-init time
   template<int dummy>
   struct page_size_holder
   {
      static const std::size_t PageSize;
      static std::size_t get_page_size();
   };

   void*             m_base;          //address returned to the user
   std::size_t       m_size;          //user-requested size
   std::size_t       m_page_offset;   //distance from the mapping's page base to m_base
   mode_t            m_mode;          //access mode the region was created with
   #if defined(BOOST_INTERPROCESS_WINDOWS)
   file_handle_t     m_file_or_mapping_hnd;  //duplicated handle used e.g. by flush()
   #else
   bool              m_is_xsi;        //true when attached via shmat (XSI shared memory)
   #endif

   friend class ipcdetail::interprocess_tester;
   friend class ipcdetail::raw_mapped_region_creator;
   void dont_close_on_destruction();
   #if defined(BOOST_INTERPROCESS_WINDOWS) && !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)
   template<int Dummy>
   static void destroy_syncs_in_range(const void *addr, std::size_t size);
   #endif
   #endif   //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
};
  233. #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
//Free-function swap so ADL and boost::adl_move_swap find it; forwards to the member swap
inline void swap(mapped_region &x, mapped_region &y) BOOST_NOEXCEPT
{  x.swap(y);  }
//Destructor: unmaps/detaches the region and releases any duplicated handle
inline mapped_region::~mapped_region()
{  this->priv_close();  }
//Returns the size requested by the user, not the (possibly larger) OS mapping size
inline std::size_t mapped_region::get_size() const BOOST_NOEXCEPT
{  return m_size;  }
//Returns the access mode the region was constructed with
inline mode_t mapped_region::get_mode() const BOOST_NOEXCEPT
{  return m_mode;  }
//Returns the user-visible base address (page base + page offset)
inline void* mapped_region::get_address() const BOOST_NOEXCEPT
{  return m_base;  }
//Real start of the OS mapping: the user base rewound to its page boundary
inline void* mapped_region::priv_map_address() const
{  return static_cast<char*>(m_base) - m_page_offset;  }
//Real extent of the OS mapping: user size plus the leading page offset
inline std::size_t mapped_region::priv_map_size() const
{  return m_size + m_page_offset;  }
  248. inline bool mapped_region::priv_flush_param_check
  249. (std::size_t mapping_offset, void *&addr, std::size_t &numbytes) const
  250. {
  251. //Check some errors
  252. if(m_base == 0)
  253. return false;
  254. if(mapping_offset >= m_size || numbytes > (m_size - size_t(mapping_offset))){
  255. return false;
  256. }
  257. //Update flush size if the user does not provide it
  258. if(numbytes == 0){
  259. numbytes = m_size - mapping_offset;
  260. }
  261. addr = (char*)this->priv_map_address() + mapping_offset;
  262. numbytes += m_page_offset;
  263. return true;
  264. }
//Validates a shrink request, updates the region's bookkeeping and outputs the
//page range (start, byte count) that the caller may release to the OS.
//Returns false on an unmapped region or when shrinking more than the size.
inline bool mapped_region::priv_shrink_param_check
   (std::size_t bytes, bool from_back, void *&shrink_page_start, std::size_t &shrink_page_bytes)
{
   //Check some errors
   if(m_base == 0 || bytes > m_size){
      return false;
   }
   else if(bytes == m_size){
      //Shrinking away the whole region is equivalent to closing it;
      //the caller has no leftover pages to release
      this->priv_close();
      return true;
   }
   else{
      const std::size_t page_size = mapped_region::get_page_size();
      if(from_back){
         //Whole pages still needed after the shrink (round up to a full page)
         const std::size_t new_pages = (m_size + m_page_offset - bytes - 1)/page_size + 1;
         //Everything past those pages becomes releasable
         shrink_page_start = static_cast<char*>(this->priv_map_address()) + new_pages*page_size;
         shrink_page_bytes = m_page_offset + m_size - new_pages*page_size;
         m_size -= bytes;
      }
      else{
         //Shrinking from the front: fold the removed bytes into the page
         //offset, release only the whole pages that became unused and keep
         //the remainder as the new intra-page offset
         shrink_page_start = this->priv_map_address();
         m_page_offset += bytes;
         shrink_page_bytes = (m_page_offset/page_size)*page_size;
         m_page_offset = m_page_offset % page_size;
         m_size -= bytes;
         m_base = static_cast<char *>(m_base) + bytes;
         BOOST_ASSERT(shrink_page_bytes%page_size == 0);
      }
      return true;
   }
}
  296. inline void mapped_region::priv_size_from_mapping_size
  297. (offset_t mapping_size, offset_t offset, offset_t page_offset, std::size_t &size)
  298. {
  299. //Check if mapping size fits in the user address space
  300. //as offset_t is the maximum file size and it's signed.
  301. if(mapping_size < offset ||
  302. boost::uintmax_t(mapping_size - (offset - page_offset)) >
  303. boost::uintmax_t(std::size_t(-1))){
  304. error_info err(size_error);
  305. throw interprocess_exception(err);
  306. }
  307. size = static_cast<std::size_t>(mapping_size - offset);
  308. }
  309. inline offset_t mapped_region::priv_page_offset_addr_fixup(offset_t offset, const void *&address)
  310. {
  311. //We can't map any offset so we have to obtain system's
  312. //memory granularity
  313. const std::size_t page_size = mapped_region::get_page_size();
  314. //We calculate the difference between demanded and valid offset
  315. //(always less than a page in std::size_t, thus, representable by std::size_t)
  316. const std::size_t page_offset =
  317. static_cast<std::size_t>(offset - (offset / offset_t(page_size)) * offset_t(page_size));
  318. //Update the mapping address
  319. if(address){
  320. address = static_cast<const char*>(address) - page_offset;
  321. }
  322. return offset_t(page_offset);
  323. }
  324. #if defined (BOOST_INTERPROCESS_WINDOWS)
//Default constructor (Windows): empty region, no mapping and no owned handle
inline mapped_region::mapped_region() BOOST_NOEXCEPT
   : m_base(0), m_size(0), m_page_offset(0), m_mode(read_only)
   , m_file_or_mapping_hnd(ipcdetail::invalid_file())
{}
//Windows: the mapping granularity is dwAllocationGranularity (usually 64KiB),
//not the CPU page size, because MapViewOfFileEx offsets must be multiples of it
template<int dummy>
inline std::size_t mapped_region::page_size_holder<dummy>::get_page_size()
{
   winapi::interprocess_system_info info;
   winapi::get_system_info(&info);
   return std::size_t(info.dwAllocationGranularity);
}
//Windows implementation of the mapping constructor: creates (or reuses) a
//file-mapping object, maps a page-aligned view covering [offset, offset+size)
//and duplicates the device handle so the region outlives the mappable object.
//Throws interprocess_exception on any API failure.
template<class MemoryMappable>
inline mapped_region::mapped_region
   (const MemoryMappable &mapping
   ,mode_t mode
   ,offset_t offset
   ,std::size_t size
   ,const void *address
   ,map_options_t map_options)
   : m_base(0), m_size(0), m_page_offset(0), m_mode(mode)
   , m_file_or_mapping_hnd(ipcdetail::invalid_file())
{
   mapping_handle_t mhandle = mapping.get_mapping_handle();
   {
      file_handle_t native_mapping_handle = 0;

      //Set accesses
      //For "create_file_mapping"
      unsigned long protection = 0;
      //For "mapviewoffile"
      unsigned long map_access = map_options == default_map_options ? 0 : map_options;

      switch(mode)
      {
         case read_only:
         case read_private:
            protection |= winapi::page_readonly;
            map_access |= winapi::file_map_read;
         break;
         case read_write:
            protection |= winapi::page_readwrite;
            map_access |= winapi::file_map_write;
         break;
         case copy_on_write:
            protection |= winapi::page_writecopy;
            map_access |= winapi::file_map_copy;
         break;
         default:
         {
            error_info err(mode_error);
            throw interprocess_exception(err);
         }
         break;
      }

      //For file mapping (including emulated shared memory through temporary files),
      //the device is a file handle so we need to obtain file's size and call create_file_mapping
      //to obtain the mapping handle.
      //For files we don't need the file mapping after mapping the memory, as the file is there
      //so we'll program the handle close
      void * handle_to_close = winapi::invalid_handle_value;
      if(!mhandle.is_shm){
         //Create mapping handle
         native_mapping_handle = winapi::create_file_mapping
            ( ipcdetail::file_handle_from_mapping_handle(mapping.get_mapping_handle())
            , protection, 0, (char*)0, 0);

         //Check if all is correct
         if(!native_mapping_handle){
            error_info err ((int)winapi::get_last_error());
            throw interprocess_exception(err);
         }
         handle_to_close = native_mapping_handle;
      }
      else{
         //For windows_shared_memory the device handle is already a mapping handle
         //and we need to maintain it
         native_mapping_handle = mhandle.handle;
      }
      //RAII handle close on scope exit
      const winapi::handle_closer close_handle(handle_to_close);
      (void)close_handle;

      //Rewind offset/address to the previous allocation-granularity boundary
      const offset_t page_offset = priv_page_offset_addr_fixup(offset, address);

      //Obtain mapping size if user provides 0 size
      if(size == 0){
         offset_t mapping_size;
         if(!winapi::get_file_mapping_size(native_mapping_handle, mapping_size)){
            error_info err((int)winapi::get_last_error());
            throw interprocess_exception(err);
         }
         //This can throw
         priv_size_from_mapping_size(mapping_size, offset, page_offset, size);
      }

      //Map with new offsets and size
      void *base = winapi::map_view_of_file_ex
         (native_mapping_handle,
         map_access,
         ::boost::ulong_long_type(offset - page_offset),
         static_cast<std::size_t>(page_offset + size),
         const_cast<void*>(address));
      //Check error
      if(!base){
         error_info err((int)winapi::get_last_error());
         throw interprocess_exception(err);
      }

      //Calculate new base for the user: skip the alignment padding
      m_base = static_cast<char*>(base) + page_offset;
      m_page_offset = static_cast<std::size_t>(page_offset);
      m_size = size;
   }
   //Windows shared memory needs the duplication of the handle if we want to
   //make mapped_region independent from the mappable device
   //
   //For mapped files, we duplicate the file handle to be able to FlushFileBuffers
   if(!winapi::duplicate_current_process_handle(mhandle.handle, &m_file_or_mapping_hnd)){
      error_info err((int)winapi::get_last_error());
      this->priv_close();
      throw interprocess_exception(err);
   }
}
  441. inline bool mapped_region::flush(std::size_t mapping_offset, std::size_t numbytes, bool async)
  442. {
  443. void *addr;
  444. if(!this->priv_flush_param_check(mapping_offset, addr, numbytes)){
  445. return false;
  446. }
  447. //Flush it all
  448. if(!winapi::flush_view_of_file(addr, numbytes)){
  449. return false;
  450. }
  451. //m_file_or_mapping_hnd can be a file handle or a mapping handle.
  452. //so flushing file buffers has only sense for files...
  453. else if(!async && m_file_or_mapping_hnd != winapi::invalid_handle_value &&
  454. winapi::get_file_type(m_file_or_mapping_hnd) == winapi::file_type_disk){
  455. return winapi::flush_file_buffers(m_file_or_mapping_hnd);
  456. }
  457. return true;
  458. }
//Windows shrink: virtual address space can't be partially released, so the
//best effort is removing the unused pages from the working set and revoking
//access to them. Returns true on success. Never throws.
inline bool mapped_region::shrink_by(std::size_t bytes, bool from_back)
{
   void *shrink_page_start = 0;
   std::size_t shrink_page_bytes = 0;
   if(!this->priv_shrink_param_check(bytes, from_back, shrink_page_start, shrink_page_bytes)){
      return false;
   }
   else if(shrink_page_bytes){
      //In Windows, we can't decommit the storage or release the virtual address space,
      //the best we can do is try to remove some memory from the process working set.
      //With a bit of luck we can free some physical memory.
      unsigned long old_protect_ignored;
      //virtual_unlock removes pages from the working set; error_not_locked
      //just means they weren't resident, which is fine
      bool b_ret = winapi::virtual_unlock(shrink_page_start, shrink_page_bytes)
         || (winapi::get_last_error() == winapi::error_not_locked);
      (void)old_protect_ignored;
      //Change page protection to forbid any further access
      b_ret = b_ret && winapi::virtual_protect
         (shrink_page_start, shrink_page_bytes, winapi::page_noaccess, old_protect_ignored);
      return b_ret;
   }
   else{
      return true;
   }
}
//Windows has no madvise/posix_madvise equivalent, so every advice
//is reported as unsupported
inline bool mapped_region::advise(advice_types)
{
   return false;
}
//Windows close: destroys emulated sync objects placed in the region (if that
//emulation is active), unmaps the view and closes the duplicated handle.
//Never throws.
inline void mapped_region::priv_close()
{
   if(m_base){
      void *addr = this->priv_map_address();
      #if !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)
      //Tear down any interprocess sync primitives emulated inside this range
      mapped_region::destroy_syncs_in_range<0>(addr, m_size);
      #endif
      winapi::unmap_view_of_file(addr);
      m_base = 0;
   }
   if(m_file_or_mapping_hnd != ipcdetail::invalid_file()){
      winapi::close_handle(m_file_or_mapping_hnd);
      m_file_or_mapping_hnd = ipcdetail::invalid_file();
   }
}
//Testing hook (see ipcdetail::interprocess_tester): intentionally a no-op on
//Windows, unlike the POSIX version which drops ownership by zeroing m_base
inline void mapped_region::dont_close_on_destruction()
{}
  505. #else //#if defined (BOOST_INTERPROCESS_WINDOWS)
//Default constructor (POSIX): empty region, no mapping, not XSI-attached
inline mapped_region::mapped_region() BOOST_NOEXCEPT
   : m_base(0), m_size(0), m_page_offset(0), m_mode(read_only), m_is_xsi(false)
{}
//POSIX: query the system page size once via sysconf
template<int dummy>
inline std::size_t mapped_region::page_size_holder<dummy>::get_page_size()
{  return std::size_t(sysconf(_SC_PAGESIZE));  }
//POSIX implementation of the mapping constructor. Attaches XSI segments with
//shmat when the handle is XSI; otherwise mmaps a page-aligned range covering
//[offset, offset+size). Throws interprocess_exception on any failure,
//including the OS ignoring a requested fixed address.
template<class MemoryMappable>
inline mapped_region::mapped_region
   ( const MemoryMappable &mapping
   , mode_t mode
   , offset_t offset
   , std::size_t size
   , const void *address
   , map_options_t map_options)
   : m_base(0), m_size(0), m_page_offset(0), m_mode(mode), m_is_xsi(false)
{
   mapping_handle_t map_hnd = mapping.get_mapping_handle();

   //Some systems don't support XSI shared memory
   #ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS
   if(map_hnd.is_xsi){
      //Get the segment size
      ::shmid_ds xsi_ds;
      int ret = ::shmctl(map_hnd.handle, IPC_STAT, &xsi_ds);
      if(ret == -1){
         error_info err(system_error_code());
         throw interprocess_exception(err);
      }
      //Compare sizes: XSI segments are attached whole, so a user-supplied
      //size must match the segment size exactly
      if(size == 0){
         size = (std::size_t)xsi_ds.shm_segsz;
      }
      else if(size != (std::size_t)xsi_ds.shm_segsz){
         error_info err(size_error);
         throw interprocess_exception(err);
      }
      //Calculate flag
      int flag = map_options == default_map_options ? 0 : map_options;
      if(m_mode == read_only){
         flag |= SHM_RDONLY;
      }
      else if(m_mode != read_write){
         error_info err(mode_error);
         throw interprocess_exception(err);
      }
      //Attach memory
      //Some old shmat implementation take the address as a non-const void pointer
      //so uncast it to make code portable.
      void *const final_address = const_cast<void *>(address);
      void *base = ::shmat(map_hnd.handle, final_address, flag);
      if(base == (void*)-1){
         error_info err(system_error_code());
         throw interprocess_exception(err);
      }
      //Update members
      m_base = base;
      m_size = size;
      m_mode = mode;
      m_page_offset = 0;
      m_is_xsi = true;
      return;
   }
   #endif //ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS

   //We calculate the difference between demanded and valid offset
   const offset_t page_offset = priv_page_offset_addr_fixup(offset, address);

   //Obtain the mapping size from the file when the user passes 0
   if(size == 0){
      struct ::stat buf;
      if(0 != fstat(map_hnd.handle, &buf)){
         error_info err(system_error_code());
         throw interprocess_exception(err);
      }
      //This can throw
      priv_size_from_mapping_size(buf.st_size, offset, page_offset, size);
   }

   //MAP_NOSYNC (where available) is the default to avoid needless dirty-page
   //writeback; a user-supplied map_options overrides it
   #ifdef MAP_NOSYNC
      #define BOOST_INTERPROCESS_MAP_NOSYNC MAP_NOSYNC
   #else
      #define BOOST_INTERPROCESS_MAP_NOSYNC 0
   #endif //MAP_NOSYNC

   //Create new mapping
   int prot = 0;
   int flags = map_options == default_map_options ? BOOST_INTERPROCESS_MAP_NOSYNC : map_options;

   #undef BOOST_INTERPROCESS_MAP_NOSYNC

   switch(mode)
   {
      case read_only:
         prot  |= PROT_READ;
         flags |= MAP_SHARED;
      break;

      case read_private:
         prot  |= (PROT_READ);
         flags |= MAP_PRIVATE;
      break;

      case read_write:
         prot  |= (PROT_WRITE | PROT_READ);
         flags |= MAP_SHARED;
      break;

      case copy_on_write:
         prot  |= (PROT_WRITE | PROT_READ);
         flags |= MAP_PRIVATE;
      break;

      default:
         {
            error_info err(mode_error);
            throw interprocess_exception(err);
         }
      break;
   }

   //Map it to the address space
   void* base = mmap ( const_cast<void*>(address)
                     , static_cast<std::size_t>(page_offset) + size
                     , prot
                     , flags
                     , mapping.get_mapping_handle().handle
                     , offset - page_offset);

   //Check if mapping was successful
   if(base == MAP_FAILED){
      error_info err = system_error_code();
      throw interprocess_exception(err);
   }

   //Calculate new base for the user: skip the alignment padding
   m_base = static_cast<char*>(base) + page_offset;
   m_page_offset = static_cast<std::size_t>(page_offset);
   m_size = size;

   //Check for fixed mapping error: the OS placed the mapping elsewhere
   if(address && (base != address)){
      error_info err(busy_error);
      this->priv_close();
      throw interprocess_exception(err);
   }
}
  636. inline bool mapped_region::shrink_by(std::size_t bytes, bool from_back)
  637. {
  638. void *shrink_page_start = 0;
  639. std::size_t shrink_page_bytes = 0;
  640. if(m_is_xsi || !this->priv_shrink_param_check(bytes, from_back, shrink_page_start, shrink_page_bytes)){
  641. return false;
  642. }
  643. else if(shrink_page_bytes){
  644. //In UNIX we can decommit and free virtual address space.
  645. return 0 == munmap(shrink_page_start, shrink_page_bytes);
  646. }
  647. else{
  648. return true;
  649. }
  650. }
  651. inline bool mapped_region::flush(std::size_t mapping_offset, std::size_t numbytes, bool async)
  652. {
  653. void *addr;
  654. if(m_is_xsi || !this->priv_flush_param_check(mapping_offset, addr, numbytes)){
  655. return false;
  656. }
  657. //Flush it all
  658. return msync(addr, numbytes, async ? MS_ASYNC : MS_SYNC) == 0;
  659. }
//POSIX advise: maps the portable advice_types value onto posix_madvise
//(preferred) or the native madvise, whichever the platform provides.
//Returns false when the advice (or any advice API) is unavailable.
inline bool mapped_region::advise(advice_types advice)
{
   int unix_advice = 0;
   //Modes; 0: none, 1: posix_madvise, 2: madvise
   const unsigned int mode_none = 0;
   const unsigned int mode_padv = 1;
   const unsigned int mode_madv = 2;
   // Suppress "unused variable" warnings
   (void)mode_padv;
   (void)mode_madv;
   unsigned int mode = mode_none;
   //Choose advice either from POSIX (preferred) or native Unix
   switch(advice){
      case advice_normal:
         #if defined(POSIX_MADV_NORMAL)
         unix_advice = POSIX_MADV_NORMAL;
         mode = mode_padv;
         #elif defined(MADV_NORMAL)
         unix_advice = MADV_NORMAL;
         mode = mode_madv;
         #endif
      break;
      case advice_sequential:
         #if defined(POSIX_MADV_SEQUENTIAL)
         unix_advice = POSIX_MADV_SEQUENTIAL;
         mode = mode_padv;
         #elif defined(MADV_SEQUENTIAL)
         unix_advice = MADV_SEQUENTIAL;
         mode = mode_madv;
         #endif
      break;
      case advice_random:
         #if defined(POSIX_MADV_RANDOM)
         unix_advice = POSIX_MADV_RANDOM;
         mode = mode_padv;
         #elif defined(MADV_RANDOM)
         unix_advice = MADV_RANDOM;
         mode = mode_madv;
         #endif
      break;
      case advice_willneed:
         #if defined(POSIX_MADV_WILLNEED)
         unix_advice = POSIX_MADV_WILLNEED;
         mode = mode_padv;
         #elif defined(MADV_WILLNEED)
         unix_advice = MADV_WILLNEED;
         mode = mode_madv;
         #endif
      break;
      case advice_dontneed:
         #if defined(POSIX_MADV_DONTNEED)
         unix_advice = POSIX_MADV_DONTNEED;
         mode = mode_padv;
         //Native MADV_DONTNEED is only usable where it doesn't discard the
         //pages' contents (see BOOST_INTERPROCESS_MADV_DONTNEED_... above)
         #elif defined(MADV_DONTNEED) && defined(BOOST_INTERPROCESS_MADV_DONTNEED_HAS_NONDESTRUCTIVE_SEMANTICS)
         unix_advice = MADV_DONTNEED;
         mode = mode_madv;
         #endif
      break;
      default:
      return false;
   }
   switch(mode){
      #if defined(POSIX_MADV_NORMAL)
      case mode_padv:
         return 0 == posix_madvise(this->priv_map_address(), this->priv_map_size(), unix_advice);
      #endif
      #if defined(MADV_NORMAL)
      case mode_madv:
         return 0 == madvise(
            //Some Unixes declare madvise taking caddr_t instead of void*
            #if defined(BOOST_INTERPROCESS_MADVISE_USES_CADDR_T)
            (caddr_t)
            #endif
            this->priv_map_address(), this->priv_map_size(), unix_advice);
      #endif
      default:
      return false;
   }
}
  738. inline void mapped_region::priv_close()
  739. {
  740. if(m_base != 0){
  741. #ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS
  742. if(m_is_xsi){
  743. int ret = ::shmdt(m_base);
  744. BOOST_ASSERT(ret == 0);
  745. (void)ret;
  746. return;
  747. }
  748. #endif //#ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS
  749. munmap(this->priv_map_address(), this->priv_map_size());
  750. m_base = 0;
  751. }
  752. }
  753. inline void mapped_region::dont_close_on_destruction()
  754. { m_base = 0; }
  755. #endif //#if defined (BOOST_INTERPROCESS_WINDOWS)
//Out-of-class definition of the page-size cache. Being a static member
//of a class template, it may be defined in this header without violating
//the ODR; it is initialized once with the value queried from the OS.
template<int dummy>
const std::size_t mapped_region::page_size_holder<dummy>::PageSize
   = mapped_region::page_size_holder<dummy>::get_page_size();
  759. inline std::size_t mapped_region::get_page_size() BOOST_NOEXCEPT
  760. {
  761. if(!page_size_holder<0>::PageSize)
  762. return page_size_holder<0>::get_page_size();
  763. else
  764. return page_size_holder<0>::PageSize;
  765. }
  766. inline void mapped_region::swap(mapped_region &other) BOOST_NOEXCEPT
  767. {
  768. ::boost::adl_move_swap(this->m_base, other.m_base);
  769. ::boost::adl_move_swap(this->m_size, other.m_size);
  770. ::boost::adl_move_swap(this->m_page_offset, other.m_page_offset);
  771. ::boost::adl_move_swap(this->m_mode, other.m_mode);
  772. #if defined (BOOST_INTERPROCESS_WINDOWS)
  773. ::boost::adl_move_swap(this->m_file_or_mapping_hnd, other.m_file_or_mapping_hnd);
  774. #else
  775. ::boost::adl_move_swap(this->m_is_xsi, other.m_is_xsi);
  776. #endif
  777. }
  778. //!No-op functor
  779. struct null_mapped_region_function
  780. {
  781. bool operator()(void *, std::size_t , bool) const
  782. { return true; }
  783. static std::size_t get_min_size()
  784. { return 0; }
  785. };
  786. #endif //#ifndef BOOST_INTERPROCESS_DOXYGEN_INVOKED
  787. } //namespace interprocess {
  788. } //namespace boost {
  789. #include <boost/interprocess/detail/config_end.hpp>
  790. #endif //BOOST_INTERPROCESS_MAPPED_REGION_HPP
  791. #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
  792. #ifndef BOOST_INTERPROCESS_MAPPED_REGION_EXT_HPP
  793. #define BOOST_INTERPROCESS_MAPPED_REGION_EXT_HPP
  794. #if defined(BOOST_INTERPROCESS_WINDOWS) && !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)
  795. # include <boost/interprocess/sync/windows/sync_utils.hpp>
  796. # include <boost/interprocess/detail/windows_intermodule_singleton.hpp>
  797. namespace boost {
  798. namespace interprocess {
//Destroys the emulated synchronization handles registered for addresses
//inside [addr, addr + size), via the process-wide intermodule singleton
//that stores them. NOTE(review): presumably invoked when a region is
//unmapped so its address-keyed sync objects don't leak — confirm against
//the Windows mapped_region close path.
template<int Dummy>
inline void mapped_region::destroy_syncs_in_range(const void *addr, std::size_t size)
{
   ipcdetail::sync_handles &handles =
      ipcdetail::windows_intermodule_singleton<ipcdetail::sync_handles>::get();
   handles.destroy_syncs_in_range(addr, size);
}
  806. } //namespace interprocess {
  807. } //namespace boost {
  808. #endif //defined(BOOST_INTERPROCESS_WINDOWS) && !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)
  809. #endif //#ifdef BOOST_INTERPROCESS_MAPPED_REGION_EXT_HPP
  810. #endif //#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)