@@ -81,13 +81,44 @@ uint32_t ToPosixProtectFlags(PageAccess access) {
8181
// POSIX mmap/mprotect permit PROT_WRITE | PROT_EXEC pages on typical
// configurations (hardened kernels may still refuse at runtime).
bool IsWritableExecutableMemorySupported() {
  return true;
}
8383
// Byte range [region_begin, region_end) of a live file view created by
// MapFileView. AllocFixed/DeallocFixed consult these to tell regions backed
// by a mapped file apart from plain anonymous mappings.
struct MappedFileRange {
  size_t region_begin;  // first byte of the view
  size_t region_end;    // one past the last byte of the view
};

// All currently mapped file views.
// NOTE(review): unsynchronized global — assumes the mapping APIs are not
// called concurrently from multiple threads; confirm against callers.
std::vector<MappedFileRange> mapped_file_ranges;
90+
8491void * AllocFixed (void * base_address, size_t length,
8592 AllocationType allocation_type, PageAccess access) {
8693 // mmap does not support reserve / commit, so ignore allocation_type.
8794 uint32_t prot = ToPosixProtectFlags (access);
88- void * result = mmap (base_address, length, prot,
89- MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1 , 0 );
95+
96+ int flags = MAP_PRIVATE | MAP_ANONYMOUS;
97+ if (base_address != nullptr ) {
98+ flags |= MAP_FIXED_NOREPLACE;
99+ }
100+ void * result = mmap (base_address, length, prot, flags, -1 , 0 );
101+
90102 if (result == MAP_FAILED) {
103+ // If the address is within this range, the mmap failed because we have
104+ // already mapped this memory.
105+ size_t region_begin = (size_t )base_address;
106+ size_t region_end = (size_t )base_address + length;
107+ for (const auto mapped_range : mapped_file_ranges) {
108+ // Check if the allocation is within this range...
109+ if (region_begin >= mapped_range.region_begin &&
110+ region_end <= mapped_range.region_end ) {
111+ bool should_protect = (((uint8_t )allocation_type & 2 ) == 2 );
112+
113+ if (should_protect) {
114+ if (Protect (base_address, length, access)) {
115+ return base_address;
116+ }
117+ } else if ((((uint8_t )allocation_type & 1 ) == 1 )) {
118+ return base_address;
119+ }
120+ }
121+ }
91122 return nullptr ;
92123 } else {
93124 return result;
@@ -96,6 +127,15 @@ void* AllocFixed(void* base_address, size_t length,
96127
97128bool DeallocFixed (void * base_address, size_t length,
98129 DeallocationType deallocation_type) {
130+ size_t region_begin = (size_t )base_address;
131+ size_t region_end = (size_t )base_address + length;
132+ for (const auto mapped_range : mapped_file_ranges) {
133+ if (region_begin >= mapped_range.region_begin &&
134+ region_end <= mapped_range.region_end ) {
135+ return Protect (base_address, length, PageAccess::kNoAccess );
136+ }
137+ }
138+
99139 return munmap (base_address, length) == 0 ;
100140}
101141
@@ -178,12 +218,37 @@ void CloseFileMappingHandle(FileMappingHandle handle,
178218void * MapFileView (FileMappingHandle handle, void * base_address, size_t length,
179219 PageAccess access, size_t file_offset) {
180220 uint32_t prot = ToPosixProtectFlags (access);
181- return mmap64 (base_address, length, prot, MAP_PRIVATE | MAP_ANONYMOUS, handle,
221+
222+ int flags = MAP_SHARED;
223+ if (base_address != nullptr ) {
224+ flags = flags | MAP_FIXED_NOREPLACE;
225+ }
226+
227+ void * result = mmap (base_address, length, prot, flags, handle,
182228 file_offset);
229+
230+ if (result == MAP_FAILED) {
231+ return nullptr ;
232+ }else {
233+ mapped_file_ranges.push_back ({(size_t )result, (size_t )result + length});
234+ return result;
235+ }
183236}
184237
185238bool UnmapFileView (FileMappingHandle handle, void * base_address,
186239 size_t length) {
240+ for (auto mapped_range = mapped_file_ranges.begin ();
241+ mapped_range != mapped_file_ranges.end ();) {
242+ if (mapped_range->region_begin == (size_t )base_address &&
243+ mapped_range->region_end == (size_t )base_address + length) {
244+ mapped_file_ranges.erase (mapped_range);
245+ return munmap (base_address, length) == 0 ;
246+ } else {
247+ mapped_range++;
248+ }
249+ }
250+ // TODO: Implement partial file unmapping.
251+ assert_always (" Error: Partial unmapping of files not yet supported." );
187252 return munmap (base_address, length) == 0 ;
188253}
189254
0 commit comments