/* src/gb/memory.c */
1/* Copyright (c) 2013-2016 Jeffrey Pfau
2 *
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6#include <mgba/internal/gb/memory.h>
7
8#include <mgba/core/interface.h>
9#include <mgba/internal/gb/gb.h>
10#include <mgba/internal/gb/io.h>
11#include <mgba/internal/gb/mbc.h>
12#include <mgba/internal/gb/serialize.h>
13#include <mgba/internal/lr35902/lr35902.h>
14
15#include <mgba-util/memory.h>
16
17mLOG_DEFINE_CATEGORY(GB_MEM, "GB Memory", "gb.memory");
18
19static void _pristineCow(struct GB* gba);
20
21static uint8_t GBFastLoad8(struct LR35902Core* cpu, uint16_t address) {
22 if (UNLIKELY(address >= cpu->memory.activeRegionEnd)) {
23 cpu->memory.setActiveRegion(cpu, address);
24 return cpu->memory.cpuLoad8(cpu, address);
25 }
26 return cpu->memory.activeRegion[address & cpu->memory.activeMask];
27}
28
29static void GBSetActiveRegion(struct LR35902Core* cpu, uint16_t address) {
30 struct GB* gb = (struct GB*) cpu->master;
31 struct GBMemory* memory = &gb->memory;
32 switch (address >> 12) {
33 case GB_REGION_CART_BANK0:
34 case GB_REGION_CART_BANK0 + 1:
35 case GB_REGION_CART_BANK0 + 2:
36 case GB_REGION_CART_BANK0 + 3:
37 cpu->memory.cpuLoad8 = GBFastLoad8;
38 cpu->memory.activeRegion = memory->romBase;
39 cpu->memory.activeRegionEnd = GB_BASE_CART_BANK1;
40 cpu->memory.activeMask = GB_SIZE_CART_BANK0 - 1;
41 break;
42 case GB_REGION_CART_BANK1:
43 case GB_REGION_CART_BANK1 + 1:
44 case GB_REGION_CART_BANK1 + 2:
45 case GB_REGION_CART_BANK1 + 3:
46 cpu->memory.cpuLoad8 = GBFastLoad8;
47 cpu->memory.activeRegion = memory->romBank;
48 cpu->memory.activeRegionEnd = GB_BASE_VRAM;
49 cpu->memory.activeMask = GB_SIZE_CART_BANK0 - 1;
50 break;
51 default:
52 cpu->memory.cpuLoad8 = GBLoad8;
53 break;
54 }
55}
56
// Timing callbacks: each invocation transfers one byte of the pending
// OAM DMA / HDMA and reschedules itself while bytes remain (see below).
static void _GBMemoryDMAService(struct mTiming* timing, void* context, uint32_t cyclesLate);
static void _GBMemoryHDMAService(struct mTiming* timing, void* context, uint32_t cyclesLate);
59
60void GBMemoryInit(struct GB* gb) {
61 struct LR35902Core* cpu = gb->cpu;
62 cpu->memory.cpuLoad8 = GBLoad8;
63 cpu->memory.load8 = GBLoad8;
64 cpu->memory.store8 = GBStore8;
65 cpu->memory.setActiveRegion = GBSetActiveRegion;
66
67 gb->memory.wram = 0;
68 gb->memory.wramBank = 0;
69 gb->memory.rom = 0;
70 gb->memory.romBank = 0;
71 gb->memory.romSize = 0;
72 gb->memory.sram = 0;
73 gb->memory.mbcType = GB_MBC_AUTODETECT;
74 gb->memory.mbc = 0;
75
76 gb->memory.rtc = NULL;
77
78 GBIOInit(gb);
79}
80
81void GBMemoryDeinit(struct GB* gb) {
82 mappedMemoryFree(gb->memory.wram, GB_SIZE_WORKING_RAM);
83 if (gb->memory.rom) {
84 mappedMemoryFree(gb->memory.rom, gb->memory.romSize);
85 }
86}
87
88void GBMemoryReset(struct GB* gb) {
89 if (gb->memory.wram) {
90 mappedMemoryFree(gb->memory.wram, GB_SIZE_WORKING_RAM);
91 }
92 gb->memory.wram = anonymousMemoryMap(GB_SIZE_WORKING_RAM);
93 if (gb->model >= GB_MODEL_CGB) {
94 uint32_t* base = (uint32_t*) gb->memory.wram;
95 size_t i;
96 uint32_t pattern = 0;
97 for (i = 0; i < GB_SIZE_WORKING_RAM / 4; i += 4) {
98 if ((i & 0x1FF) == 0) {
99 pattern = ~pattern;
100 }
101 base[i + 0] = pattern;
102 base[i + 1] = pattern;
103 base[i + 2] = ~pattern;
104 base[i + 3] = ~pattern;
105 }
106 }
107 GBMemorySwitchWramBank(&gb->memory, 1);
108 gb->memory.romBank = &gb->memory.rom[GB_SIZE_CART_BANK0];
109 gb->memory.currentBank = 1;
110 gb->memory.sramCurrentBank = 0;
111
112 gb->memory.ime = false;
113 gb->memory.ie = 0;
114
115 gb->memory.dmaRemaining = 0;
116 gb->memory.dmaSource = 0;
117 gb->memory.dmaDest = 0;
118 gb->memory.hdmaRemaining = 0;
119 gb->memory.hdmaSource = 0;
120 gb->memory.hdmaDest = 0;
121 gb->memory.isHdma = false;
122
123
124 gb->memory.dmaEvent.context = gb;
125 gb->memory.dmaEvent.name = "GB DMA";
126 gb->memory.dmaEvent.callback = _GBMemoryDMAService;
127 gb->memory.dmaEvent.priority = 0x40;
128 gb->memory.hdmaEvent.context = gb;
129 gb->memory.hdmaEvent.name = "GB HDMA";
130 gb->memory.hdmaEvent.callback = _GBMemoryHDMAService;
131 gb->memory.hdmaEvent.priority = 0x41;
132
133 gb->memory.sramAccess = false;
134 gb->memory.rtcAccess = false;
135 gb->memory.activeRtcReg = 0;
136 gb->memory.rtcLatched = false;
137 memset(&gb->memory.rtcRegs, 0, sizeof(gb->memory.rtcRegs));
138
139 memset(&gb->memory.hram, 0, sizeof(gb->memory.hram));
140 memset(&gb->memory.mbcState, 0, sizeof(gb->memory.mbcState));
141
142 GBMBCInit(gb);
143 gb->memory.sramBank = gb->memory.sram;
144
145 if (!gb->memory.wram) {
146 GBMemoryDeinit(gb);
147 }
148}
149
150void GBMemorySwitchWramBank(struct GBMemory* memory, int bank) {
151 bank &= 7;
152 if (!bank) {
153 bank = 1;
154 }
155 memory->wramBank = &memory->wram[GB_SIZE_WORKING_RAM_BANK0 * bank];
156 memory->wramCurrentBank = bank;
157}
158
// Core bus read for the full 16-bit address space. Dispatches on the top
// nibble of the address to the currently mapped ROM/VRAM/SRAM/WRAM banks,
// OAM, I/O registers, and HRAM. Installed as both load8 and (when no fast
// region is active) cpuLoad8.
uint8_t GBLoad8(struct LR35902Core* cpu, uint16_t address) {
	struct GB* gb = (struct GB*) cpu->master;
	struct GBMemory* memory = &gb->memory;
	switch (address >> 12) {
	case GB_REGION_CART_BANK0:
	case GB_REGION_CART_BANK0 + 1:
	case GB_REGION_CART_BANK0 + 2:
	case GB_REGION_CART_BANK0 + 3:
		// 0x0000-0x3FFF: fixed cartridge bank (romBase).
		return memory->romBase[address & (GB_SIZE_CART_BANK0 - 1)];
	case GB_REGION_CART_BANK1:
	case GB_REGION_CART_BANK1 + 1:
	case GB_REGION_CART_BANK1 + 2:
	case GB_REGION_CART_BANK1 + 3:
		// 0x4000-0x7FFF: switchable cartridge bank.
		return memory->romBank[address & (GB_SIZE_CART_BANK0 - 1)];
	case GB_REGION_VRAM:
	case GB_REGION_VRAM + 1:
		// 0x8000-0x9FFF: currently mapped VRAM bank.
		return gb->video.vramBank[address & (GB_SIZE_VRAM_BANK0 - 1)];
	case GB_REGION_EXTERNAL_RAM:
	case GB_REGION_EXTERNAL_RAM + 1:
		// 0xA000-0xBFFF: external (cartridge) RAM window; may expose the
		// RTC register latch or MBC-specific reads instead.
		if (memory->rtcAccess) {
			return memory->rtcRegs[memory->activeRtcReg];
		} else if (memory->sramAccess) {
			return memory->sramBank[address & (GB_SIZE_EXTERNAL_RAM - 1)];
		} else if (memory->mbcType == GB_MBC7) {
			return GBMBC7Read(memory, address);
		} else if (memory->mbcType == GB_HuC3) {
			return 0x01; // TODO: Is this supposed to be the current SRAM bank?
		}
		// SRAM disabled: open bus.
		return 0xFF;
	case GB_REGION_WORKING_RAM_BANK0:
	case GB_REGION_WORKING_RAM_BANK0 + 2:
		// 0xC000-0xCFFF and its echo at 0xE000-0xEFFF.
		return memory->wram[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
	case GB_REGION_WORKING_RAM_BANK1:
		// 0xD000-0xDFFF: switchable WRAM bank.
		return memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
	default:
		if (address < GB_BASE_OAM) {
			// Echo of the switchable WRAM bank (0xF000-0xFDFF).
			return memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
		}
		if (address < GB_BASE_UNUSABLE) {
			// OAM is only readable while video.mode < 2; otherwise open bus.
			if (gb->video.mode < 2) {
				return gb->video.oam.raw[address & 0xFF];
			}
			return 0xFF;
		}
		if (address < GB_BASE_IO) {
			mLOG(GB_MEM, GAME_ERROR, "Attempt to read from unusable memory: %04X", address);
			return 0xFF;
		}
		if (address < GB_BASE_HRAM) {
			return GBIORead(gb, address & (GB_SIZE_IO - 1));
		}
		if (address < GB_BASE_IE) {
			// NOTE(review): GB_SIZE_HRAM is used directly as a mask here —
			// assumes it is of the form 2^n - 1; confirm against gb.h.
			return memory->hram[address & GB_SIZE_HRAM];
		}
		// 0xFFFF: interrupt-enable register.
		return GBIORead(gb, REG_IE);
	}
}
216
// Core bus write for the full 16-bit address space. Writes to the cartridge
// windows go to the MBC (bank switching etc.), which is why the active fetch
// region is re-resolved afterwards.
void GBStore8(struct LR35902Core* cpu, uint16_t address, int8_t value) {
	struct GB* gb = (struct GB*) cpu->master;
	struct GBMemory* memory = &gb->memory;
	switch (address >> 12) {
	case GB_REGION_CART_BANK0:
	case GB_REGION_CART_BANK0 + 1:
	case GB_REGION_CART_BANK0 + 2:
	case GB_REGION_CART_BANK0 + 3:
	case GB_REGION_CART_BANK1:
	case GB_REGION_CART_BANK1 + 1:
	case GB_REGION_CART_BANK1 + 2:
	case GB_REGION_CART_BANK1 + 3:
		// 0x0000-0x7FFF: ROM writes are MBC control commands; the MBC may
		// have remapped banks, so refresh the fast-fetch region.
		memory->mbc(gb, address, value);
		cpu->memory.setActiveRegion(cpu, cpu->pc);
		return;
	case GB_REGION_VRAM:
	case GB_REGION_VRAM + 1:
		// Notify the renderer of the dirtied byte, then commit it.
		gb->video.renderer->writeVRAM(gb->video.renderer, (address & (GB_SIZE_VRAM_BANK0 - 1)) | (GB_SIZE_VRAM_BANK0 * gb->video.vramCurrentBank));
		gb->video.vramBank[address & (GB_SIZE_VRAM_BANK0 - 1)] = value;
		return;
	case GB_REGION_EXTERNAL_RAM:
	case GB_REGION_EXTERNAL_RAM + 1:
		if (memory->rtcAccess) {
			memory->rtcRegs[memory->activeRtcReg] = value;
		} else if (memory->sramAccess) {
			memory->sramBank[address & (GB_SIZE_EXTERNAL_RAM - 1)] = value;
		} else if (memory->mbcType == GB_MBC7) {
			GBMBC7Write(memory, address, value);
		}
		// Mark SRAM dirty so it gets flushed to disk.
		gb->sramDirty |= GB_SRAM_DIRT_NEW;
		return;
	case GB_REGION_WORKING_RAM_BANK0:
	case GB_REGION_WORKING_RAM_BANK0 + 2:
		// 0xC000-0xCFFF and its echo at 0xE000-0xEFFF.
		memory->wram[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
		return;
	case GB_REGION_WORKING_RAM_BANK1:
		memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
		return;
	default:
		if (address < GB_BASE_OAM) {
			// Echo of the switchable WRAM bank (0xF000-0xFDFF).
			memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
		} else if (address < GB_BASE_UNUSABLE) {
			// OAM writes are dropped unless video.mode < 2.
			if (gb->video.mode < 2) {
				gb->video.oam.raw[address & 0xFF] = value;
			}
		} else if (address < GB_BASE_IO) {
			mLOG(GB_MEM, GAME_ERROR, "Attempt to write to unusable memory: %04X:%02X", address, value);
		} else if (address < GB_BASE_HRAM) {
			GBIOWrite(gb, address & (GB_SIZE_IO - 1), value);
		} else if (address < GB_BASE_IE) {
			// NOTE(review): GB_SIZE_HRAM used directly as a mask — assumes
			// 2^n - 1; confirm against gb.h.
			memory->hram[address & GB_SIZE_HRAM] = value;
		} else {
			// 0xFFFF: interrupt-enable register.
			GBIOWrite(gb, REG_IE, value);
		}
	}
}
// Debugger-style read: like GBLoad8, but `segment` can select an explicit
// ROM/VRAM/SRAM/WRAM bank (< 0 means the currently mapped bank; out-of-range
// segments read as 0xFF). NOTE(review): the I/O fallthrough still goes
// through GBIORead, which may not be side-effect-free — verify before using
// this from a watch window.
uint8_t GBView8(struct LR35902Core* cpu, uint16_t address, int segment) {
	struct GB* gb = (struct GB*) cpu->master;
	struct GBMemory* memory = &gb->memory;
	switch (address >> 12) {
	case GB_REGION_CART_BANK0:
	case GB_REGION_CART_BANK0 + 1:
	case GB_REGION_CART_BANK0 + 2:
	case GB_REGION_CART_BANK0 + 3:
		return memory->romBase[address & (GB_SIZE_CART_BANK0 - 1)];
	case GB_REGION_CART_BANK1:
	case GB_REGION_CART_BANK1 + 1:
	case GB_REGION_CART_BANK1 + 2:
	case GB_REGION_CART_BANK1 + 3:
		if (segment < 0) {
			return memory->romBank[address & (GB_SIZE_CART_BANK0 - 1)];
		} else if ((size_t) segment * GB_SIZE_CART_BANK0 < memory->romSize) {
			// Explicit ROM bank, bounds-checked against the loaded image.
			return memory->rom[(address & (GB_SIZE_CART_BANK0 - 1)) + segment * GB_SIZE_CART_BANK0];
		} else {
			return 0xFF;
		}
	case GB_REGION_VRAM:
	case GB_REGION_VRAM + 1:
		if (segment < 0) {
			return gb->video.vramBank[address & (GB_SIZE_VRAM_BANK0 - 1)];
		} else if (segment < 2) {
			// Explicit VRAM bank (0 or 1).
			return gb->video.vram[(address & (GB_SIZE_VRAM_BANK0 - 1)) + segment * GB_SIZE_VRAM_BANK0];
		} else {
			return 0xFF;
		}
	case GB_REGION_EXTERNAL_RAM:
	case GB_REGION_EXTERNAL_RAM + 1:
		if (memory->rtcAccess) {
			return memory->rtcRegs[memory->activeRtcReg];
		} else if (memory->sramAccess) {
			if (segment < 0) {
				return memory->sramBank[address & (GB_SIZE_EXTERNAL_RAM - 1)];
			} else if ((size_t) segment * GB_SIZE_EXTERNAL_RAM < gb->sramSize) {
				// Explicit SRAM bank, bounds-checked against SRAM size.
				return memory->sram[(address & (GB_SIZE_EXTERNAL_RAM - 1)) + segment * GB_SIZE_EXTERNAL_RAM];
			} else {
				return 0xFF;
			}
		} else if (memory->mbcType == GB_MBC7) {
			return GBMBC7Read(memory, address);
		} else if (memory->mbcType == GB_HuC3) {
			return 0x01; // TODO: Is this supposed to be the current SRAM bank?
		}
		return 0xFF;
	case GB_REGION_WORKING_RAM_BANK0:
	case GB_REGION_WORKING_RAM_BANK0 + 2:
		return memory->wram[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
	case GB_REGION_WORKING_RAM_BANK1:
		if (segment < 0) {
			return memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
		} else if (segment < 8) {
			// Explicit WRAM bank (0-7).
			return memory->wram[(address & (GB_SIZE_WORKING_RAM_BANK0 - 1)) + segment * GB_SIZE_WORKING_RAM_BANK0];
		} else {
			return 0xFF;
		}
	default:
		if (address < GB_BASE_OAM) {
			// Echo of the switchable WRAM bank.
			return memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
		}
		if (address < GB_BASE_UNUSABLE) {
			// OAM only readable while video.mode < 2.
			if (gb->video.mode < 2) {
				return gb->video.oam.raw[address & 0xFF];
			}
			return 0xFF;
		}
		if (address < GB_BASE_IO) {
			mLOG(GB_MEM, GAME_ERROR, "Attempt to read from unusable memory: %04X", address);
			return 0xFF;
		}
		if (address < GB_BASE_HRAM) {
			return GBIORead(gb, address & (GB_SIZE_IO - 1));
		}
		if (address < GB_BASE_IE) {
			// NOTE(review): GB_SIZE_HRAM used directly as a mask — assumes
			// 2^n - 1; confirm against gb.h.
			return memory->hram[address & GB_SIZE_HRAM];
		}
		return GBIORead(gb, REG_IE);
	}
}
354
355void GBMemoryDMA(struct GB* gb, uint16_t base) {
356 if (base > 0xF100) {
357 return;
358 }
359 gb->cpu->memory.store8 = GBDMAStore8;
360 gb->cpu->memory.load8 = GBDMALoad8;
361 gb->cpu->memory.cpuLoad8 = GBDMALoad8;
362 mTimingSchedule(&gb->timing, &gb->memory.dmaEvent, 8);
363 if (gb->cpu->cycles + 8 < gb->cpu->nextEvent) {
364 gb->cpu->nextEvent = gb->cpu->cycles + 8;
365 }
366 gb->memory.dmaSource = base;
367 gb->memory.dmaDest = 0;
368 gb->memory.dmaRemaining = 0xA0;
369}
370
// Handles a write to REG_HDMA5 (0xFF55): latches the source/destination from
// HDMA1-4 and either kicks a general-purpose DMA immediately or arms HBlank
// DMA (bit 7 of `value`).
void GBMemoryWriteHDMA5(struct GB* gb, uint8_t value) {
	gb->memory.hdmaSource = gb->memory.io[REG_HDMA1] << 8;
	gb->memory.hdmaSource |= gb->memory.io[REG_HDMA2];
	gb->memory.hdmaDest = gb->memory.io[REG_HDMA3] << 8;
	gb->memory.hdmaDest |= gb->memory.io[REG_HDMA4];
	// Source is 16-byte aligned; VRAM (0x8000-0x9FFF) is not a legal source.
	gb->memory.hdmaSource &= 0xFFF0;
	if (gb->memory.hdmaSource >= 0x8000 && gb->memory.hdmaSource < 0xA000) {
		mLOG(GB_MEM, GAME_ERROR, "Invalid HDMA source: %04X", gb->memory.hdmaSource);
		return;
	}
	// Destination is forced into VRAM (0x8000-0x9FF0), 16-byte aligned.
	gb->memory.hdmaDest &= 0x1FF0;
	gb->memory.hdmaDest |= 0x8000;
	bool wasHdma = gb->memory.isHdma;
	gb->memory.isHdma = value & 0x80;
	// Start copying now for a general-purpose DMA (bit 7 clear, none armed
	// before) or whenever we are already in HBlank (mode 0).
	// NOTE(review): writing bit 7 = 0 while HDMA is armed is supposed to
	// cancel it on hardware; this path does not appear to handle that —
	// confirm against REG_HDMA5 semantics.
	if ((!wasHdma && !gb->memory.isHdma) || gb->video.mode == 0) {
		gb->memory.hdmaRemaining = ((value & 0x7F) + 1) * 0x10;
		gb->cpuBlocked = true;
		mTimingSchedule(&gb->timing, &gb->memory.hdmaEvent, 0);
		gb->cpu->nextEvent = gb->cpu->cycles;
	}
}
392
393void _GBMemoryDMAService(struct mTiming* timing, void* context, uint32_t cyclesLate) {
394 struct GB* gb = context;
395 uint8_t b = GBLoad8(gb->cpu, gb->memory.dmaSource);
396 // TODO: Can DMA write OAM during modes 2-3?
397 gb->video.oam.raw[gb->memory.dmaDest] = b;
398 ++gb->memory.dmaSource;
399 ++gb->memory.dmaDest;
400 --gb->memory.dmaRemaining;
401 if (gb->memory.dmaRemaining) {
402 mTimingSchedule(timing, &gb->memory.dmaEvent, 4 - cyclesLate);
403 } else {
404 gb->cpu->memory.store8 = GBStore8;
405 gb->cpu->memory.load8 = GBLoad8;
406 }
407}
408
// Timing callback: copies one byte of the armed (H)DMA per event, keeping
// the CPU blocked while bytes remain. On completion it writes the advanced
// source/destination back into HDMA1-4 and updates HDMA5: HBlank DMA
// decrements the remaining-length field (disarming when it underflows to
// 0xFF); general-purpose DMA reports 0xFF (done).
void _GBMemoryHDMAService(struct mTiming* timing, void* context, uint32_t cyclesLate) {
	struct GB* gb = context;
	gb->cpuBlocked = true;
	uint8_t b = gb->cpu->memory.load8(gb->cpu, gb->memory.hdmaSource);
	gb->cpu->memory.store8(gb->cpu, gb->memory.hdmaDest, b);
	++gb->memory.hdmaSource;
	++gb->memory.hdmaDest;
	--gb->memory.hdmaRemaining;
	if (gb->memory.hdmaRemaining) {
		// Deschedule first in case the event is still queued, then schedule
		// the next byte.
		mTimingDeschedule(timing, &gb->memory.hdmaEvent);
		mTimingSchedule(timing, &gb->memory.hdmaEvent, 2 - cyclesLate);
	} else {
		gb->cpuBlocked = false;
		// Expose the post-transfer pointers through the I/O registers.
		gb->memory.io[REG_HDMA1] = gb->memory.hdmaSource >> 8;
		gb->memory.io[REG_HDMA2] = gb->memory.hdmaSource;
		gb->memory.io[REG_HDMA3] = gb->memory.hdmaDest >> 8;
		gb->memory.io[REG_HDMA4] = gb->memory.hdmaDest;
		if (gb->memory.isHdma) {
			--gb->memory.io[REG_HDMA5];
			if (gb->memory.io[REG_HDMA5] == 0xFF) {
				gb->memory.isHdma = false;
			}
		} else {
			gb->memory.io[REG_HDMA5] = 0xFF;
		}
	}
}
436
// Half-open address range [low, high) that conflicts with the bus OAM DMA is
// currently reading from (see _oamBlockDMG/_oamBlockCGB below).
struct OAMBlock {
	uint16_t low;
	uint16_t high;
};
441
// DMG bus-conflict table, indexed by dmaSource >> 13 (one entry per 8KiB
// region of the DMA source). While DMA reads from a region, accesses inside
// the listed range are blocked (reads return 0xFF, writes are dropped — see
// GBDMALoad8/GBDMAStore8).
static const struct OAMBlock _oamBlockDMG[] = {
	{ 0xA000, 0xFE00 },
	{ 0xA000, 0xFE00 },
	{ 0xA000, 0xFE00 },
	{ 0xA000, 0xFE00 },
	{ 0x8000, 0xA000 }, // DMA from VRAM blocks VRAM instead.
	{ 0xA000, 0xFE00 },
	{ 0xA000, 0xFE00 },
	{ 0xA000, 0xFE00 },
};
452
// CGB bus-conflict table, same indexing as _oamBlockDMG. The CGB has
// separate buses, so the blocked ranges are narrower than on DMG.
static const struct OAMBlock _oamBlockCGB[] = {
	{ 0xA000, 0xC000 },
	{ 0xA000, 0xC000 },
	{ 0xA000, 0xC000 },
	{ 0xA000, 0xC000 },
	{ 0x8000, 0xA000 }, // DMA from VRAM blocks VRAM.
	{ 0xA000, 0xC000 },
	{ 0xC000, 0xFE00 }, // DMA from WRAM blocks WRAM/echo.
	{ 0xA000, 0xC000 },
};
463
464uint8_t GBDMALoad8(struct LR35902Core* cpu, uint16_t address) {
465 struct GB* gb = (struct GB*) cpu->master;
466 struct GBMemory* memory = &gb->memory;
467 const struct OAMBlock* block = gb->model < GB_MODEL_CGB ? _oamBlockDMG : _oamBlockCGB;
468 block = &block[memory->dmaSource >> 13];
469 if (address >= block->low && address < block->high) {
470 return 0xFF;
471 }
472 if (address >= GB_BASE_OAM && address < GB_BASE_UNUSABLE) {
473 return 0xFF;
474 }
475 return GBLoad8(cpu, address);
476}
477
478void GBDMAStore8(struct LR35902Core* cpu, uint16_t address, int8_t value) {
479 struct GB* gb = (struct GB*) cpu->master;
480 struct GBMemory* memory = &gb->memory;
481 const struct OAMBlock* block = gb->model < GB_MODEL_CGB ? _oamBlockDMG : _oamBlockCGB;
482 block = &block[memory->dmaSource >> 13];
483 if (address >= block->low && address < block->high) {
484 return;
485 }
486 if (address >= GB_BASE_OAM && address < GB_BASE_UNUSABLE) {
487 return;
488 }
489 GBStore8(cpu, address, value);
490}
491
492void GBPatch8(struct LR35902Core* cpu, uint16_t address, int8_t value, int8_t* old, int segment) {
493 struct GB* gb = (struct GB*) cpu->master;
494 struct GBMemory* memory = &gb->memory;
495 int8_t oldValue = -1;
496
497 switch (address >> 12) {
498 case GB_REGION_CART_BANK0:
499 case GB_REGION_CART_BANK0 + 1:
500 case GB_REGION_CART_BANK0 + 2:
501 case GB_REGION_CART_BANK0 + 3:
502 _pristineCow(gb);
503 oldValue = memory->romBase[address & (GB_SIZE_CART_BANK0 - 1)];
504 memory->romBase[address & (GB_SIZE_CART_BANK0 - 1)] = value;
505 break;
506 case GB_REGION_CART_BANK1:
507 case GB_REGION_CART_BANK1 + 1:
508 case GB_REGION_CART_BANK1 + 2:
509 case GB_REGION_CART_BANK1 + 3:
510 _pristineCow(gb);
511 if (segment < 0) {
512 oldValue = memory->romBank[address & (GB_SIZE_CART_BANK0 - 1)];
513 memory->romBank[address & (GB_SIZE_CART_BANK0 - 1)] = value;
514 } else if ((size_t) segment * GB_SIZE_CART_BANK0 < memory->romSize) {
515 oldValue = memory->rom[(address & (GB_SIZE_CART_BANK0 - 1)) + segment * GB_SIZE_CART_BANK0];
516 memory->rom[(address & (GB_SIZE_CART_BANK0 - 1)) + segment * GB_SIZE_CART_BANK0] = value;
517 } else {
518 return;
519 }
520 break;
521 case GB_REGION_VRAM:
522 case GB_REGION_VRAM + 1:
523 if (segment < 0) {
524 oldValue = gb->video.vramBank[address & (GB_SIZE_VRAM_BANK0 - 1)];
525 gb->video.vramBank[address & (GB_SIZE_VRAM_BANK0 - 1)] = value;
526 gb->video.renderer->writeVRAM(gb->video.renderer, (address & (GB_SIZE_VRAM_BANK0 - 1)) + GB_SIZE_VRAM_BANK0 * gb->video.vramCurrentBank);
527 } else if (segment < 2) {
528 oldValue = gb->video.vram[(address & (GB_SIZE_VRAM_BANK0 - 1)) + segment * GB_SIZE_VRAM_BANK0];
529 gb->video.vramBank[(address & (GB_SIZE_VRAM_BANK0 - 1)) + segment * GB_SIZE_VRAM_BANK0] = value;
530 gb->video.renderer->writeVRAM(gb->video.renderer, (address & (GB_SIZE_VRAM_BANK0 - 1)) + segment * GB_SIZE_VRAM_BANK0);
531 } else {
532 return;
533 }
534 break;
535 case GB_REGION_EXTERNAL_RAM:
536 case GB_REGION_EXTERNAL_RAM + 1:
537 mLOG(GB_MEM, STUB, "Unimplemented memory Patch8: 0x%08X", address);
538 return;
539 case GB_REGION_WORKING_RAM_BANK0:
540 case GB_REGION_WORKING_RAM_BANK0 + 2:
541 oldValue = memory->wram[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
542 memory->wram[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
543 break;
544 case GB_REGION_WORKING_RAM_BANK1:
545 if (segment < 0) {
546 oldValue = memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
547 memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
548 } else if (segment < 8) {
549 oldValue = memory->wram[(address & (GB_SIZE_WORKING_RAM_BANK0 - 1)) + segment * GB_SIZE_WORKING_RAM_BANK0];
550 memory->wram[(address & (GB_SIZE_WORKING_RAM_BANK0 - 1)) + segment * GB_SIZE_WORKING_RAM_BANK0] = value;
551 } else {
552 return;
553 }
554 break;
555 default:
556 if (address < GB_BASE_OAM) {
557 oldValue = memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
558 memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
559 } else if (address < GB_BASE_UNUSABLE) {
560 oldValue = gb->video.oam.raw[address & 0xFF];
561 gb->video.oam.raw[address & 0xFF] = value;
562 } else if (address < GB_BASE_HRAM) {
563 mLOG(GB_MEM, STUB, "Unimplemented memory Patch8: 0x%08X", address);
564 return;
565 } else if (address < GB_BASE_IE) {
566 oldValue = memory->hram[address & GB_SIZE_HRAM];
567 memory->hram[address & GB_SIZE_HRAM] = value;
568 } else {
569 mLOG(GB_MEM, STUB, "Unimplemented memory Patch8: 0x%08X", address);
570 return;
571 }
572 }
573 if (old) {
574 *old = oldValue;
575 }
576}
577
// Copies the memory subsystem's state into the flat savestate structure.
// Multi-byte fields go through STORE_16LE/STORE_32LE so states are
// endian-portable; event times are stored relative to the current timing
// cursor so they can be rescheduled on load.
void GBMemorySerialize(const struct GB* gb, struct GBSerializedState* state) {
	const struct GBMemory* memory = &gb->memory;
	memcpy(state->wram, memory->wram, GB_SIZE_WORKING_RAM);
	memcpy(state->hram, memory->hram, GB_SIZE_HRAM);
	STORE_16LE(memory->currentBank, 0, &state->memory.currentBank);
	state->memory.wramCurrentBank = memory->wramCurrentBank;
	state->memory.sramCurrentBank = memory->sramCurrentBank;

	STORE_16LE(memory->dmaSource, 0, &state->memory.dmaSource);
	STORE_16LE(memory->dmaDest, 0, &state->memory.dmaDest);

	STORE_16LE(memory->hdmaSource, 0, &state->memory.hdmaSource);
	STORE_16LE(memory->hdmaDest, 0, &state->memory.hdmaDest);

	STORE_16LE(memory->hdmaRemaining, 0, &state->memory.hdmaRemaining);
	state->memory.dmaRemaining = memory->dmaRemaining;
	memcpy(state->memory.rtcRegs, memory->rtcRegs, sizeof(state->memory.rtcRegs));

	// Relative event deadlines (cycles until the next (H)DMA service).
	STORE_32LE(memory->dmaEvent.when - mTimingCurrentTime(&gb->timing), 0, &state->memory.dmaNext);
	STORE_32LE(memory->hdmaEvent.when - mTimingCurrentTime(&gb->timing), 0, &state->memory.hdmaNext);

	// Pack the boolean/small-enum state into one flags word.
	GBSerializedMemoryFlags flags = 0;
	flags = GBSerializedMemoryFlagsSetSramAccess(flags, memory->sramAccess);
	flags = GBSerializedMemoryFlagsSetRtcAccess(flags, memory->rtcAccess);
	flags = GBSerializedMemoryFlagsSetRtcLatched(flags, memory->rtcLatched);
	flags = GBSerializedMemoryFlagsSetIme(flags, memory->ime);
	flags = GBSerializedMemoryFlagsSetIsHdma(flags, memory->isHdma);
	flags = GBSerializedMemoryFlagsSetActiveRtcReg(flags, memory->activeRtcReg);
	STORE_16LE(flags, 0, &state->memory.flags);
}
608
// Restores the memory subsystem from a savestate: raw RAM contents, banking
// (re-derived through the bank-switch helpers so pointers stay consistent),
// (H)DMA progress, pending event deadlines, and the packed flags word.
void GBMemoryDeserialize(struct GB* gb, const struct GBSerializedState* state) {
	struct GBMemory* memory = &gb->memory;
	memcpy(memory->wram, state->wram, GB_SIZE_WORKING_RAM);
	memcpy(memory->hram, state->hram, GB_SIZE_HRAM);
	LOAD_16LE(memory->currentBank, 0, &state->memory.currentBank);
	memory->wramCurrentBank = state->memory.wramCurrentBank;
	memory->sramCurrentBank = state->memory.sramCurrentBank;

	// Recompute the bank pointers from the restored bank numbers.
	GBMBCSwitchBank(gb, memory->currentBank);
	GBMemorySwitchWramBank(memory, memory->wramCurrentBank);
	GBMBCSwitchSramBank(gb, memory->sramCurrentBank);

	LOAD_16LE(memory->dmaSource, 0, &state->memory.dmaSource);
	LOAD_16LE(memory->dmaDest, 0, &state->memory.dmaDest);

	LOAD_16LE(memory->hdmaSource, 0, &state->memory.hdmaSource);
	LOAD_16LE(memory->hdmaDest, 0, &state->memory.hdmaDest);

	LOAD_16LE(memory->hdmaRemaining, 0, &state->memory.hdmaRemaining);
	memory->dmaRemaining = state->memory.dmaRemaining;
	memcpy(memory->rtcRegs, state->memory.rtcRegs, sizeof(state->memory.rtcRegs));

	// Re-arm pending (H)DMA events from their stored relative deadlines.
	// NOTE(review): events are scheduled without a prior deschedule —
	// assumes neither event is queued at deserialize time; confirm against
	// the core's reset-before-load sequence.
	uint32_t when;
	LOAD_32LE(when, 0, &state->memory.dmaNext);
	if (memory->dmaRemaining) {
		mTimingSchedule(&gb->timing, &memory->dmaEvent, when);
	}
	LOAD_32LE(when, 0, &state->memory.hdmaNext);
	if (memory->hdmaRemaining) {
		mTimingSchedule(&gb->timing, &memory->hdmaEvent, when);
	}

	GBSerializedMemoryFlags flags;
	LOAD_16LE(flags, 0, &state->memory.flags);
	memory->sramAccess = GBSerializedMemoryFlagsGetSramAccess(flags);
	memory->rtcAccess = GBSerializedMemoryFlagsGetRtcAccess(flags);
	memory->rtcLatched = GBSerializedMemoryFlagsGetRtcLatched(flags);
	memory->ime = GBSerializedMemoryFlagsGetIme(flags);
	memory->isHdma = GBSerializedMemoryFlagsGetIsHdma(flags);
	memory->activeRtcReg = GBSerializedMemoryFlagsGetActiveRtcReg(flags);
}
650
651void _pristineCow(struct GB* gb) {
652 if (!gb->isPristine) {
653 return;
654 }
655 void* newRom = anonymousMemoryMap(GB_SIZE_CART_MAX);
656 memcpy(newRom, gb->memory.rom, gb->memory.romSize);
657 memset(((uint8_t*) newRom) + gb->memory.romSize, 0xFF, GB_SIZE_CART_MAX - gb->memory.romSize);
658 if (gb->memory.rom == gb->memory.romBase) {
659 gb->memory.romBase = newRom;
660 }
661 gb->memory.rom = newRom;
662 GBMBCSwitchBank(gb, gb->memory.currentBank);
663}