src/gb/memory.c
/* Copyright (c) 2013-2016 Jeffrey Pfau
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include <mgba/internal/gb/memory.h>

#include <mgba/core/interface.h>
#include <mgba/internal/gb/gb.h>
#include <mgba/internal/gb/io.h>
#include <mgba/internal/gb/mbc.h>
#include <mgba/internal/gb/serialize.h>
#include <mgba/internal/lr35902/lr35902.h>

#include <mgba-util/memory.h>

mLOG_DEFINE_CATEGORY(GB_MEM, "GB Memory", "gb.memory");

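// While OAM DMA is in flight, CPU accesses that land on the same bus as the DMA
// source are blocked (reads return 0xFF, writes are dropped). These tables are
// indexed by the top three bits of the DMA source address (8 KiB regions) and give
// the [low, high) range treated as blocked on DMG and CGB respectively.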
struct OAMBlock {
	uint16_t low;
	uint16_t high;
};

static const struct OAMBlock _oamBlockDMG[] = {
	{ 0xA000, 0xFE00 },
	{ 0xA000, 0xFE00 },
	{ 0xA000, 0xFE00 },
	{ 0xA000, 0xFE00 },
	{ 0x8000, 0xA000 },
	{ 0xA000, 0xFE00 },
	{ 0xA000, 0xFE00 },
	{ 0xA000, 0xFE00 },
};

static const struct OAMBlock _oamBlockCGB[] = {
	{ 0xA000, 0xC000 },
	{ 0xA000, 0xC000 },
	{ 0xA000, 0xC000 },
	{ 0xA000, 0xC000 },
	{ 0x8000, 0xA000 },
	{ 0xA000, 0xC000 },
	{ 0xC000, 0xFE00 },
	{ 0xA000, 0xC000 },
};

static void _pristineCow(struct GB* gb);

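// Fast path for opcode fetches: serve the load straight out of the cached
// activeRegion pointer, and only fall back to a full setActiveRegion/cpuLoad8
// round trip when the address leaves the cached region.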
static uint8_t GBFastLoad8(struct LR35902Core* cpu, uint16_t address) {
	if (UNLIKELY(address >= cpu->memory.activeRegionEnd)) {
		cpu->memory.setActiveRegion(cpu, address);
		return cpu->memory.cpuLoad8(cpu, address);
	}
	return cpu->memory.activeRegion[address & cpu->memory.activeMask];
}

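// Point the CPU's cached fetch region at ROM bank 0 or the switchable ROM bank so
// GBFastLoad8 can service fetches without a full bus dispatch; fetches from any
// other region fall back to the general GBLoad8 handler.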
static void GBSetActiveRegion(struct LR35902Core* cpu, uint16_t address) {
	struct GB* gb = (struct GB*) cpu->master;
	struct GBMemory* memory = &gb->memory;
	switch (address >> 12) {
	case GB_REGION_CART_BANK0:
	case GB_REGION_CART_BANK0 + 1:
	case GB_REGION_CART_BANK0 + 2:
	case GB_REGION_CART_BANK0 + 3:
		cpu->memory.cpuLoad8 = GBFastLoad8;
		cpu->memory.activeRegion = memory->romBase;
		cpu->memory.activeRegionEnd = GB_BASE_CART_BANK1;
		cpu->memory.activeMask = GB_SIZE_CART_BANK0 - 1;
		break;
	case GB_REGION_CART_BANK1:
	case GB_REGION_CART_BANK1 + 1:
	case GB_REGION_CART_BANK1 + 2:
	case GB_REGION_CART_BANK1 + 3:
		cpu->memory.cpuLoad8 = GBFastLoad8;
		cpu->memory.activeRegion = memory->romBank;
		cpu->memory.activeRegionEnd = GB_BASE_VRAM;
		cpu->memory.activeMask = GB_SIZE_CART_BANK0 - 1;
		break;
	default:
		cpu->memory.cpuLoad8 = GBLoad8;
		break;
	}
}

static void _GBMemoryDMAService(struct mTiming* timing, void* context, uint32_t cyclesLate);
static void _GBMemoryHDMAService(struct mTiming* timing, void* context, uint32_t cyclesLate);

void GBMemoryInit(struct GB* gb) {
	struct LR35902Core* cpu = gb->cpu;
	cpu->memory.cpuLoad8 = GBLoad8;
	cpu->memory.load8 = GBLoad8;
	cpu->memory.store8 = GBStore8;
	cpu->memory.currentSegment = GBCurrentSegment;
	cpu->memory.setActiveRegion = GBSetActiveRegion;

	gb->memory.wram = 0;
	gb->memory.wramBank = 0;
	gb->memory.rom = 0;
	gb->memory.romBank = 0;
	gb->memory.romSize = 0;
	gb->memory.sram = 0;
	gb->memory.mbcType = GB_MBC_AUTODETECT;
	gb->memory.mbc = 0;

	gb->memory.rtc = NULL;

	GBIOInit(gb);
}

void GBMemoryDeinit(struct GB* gb) {
	mappedMemoryFree(gb->memory.wram, GB_SIZE_WORKING_RAM);
	if (gb->memory.rom) {
		mappedMemoryFree(gb->memory.rom, gb->memory.romSize);
	}
}

void GBMemoryReset(struct GB* gb) {
	if (gb->memory.wram) {
		mappedMemoryFree(gb->memory.wram, GB_SIZE_WORKING_RAM);
	}
	gb->memory.wram = anonymousMemoryMap(GB_SIZE_WORKING_RAM);
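	// On CGB, fill WRAM with an alternating 8-byte on/8-byte off pattern whose
	// polarity flips every 0x800 bytes, roughly matching the striped contents the
	// console powers up with.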
	if (gb->model >= GB_MODEL_CGB) {
		uint32_t* base = (uint32_t*) gb->memory.wram;
		size_t i;
		uint32_t pattern = 0;
		for (i = 0; i < GB_SIZE_WORKING_RAM / 4; i += 4) {
			if ((i & 0x1FF) == 0) {
				pattern = ~pattern;
			}
			base[i + 0] = pattern;
			base[i + 1] = pattern;
			base[i + 2] = ~pattern;
			base[i + 3] = ~pattern;
		}
	}
	GBMemorySwitchWramBank(&gb->memory, 1);
	gb->memory.romBank = &gb->memory.rom[GB_SIZE_CART_BANK0];
	gb->memory.currentBank = 1;
	gb->memory.sramCurrentBank = 0;

	gb->memory.ime = false;
	gb->memory.ie = 0;

	gb->memory.dmaRemaining = 0;
	gb->memory.dmaSource = 0;
	gb->memory.dmaDest = 0;
	gb->memory.hdmaRemaining = 0;
	gb->memory.hdmaSource = 0;
	gb->memory.hdmaDest = 0;
	gb->memory.isHdma = false;

	gb->memory.dmaEvent.context = gb;
	gb->memory.dmaEvent.name = "GB DMA";
	gb->memory.dmaEvent.callback = _GBMemoryDMAService;
	gb->memory.dmaEvent.priority = 0x40;
	gb->memory.hdmaEvent.context = gb;
	gb->memory.hdmaEvent.name = "GB HDMA";
	gb->memory.hdmaEvent.callback = _GBMemoryHDMAService;
	gb->memory.hdmaEvent.priority = 0x41;

	gb->memory.sramAccess = false;
	gb->memory.rtcAccess = false;
	gb->memory.activeRtcReg = 0;
	gb->memory.rtcLatched = false;
	memset(&gb->memory.rtcRegs, 0, sizeof(gb->memory.rtcRegs));

	memset(&gb->memory.hram, 0, sizeof(gb->memory.hram));
	memset(&gb->memory.mbcState, 0, sizeof(gb->memory.mbcState));

	GBMBCInit(gb);
	gb->memory.sramBank = gb->memory.sram;

	if (!gb->memory.wram) {
		GBMemoryDeinit(gb);
	}
}

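// Map one of the seven switchable WRAM banks at 0xD000. As with the CGB's SVBK
// register, selecting bank 0 maps bank 1 instead.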
void GBMemorySwitchWramBank(struct GBMemory* memory, int bank) {
	bank &= 7;
	if (!bank) {
		bank = 1;
	}
	memory->wramBank = &memory->wram[GB_SIZE_WORKING_RAM_BANK0 * bank];
	memory->wramCurrentBank = bank;
}

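// General-purpose load handler. While OAM DMA is active, reads that conflict with
// the DMA bus (or target OAM itself) return 0xFF; otherwise the access is dispatched
// on the top nibble of the address.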
uint8_t GBLoad8(struct LR35902Core* cpu, uint16_t address) {
	struct GB* gb = (struct GB*) cpu->master;
	struct GBMemory* memory = &gb->memory;
	if (gb->memory.dmaRemaining) {
		const struct OAMBlock* block = gb->model < GB_MODEL_CGB ? _oamBlockDMG : _oamBlockCGB;
		block = &block[memory->dmaSource >> 13];
		if (address >= block->low && address < block->high) {
			return 0xFF;
		}
		if (address >= GB_BASE_OAM && address < GB_BASE_UNUSABLE) {
			return 0xFF;
		}
	}
	switch (address >> 12) {
	case GB_REGION_CART_BANK0:
	case GB_REGION_CART_BANK0 + 1:
	case GB_REGION_CART_BANK0 + 2:
	case GB_REGION_CART_BANK0 + 3:
		return memory->romBase[address & (GB_SIZE_CART_BANK0 - 1)];
	case GB_REGION_CART_BANK1:
	case GB_REGION_CART_BANK1 + 1:
	case GB_REGION_CART_BANK1 + 2:
	case GB_REGION_CART_BANK1 + 3:
		return memory->romBank[address & (GB_SIZE_CART_BANK0 - 1)];
	case GB_REGION_VRAM:
	case GB_REGION_VRAM + 1:
		return gb->video.vramBank[address & (GB_SIZE_VRAM_BANK0 - 1)];
	case GB_REGION_EXTERNAL_RAM:
	case GB_REGION_EXTERNAL_RAM + 1:
		if (memory->rtcAccess) {
			return memory->rtcRegs[memory->activeRtcReg];
		} else if (memory->sramAccess) {
			return memory->sramBank[address & (GB_SIZE_EXTERNAL_RAM - 1)];
		} else if (memory->mbcType == GB_MBC7) {
			return GBMBC7Read(memory, address);
		} else if (memory->mbcType == GB_HuC3) {
			return 0x01; // TODO: Is this supposed to be the current SRAM bank?
		}
		return 0xFF;
	case GB_REGION_WORKING_RAM_BANK0:
	case GB_REGION_WORKING_RAM_BANK0 + 2:
		return memory->wram[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
	case GB_REGION_WORKING_RAM_BANK1:
		return memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
	default:
		if (address < GB_BASE_OAM) {
			return memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
		}
		if (address < GB_BASE_UNUSABLE) {
			if (gb->video.mode < 2) {
				return gb->video.oam.raw[address & 0xFF];
			}
			return 0xFF;
		}
		if (address < GB_BASE_IO) {
			mLOG(GB_MEM, GAME_ERROR, "Attempt to read from unusable memory: %04X", address);
			return 0xFF;
		}
		if (address < GB_BASE_HRAM) {
			return GBIORead(gb, address & (GB_SIZE_IO - 1));
		}
		if (address < GB_BASE_IE) {
			return memory->hram[address & GB_SIZE_HRAM];
		}
		return GBIORead(gb, REG_IE);
	}
}

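// General-purpose store handler. Writes into the ROM address space are routed to the
// MBC (which may remap banks, so the CPU's cached fetch region is refreshed), writes
// during a conflicting OAM DMA are dropped, and external-RAM writes mark the save dirty.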
void GBStore8(struct LR35902Core* cpu, uint16_t address, int8_t value) {
	struct GB* gb = (struct GB*) cpu->master;
	struct GBMemory* memory = &gb->memory;
	if (gb->memory.dmaRemaining) {
		const struct OAMBlock* block = gb->model < GB_MODEL_CGB ? _oamBlockDMG : _oamBlockCGB;
		block = &block[memory->dmaSource >> 13];
		if (address >= block->low && address < block->high) {
			return;
		}
		if (address >= GB_BASE_OAM && address < GB_BASE_UNUSABLE) {
			return;
		}
	}
	switch (address >> 12) {
	case GB_REGION_CART_BANK0:
	case GB_REGION_CART_BANK0 + 1:
	case GB_REGION_CART_BANK0 + 2:
	case GB_REGION_CART_BANK0 + 3:
	case GB_REGION_CART_BANK1:
	case GB_REGION_CART_BANK1 + 1:
	case GB_REGION_CART_BANK1 + 2:
	case GB_REGION_CART_BANK1 + 3:
		memory->mbc(gb, address, value);
		cpu->memory.setActiveRegion(cpu, cpu->pc);
		return;
	case GB_REGION_VRAM:
	case GB_REGION_VRAM + 1:
		gb->video.renderer->writeVRAM(gb->video.renderer, (address & (GB_SIZE_VRAM_BANK0 - 1)) | (GB_SIZE_VRAM_BANK0 * gb->video.vramCurrentBank));
		gb->video.vramBank[address & (GB_SIZE_VRAM_BANK0 - 1)] = value;
		return;
	case GB_REGION_EXTERNAL_RAM:
	case GB_REGION_EXTERNAL_RAM + 1:
		if (memory->rtcAccess) {
			memory->rtcRegs[memory->activeRtcReg] = value;
		} else if (memory->sramAccess) {
			memory->sramBank[address & (GB_SIZE_EXTERNAL_RAM - 1)] = value;
		} else if (memory->mbcType == GB_MBC7) {
			GBMBC7Write(memory, address, value);
		}
		gb->sramDirty |= GB_SRAM_DIRT_NEW;
		return;
	case GB_REGION_WORKING_RAM_BANK0:
	case GB_REGION_WORKING_RAM_BANK0 + 2:
		memory->wram[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
		return;
	case GB_REGION_WORKING_RAM_BANK1:
		memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
		return;
	default:
		if (address < GB_BASE_OAM) {
			memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
		} else if (address < GB_BASE_UNUSABLE) {
			if (gb->video.mode < 2) {
				gb->video.oam.raw[address & 0xFF] = value;
				gb->video.renderer->writeOAM(gb->video.renderer, address & 0xFF);
			}
		} else if (address < GB_BASE_IO) {
			mLOG(GB_MEM, GAME_ERROR, "Attempt to write to unusable memory: %04X:%02X", address, value);
		} else if (address < GB_BASE_HRAM) {
			GBIOWrite(gb, address & (GB_SIZE_IO - 1), value);
		} else if (address < GB_BASE_IE) {
			memory->hram[address & GB_SIZE_HRAM] = value;
		} else {
			GBIOWrite(gb, REG_IE, value);
		}
	}
}

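// Report which bank ("segment") is currently mapped at the given address, e.g. so
// the debugger can label banked regions.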
int GBCurrentSegment(struct LR35902Core* cpu, uint16_t address) {
	struct GB* gb = (struct GB*) cpu->master;
	struct GBMemory* memory = &gb->memory;
	switch (address >> 12) {
	case GB_REGION_CART_BANK0:
	case GB_REGION_CART_BANK0 + 1:
	case GB_REGION_CART_BANK0 + 2:
	case GB_REGION_CART_BANK0 + 3:
		return 0;
	case GB_REGION_CART_BANK1:
	case GB_REGION_CART_BANK1 + 1:
	case GB_REGION_CART_BANK1 + 2:
	case GB_REGION_CART_BANK1 + 3:
		return memory->currentBank;
	case GB_REGION_VRAM:
	case GB_REGION_VRAM + 1:
		return gb->video.vramCurrentBank;
	case GB_REGION_EXTERNAL_RAM:
	case GB_REGION_EXTERNAL_RAM + 1:
		return memory->sramCurrentBank;
	case GB_REGION_WORKING_RAM_BANK0:
	case GB_REGION_WORKING_RAM_BANK0 + 2:
		return 0;
	case GB_REGION_WORKING_RAM_BANK1:
		return memory->wramCurrentBank;
	default:
		return 0;
	}
}

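// Side-effect-free read used for inspection: a negative segment reads whatever bank
// is currently mapped, while a non-negative segment addresses that bank directly
// (returning 0xFF if it is out of range).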
uint8_t GBView8(struct LR35902Core* cpu, uint16_t address, int segment) {
	struct GB* gb = (struct GB*) cpu->master;
	struct GBMemory* memory = &gb->memory;
	switch (address >> 12) {
	case GB_REGION_CART_BANK0:
	case GB_REGION_CART_BANK0 + 1:
	case GB_REGION_CART_BANK0 + 2:
	case GB_REGION_CART_BANK0 + 3:
		return memory->romBase[address & (GB_SIZE_CART_BANK0 - 1)];
	case GB_REGION_CART_BANK1:
	case GB_REGION_CART_BANK1 + 1:
	case GB_REGION_CART_BANK1 + 2:
	case GB_REGION_CART_BANK1 + 3:
		if (segment < 0) {
			return memory->romBank[address & (GB_SIZE_CART_BANK0 - 1)];
		} else if ((size_t) segment * GB_SIZE_CART_BANK0 < memory->romSize) {
			return memory->rom[(address & (GB_SIZE_CART_BANK0 - 1)) + segment * GB_SIZE_CART_BANK0];
		} else {
			return 0xFF;
		}
	case GB_REGION_VRAM:
	case GB_REGION_VRAM + 1:
		if (segment < 0) {
			return gb->video.vramBank[address & (GB_SIZE_VRAM_BANK0 - 1)];
		} else if (segment < 2) {
			return gb->video.vram[(address & (GB_SIZE_VRAM_BANK0 - 1)) + segment * GB_SIZE_VRAM_BANK0];
		} else {
			return 0xFF;
		}
	case GB_REGION_EXTERNAL_RAM:
	case GB_REGION_EXTERNAL_RAM + 1:
		if (memory->rtcAccess) {
			return memory->rtcRegs[memory->activeRtcReg];
		} else if (memory->sramAccess) {
			if (segment < 0) {
				return memory->sramBank[address & (GB_SIZE_EXTERNAL_RAM - 1)];
			} else if ((size_t) segment * GB_SIZE_EXTERNAL_RAM < gb->sramSize) {
				return memory->sram[(address & (GB_SIZE_EXTERNAL_RAM - 1)) + segment * GB_SIZE_EXTERNAL_RAM];
			} else {
				return 0xFF;
			}
		} else if (memory->mbcType == GB_MBC7) {
			return GBMBC7Read(memory, address);
		} else if (memory->mbcType == GB_HuC3) {
			return 0x01; // TODO: Is this supposed to be the current SRAM bank?
		}
		return 0xFF;
	case GB_REGION_WORKING_RAM_BANK0:
	case GB_REGION_WORKING_RAM_BANK0 + 2:
		return memory->wram[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
	case GB_REGION_WORKING_RAM_BANK1:
		if (segment < 0) {
			return memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
		} else if (segment < 8) {
			return memory->wram[(address & (GB_SIZE_WORKING_RAM_BANK0 - 1)) + segment * GB_SIZE_WORKING_RAM_BANK0];
		} else {
			return 0xFF;
		}
	default:
		if (address < GB_BASE_OAM) {
			return memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
		}
		if (address < GB_BASE_UNUSABLE) {
			if (gb->video.mode < 2) {
				return gb->video.oam.raw[address & 0xFF];
			}
			return 0xFF;
		}
		if (address < GB_BASE_IO) {
			mLOG(GB_MEM, GAME_ERROR, "Attempt to read from unusable memory: %04X", address);
			return 0xFF;
		}
		if (address < GB_BASE_HRAM) {
			return GBIORead(gb, address & (GB_SIZE_IO - 1));
		}
		if (address < GB_BASE_IE) {
			return memory->hram[address & GB_SIZE_HRAM];
		}
		return GBIORead(gb, REG_IE);
	}
}

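// Begin OAM DMA. After a short startup delay the service routine copies 0xA0 bytes
// from `base` to OAM, one byte every 4 cycles.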
void GBMemoryDMA(struct GB* gb, uint16_t base) {
	if (base > 0xF100) {
		return;
	}
	mTimingSchedule(&gb->timing, &gb->memory.dmaEvent, 8);
	if (gb->cpu->cycles + 8 < gb->cpu->nextEvent) {
		gb->cpu->nextEvent = gb->cpu->cycles + 8;
	}
	gb->memory.dmaSource = base;
	gb->memory.dmaDest = 0;
	gb->memory.dmaRemaining = 0xA0;
}

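// Writing HDMA5 latches the transfer source and destination from HDMA1-HDMA4 and
// requests a transfer of ((value & 0x7F) + 1) * 0x10 bytes into VRAM: with bit 7
// clear a general-purpose DMA runs immediately with the CPU blocked, with bit 7 set
// the copy proceeds as an H-blank DMA.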
void GBMemoryWriteHDMA5(struct GB* gb, uint8_t value) {
	gb->memory.hdmaSource = gb->memory.io[REG_HDMA1] << 8;
	gb->memory.hdmaSource |= gb->memory.io[REG_HDMA2];
	gb->memory.hdmaDest = gb->memory.io[REG_HDMA3] << 8;
	gb->memory.hdmaDest |= gb->memory.io[REG_HDMA4];
	gb->memory.hdmaSource &= 0xFFF0;
	if (gb->memory.hdmaSource >= 0x8000 && gb->memory.hdmaSource < 0xA000) {
		mLOG(GB_MEM, GAME_ERROR, "Invalid HDMA source: %04X", gb->memory.hdmaSource);
		return;
	}
	gb->memory.hdmaDest &= 0x1FF0;
	gb->memory.hdmaDest |= 0x8000;
	bool wasHdma = gb->memory.isHdma;
	gb->memory.isHdma = value & 0x80;
	if ((!wasHdma && !gb->memory.isHdma) || gb->video.mode == 0) {
		gb->memory.hdmaRemaining = ((value & 0x7F) + 1) * 0x10;
		gb->cpuBlocked = true;
		mTimingSchedule(&gb->timing, &gb->memory.hdmaEvent, 0);
		gb->cpu->nextEvent = gb->cpu->cycles;
	}
}

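// Copy a single byte of the pending OAM DMA. dmaRemaining is cleared around the read
// so the DMA unit's own fetch is not blocked by the in-progress-DMA check in GBLoad8,
// then restored and decremented before the next byte is scheduled.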
void _GBMemoryDMAService(struct mTiming* timing, void* context, uint32_t cyclesLate) {
	struct GB* gb = context;
	int dmaRemaining = gb->memory.dmaRemaining;
	gb->memory.dmaRemaining = 0;
	uint8_t b = GBLoad8(gb->cpu, gb->memory.dmaSource);
	// TODO: Can DMA write OAM during modes 2-3?
	gb->video.oam.raw[gb->memory.dmaDest] = b;
	gb->video.renderer->writeOAM(gb->video.renderer, gb->memory.dmaDest);
	++gb->memory.dmaSource;
	++gb->memory.dmaDest;
	gb->memory.dmaRemaining = dmaRemaining - 1;
	if (gb->memory.dmaRemaining) {
		mTimingSchedule(timing, &gb->memory.dmaEvent, 4 - cyclesLate);
	}
}

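// Copy a single byte of the pending HDMA/GDMA block while the CPU is blocked. Once
// the block completes, the updated source and destination are written back to
// HDMA1-HDMA4 and HDMA5 is updated (it counts down for H-blank DMA and reads 0xFF
// once the whole transfer is finished).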
void _GBMemoryHDMAService(struct mTiming* timing, void* context, uint32_t cyclesLate) {
	struct GB* gb = context;
	gb->cpuBlocked = true;
	uint8_t b = gb->cpu->memory.load8(gb->cpu, gb->memory.hdmaSource);
	gb->cpu->memory.store8(gb->cpu, gb->memory.hdmaDest, b);
	++gb->memory.hdmaSource;
	++gb->memory.hdmaDest;
	--gb->memory.hdmaRemaining;
	if (gb->memory.hdmaRemaining) {
		mTimingDeschedule(timing, &gb->memory.hdmaEvent);
		mTimingSchedule(timing, &gb->memory.hdmaEvent, 2 - cyclesLate);
	} else {
		gb->cpuBlocked = false;
		gb->memory.io[REG_HDMA1] = gb->memory.hdmaSource >> 8;
		gb->memory.io[REG_HDMA2] = gb->memory.hdmaSource;
		gb->memory.io[REG_HDMA3] = gb->memory.hdmaDest >> 8;
		gb->memory.io[REG_HDMA4] = gb->memory.hdmaDest;
		if (gb->memory.isHdma) {
			--gb->memory.io[REG_HDMA5];
			if (gb->memory.io[REG_HDMA5] == 0xFF) {
				gb->memory.isHdma = false;
			}
		} else {
			gb->memory.io[REG_HDMA5] = 0xFF;
		}
	}
}

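// Patch a byte directly in memory (used for things like cheats and debugger pokes),
// bypassing normal bus side effects: ROM patches force a copy-on-write of the
// pristine image, and the previous value is returned through `old` when requested.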
void GBPatch8(struct LR35902Core* cpu, uint16_t address, int8_t value, int8_t* old, int segment) {
	struct GB* gb = (struct GB*) cpu->master;
	struct GBMemory* memory = &gb->memory;
	int8_t oldValue = -1;

	switch (address >> 12) {
	case GB_REGION_CART_BANK0:
	case GB_REGION_CART_BANK0 + 1:
	case GB_REGION_CART_BANK0 + 2:
	case GB_REGION_CART_BANK0 + 3:
		_pristineCow(gb);
		oldValue = memory->romBase[address & (GB_SIZE_CART_BANK0 - 1)];
		memory->romBase[address & (GB_SIZE_CART_BANK0 - 1)] = value;
		break;
	case GB_REGION_CART_BANK1:
	case GB_REGION_CART_BANK1 + 1:
	case GB_REGION_CART_BANK1 + 2:
	case GB_REGION_CART_BANK1 + 3:
		_pristineCow(gb);
		if (segment < 0) {
			oldValue = memory->romBank[address & (GB_SIZE_CART_BANK0 - 1)];
			memory->romBank[address & (GB_SIZE_CART_BANK0 - 1)] = value;
		} else if ((size_t) segment * GB_SIZE_CART_BANK0 < memory->romSize) {
			oldValue = memory->rom[(address & (GB_SIZE_CART_BANK0 - 1)) + segment * GB_SIZE_CART_BANK0];
			memory->rom[(address & (GB_SIZE_CART_BANK0 - 1)) + segment * GB_SIZE_CART_BANK0] = value;
		} else {
			return;
		}
		break;
	case GB_REGION_VRAM:
	case GB_REGION_VRAM + 1:
		if (segment < 0) {
			oldValue = gb->video.vramBank[address & (GB_SIZE_VRAM_BANK0 - 1)];
			gb->video.vramBank[address & (GB_SIZE_VRAM_BANK0 - 1)] = value;
			gb->video.renderer->writeVRAM(gb->video.renderer, (address & (GB_SIZE_VRAM_BANK0 - 1)) + GB_SIZE_VRAM_BANK0 * gb->video.vramCurrentBank);
		} else if (segment < 2) {
			oldValue = gb->video.vram[(address & (GB_SIZE_VRAM_BANK0 - 1)) + segment * GB_SIZE_VRAM_BANK0];
			gb->video.vram[(address & (GB_SIZE_VRAM_BANK0 - 1)) + segment * GB_SIZE_VRAM_BANK0] = value;
			gb->video.renderer->writeVRAM(gb->video.renderer, (address & (GB_SIZE_VRAM_BANK0 - 1)) + segment * GB_SIZE_VRAM_BANK0);
		} else {
			return;
		}
		break;
	case GB_REGION_EXTERNAL_RAM:
	case GB_REGION_EXTERNAL_RAM + 1:
		mLOG(GB_MEM, STUB, "Unimplemented memory Patch8: 0x%08X", address);
		return;
	case GB_REGION_WORKING_RAM_BANK0:
	case GB_REGION_WORKING_RAM_BANK0 + 2:
		oldValue = memory->wram[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
		memory->wram[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
		break;
	case GB_REGION_WORKING_RAM_BANK1:
		if (segment < 0) {
			oldValue = memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
			memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
		} else if (segment < 8) {
			oldValue = memory->wram[(address & (GB_SIZE_WORKING_RAM_BANK0 - 1)) + segment * GB_SIZE_WORKING_RAM_BANK0];
			memory->wram[(address & (GB_SIZE_WORKING_RAM_BANK0 - 1)) + segment * GB_SIZE_WORKING_RAM_BANK0] = value;
		} else {
			return;
		}
		break;
	default:
		if (address < GB_BASE_OAM) {
			oldValue = memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
			memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
		} else if (address < GB_BASE_UNUSABLE) {
			oldValue = gb->video.oam.raw[address & 0xFF];
			gb->video.oam.raw[address & 0xFF] = value;
			gb->video.renderer->writeOAM(gb->video.renderer, address & 0xFF);
		} else if (address < GB_BASE_HRAM) {
			mLOG(GB_MEM, STUB, "Unimplemented memory Patch8: 0x%08X", address);
			return;
		} else if (address < GB_BASE_IE) {
			oldValue = memory->hram[address & GB_SIZE_HRAM];
			memory->hram[address & GB_SIZE_HRAM] = value;
		} else {
			mLOG(GB_MEM, STUB, "Unimplemented memory Patch8: 0x%08X", address);
			return;
		}
	}
	if (old) {
		*old = oldValue;
	}
}

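// Savestate support: copy WRAM, HRAM, banking state, DMA/HDMA progress, and the RTC
// registers into the serialized state (multi-byte fields are stored little-endian),
// with GBMemoryDeserialize below performing the inverse.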
void GBMemorySerialize(const struct GB* gb, struct GBSerializedState* state) {
	const struct GBMemory* memory = &gb->memory;
	memcpy(state->wram, memory->wram, GB_SIZE_WORKING_RAM);
	memcpy(state->hram, memory->hram, GB_SIZE_HRAM);
	STORE_16LE(memory->currentBank, 0, &state->memory.currentBank);
	state->memory.wramCurrentBank = memory->wramCurrentBank;
	state->memory.sramCurrentBank = memory->sramCurrentBank;

	STORE_16LE(memory->dmaSource, 0, &state->memory.dmaSource);
	STORE_16LE(memory->dmaDest, 0, &state->memory.dmaDest);

	STORE_16LE(memory->hdmaSource, 0, &state->memory.hdmaSource);
	STORE_16LE(memory->hdmaDest, 0, &state->memory.hdmaDest);

	STORE_16LE(memory->hdmaRemaining, 0, &state->memory.hdmaRemaining);
	state->memory.dmaRemaining = memory->dmaRemaining;
	memcpy(state->memory.rtcRegs, memory->rtcRegs, sizeof(state->memory.rtcRegs));

	STORE_32LE(memory->dmaEvent.when - mTimingCurrentTime(&gb->timing), 0, &state->memory.dmaNext);
	STORE_32LE(memory->hdmaEvent.when - mTimingCurrentTime(&gb->timing), 0, &state->memory.hdmaNext);

	GBSerializedMemoryFlags flags = 0;
	flags = GBSerializedMemoryFlagsSetSramAccess(flags, memory->sramAccess);
	flags = GBSerializedMemoryFlagsSetRtcAccess(flags, memory->rtcAccess);
	flags = GBSerializedMemoryFlagsSetRtcLatched(flags, memory->rtcLatched);
	flags = GBSerializedMemoryFlagsSetIme(flags, memory->ime);
	flags = GBSerializedMemoryFlagsSetIsHdma(flags, memory->isHdma);
	flags = GBSerializedMemoryFlagsSetActiveRtcReg(flags, memory->activeRtcReg);
	STORE_16LE(flags, 0, &state->memory.flags);
}

void GBMemoryDeserialize(struct GB* gb, const struct GBSerializedState* state) {
	struct GBMemory* memory = &gb->memory;
	memcpy(memory->wram, state->wram, GB_SIZE_WORKING_RAM);
	memcpy(memory->hram, state->hram, GB_SIZE_HRAM);
	LOAD_16LE(memory->currentBank, 0, &state->memory.currentBank);
	memory->wramCurrentBank = state->memory.wramCurrentBank;
	memory->sramCurrentBank = state->memory.sramCurrentBank;

	GBMBCSwitchBank(gb, memory->currentBank);
	GBMemorySwitchWramBank(memory, memory->wramCurrentBank);
	GBMBCSwitchSramBank(gb, memory->sramCurrentBank);

	LOAD_16LE(memory->dmaSource, 0, &state->memory.dmaSource);
	LOAD_16LE(memory->dmaDest, 0, &state->memory.dmaDest);

	LOAD_16LE(memory->hdmaSource, 0, &state->memory.hdmaSource);
	LOAD_16LE(memory->hdmaDest, 0, &state->memory.hdmaDest);

	LOAD_16LE(memory->hdmaRemaining, 0, &state->memory.hdmaRemaining);
	memory->dmaRemaining = state->memory.dmaRemaining;
	memcpy(memory->rtcRegs, state->memory.rtcRegs, sizeof(state->memory.rtcRegs));

	uint32_t when;
	LOAD_32LE(when, 0, &state->memory.dmaNext);
	if (memory->dmaRemaining) {
		mTimingSchedule(&gb->timing, &memory->dmaEvent, when);
	}
	LOAD_32LE(when, 0, &state->memory.hdmaNext);
	if (memory->hdmaRemaining) {
		mTimingSchedule(&gb->timing, &memory->hdmaEvent, when);
	}

	GBSerializedMemoryFlags flags;
	LOAD_16LE(flags, 0, &state->memory.flags);
	memory->sramAccess = GBSerializedMemoryFlagsGetSramAccess(flags);
	memory->rtcAccess = GBSerializedMemoryFlagsGetRtcAccess(flags);
	memory->rtcLatched = GBSerializedMemoryFlagsGetRtcLatched(flags);
	memory->ime = GBSerializedMemoryFlagsGetIme(flags);
	memory->isHdma = GBSerializedMemoryFlagsGetIsHdma(flags);
	memory->activeRtcReg = GBSerializedMemoryFlagsGetActiveRtcReg(flags);
}

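// Copy-on-write for the ROM image: if the loaded ROM is still the pristine (possibly
// memory-mapped) file, replace it with an anonymous copy padded to GB_SIZE_CART_MAX
// with 0xFF so that patches never touch the original backing memory.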
void _pristineCow(struct GB* gb) {
	if (!gb->isPristine) {
		return;
	}
	void* newRom = anonymousMemoryMap(GB_SIZE_CART_MAX);
	memcpy(newRom, gb->memory.rom, gb->memory.romSize);
	memset(((uint8_t*) newRom) + gb->memory.romSize, 0xFF, GB_SIZE_CART_MAX - gb->memory.romSize);
	if (gb->memory.rom == gb->memory.romBase) {
		gb->memory.romBase = newRom;
	}
	gb->memory.rom = newRom;
	GBMBCSwitchBank(gb, gb->memory.currentBank);
}