src/gb/memory.c
/* Copyright (c) 2013-2016 Jeffrey Pfau
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include <mgba/internal/gb/memory.h>

#include <mgba/core/interface.h>
#include <mgba/internal/gb/gb.h>
#include <mgba/internal/gb/io.h>
#include <mgba/internal/gb/mbc.h>
#include <mgba/internal/gb/serialize.h>
#include <mgba/internal/lr35902/lr35902.h>

#include <mgba-util/memory.h>

mLOG_DEFINE_CATEGORY(GB_MEM, "GB Memory", "gb.memory");

struct OAMBlock {
	uint16_t low;
	uint16_t high;
};

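// While OAM DMA is active, the CPU cannot use whichever bus the DMA engine is
// reading from. Indexed by (dmaSource >> 13), these tables give the address
// range that reads back 0xFF and ignores writes for the duration of the
// transfer; DMG and CGB differ, apparently because WRAM shares the external
// bus on DMG but sits on its own bus on CGB.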
static const struct OAMBlock _oamBlockDMG[] = {
	{ 0xA000, 0xFE00 },
	{ 0xA000, 0xFE00 },
	{ 0xA000, 0xFE00 },
	{ 0xA000, 0xFE00 },
	{ 0x8000, 0xA000 },
	{ 0xA000, 0xFE00 },
	{ 0xA000, 0xFE00 },
	{ 0xA000, 0xFE00 },
};

static const struct OAMBlock _oamBlockCGB[] = {
	{ 0xA000, 0xC000 },
	{ 0xA000, 0xC000 },
	{ 0xA000, 0xC000 },
	{ 0xA000, 0xC000 },
	{ 0x8000, 0xA000 },
	{ 0xA000, 0xC000 },
	{ 0xC000, 0xFE00 },
	{ 0xA000, 0xC000 },
};

static void _pristineCow(struct GB* gb);

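// Fast path for instruction fetches: reads inside the cached active region
// (ROM bank 0 or the currently switched-in bank) index directly into host
// memory; anything outside it falls back to GBLoad8 via setActiveRegion.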
static uint8_t GBFastLoad8(struct LR35902Core* cpu, uint16_t address) {
	if (UNLIKELY(address >= cpu->memory.activeRegionEnd)) {
		cpu->memory.setActiveRegion(cpu, address);
		return cpu->memory.cpuLoad8(cpu, address);
	}
	return cpu->memory.activeRegion[address & cpu->memory.activeMask];
}

static void GBSetActiveRegion(struct LR35902Core* cpu, uint16_t address) {
	struct GB* gb = (struct GB*) cpu->master;
	struct GBMemory* memory = &gb->memory;
	switch (address >> 12) {
	case GB_REGION_CART_BANK0:
	case GB_REGION_CART_BANK0 + 1:
	case GB_REGION_CART_BANK0 + 2:
	case GB_REGION_CART_BANK0 + 3:
		cpu->memory.cpuLoad8 = GBFastLoad8;
		cpu->memory.activeRegion = memory->romBase;
		cpu->memory.activeRegionEnd = GB_BASE_CART_BANK1;
		cpu->memory.activeMask = GB_SIZE_CART_BANK0 - 1;
		break;
	case GB_REGION_CART_BANK1:
	case GB_REGION_CART_BANK1 + 1:
	case GB_REGION_CART_BANK1 + 2:
	case GB_REGION_CART_BANK1 + 3:
		cpu->memory.cpuLoad8 = GBFastLoad8;
		cpu->memory.activeRegion = memory->romBank;
		cpu->memory.activeRegionEnd = GB_BASE_VRAM;
		cpu->memory.activeMask = GB_SIZE_CART_BANK0 - 1;
		break;
	default:
		cpu->memory.cpuLoad8 = GBLoad8;
		break;
	}
}

static void _GBMemoryDMAService(struct mTiming* timing, void* context, uint32_t cyclesLate);
static void _GBMemoryHDMAService(struct mTiming* timing, void* context, uint32_t cyclesLate);

void GBMemoryInit(struct GB* gb) {
	struct LR35902Core* cpu = gb->cpu;
	cpu->memory.cpuLoad8 = GBLoad8;
	cpu->memory.load8 = GBLoad8;
	cpu->memory.store8 = GBStore8;
	cpu->memory.currentSegment = GBCurrentSegment;
	cpu->memory.setActiveRegion = GBSetActiveRegion;

	gb->memory.wram = 0;
	gb->memory.wramBank = 0;
	gb->memory.rom = 0;
	gb->memory.romBank = 0;
	gb->memory.romSize = 0;
	gb->memory.sram = 0;
	gb->memory.mbcType = GB_MBC_AUTODETECT;
	gb->memory.mbcRead = NULL;
	gb->memory.mbcWrite = NULL;

	gb->memory.rtc = NULL;

	GBIOInit(gb);
}

void GBMemoryDeinit(struct GB* gb) {
	mappedMemoryFree(gb->memory.wram, GB_SIZE_WORKING_RAM);
	if (gb->memory.rom) {
		mappedMemoryFree(gb->memory.rom, gb->memory.romSize);
	}
}

void GBMemoryReset(struct GB* gb) {
	if (gb->memory.wram) {
		mappedMemoryFree(gb->memory.wram, GB_SIZE_WORKING_RAM);
	}
	gb->memory.wram = anonymousMemoryMap(GB_SIZE_WORKING_RAM);
	if (gb->model >= GB_MODEL_CGB) {
		uint32_t* base = (uint32_t*) gb->memory.wram;
		size_t i;
		uint32_t pattern = 0;
		for (i = 0; i < GB_SIZE_WORKING_RAM / 4; i += 4) {
			if ((i & 0x1FF) == 0) {
				pattern = ~pattern;
			}
			base[i + 0] = pattern;
			base[i + 1] = pattern;
			base[i + 2] = ~pattern;
			base[i + 3] = ~pattern;
		}
	}
	GBMemorySwitchWramBank(&gb->memory, 1);
	gb->memory.romBank = &gb->memory.rom[GB_SIZE_CART_BANK0];
	gb->memory.currentBank = 1;
	gb->memory.sramCurrentBank = 0;

	gb->memory.ime = false;
	gb->memory.ie = 0;

	gb->memory.dmaRemaining = 0;
	gb->memory.dmaSource = 0;
	gb->memory.dmaDest = 0;
	gb->memory.hdmaRemaining = 0;
	gb->memory.hdmaSource = 0;
	gb->memory.hdmaDest = 0;
	gb->memory.isHdma = false;

	gb->memory.dmaEvent.context = gb;
	gb->memory.dmaEvent.name = "GB DMA";
	gb->memory.dmaEvent.callback = _GBMemoryDMAService;
	gb->memory.dmaEvent.priority = 0x40;
	gb->memory.hdmaEvent.context = gb;
	gb->memory.hdmaEvent.name = "GB HDMA";
	gb->memory.hdmaEvent.callback = _GBMemoryHDMAService;
	gb->memory.hdmaEvent.priority = 0x41;

	memset(&gb->memory.hram, 0, sizeof(gb->memory.hram));
	switch (gb->memory.mbcType) {
	case GB_MBC1:
		gb->memory.mbcState.mbc1.mode = 0;
		break;
	default:
		memset(&gb->memory.mbcState, 0, sizeof(gb->memory.mbcState));
	}

	GBMBCInit(gb);
	gb->memory.sramBank = gb->memory.sram;

	if (!gb->memory.wram) {
		GBMemoryDeinit(gb);
	}
}

void GBMemorySwitchWramBank(struct GBMemory* memory, int bank) {
	bank &= 7;
	if (!bank) {
		bank = 1;
	}
	memory->wramBank = &memory->wram[GB_SIZE_WORKING_RAM_BANK0 * bank];
	memory->wramCurrentBank = bank;
}

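// Generic CPU read handler. While OAM DMA is in flight, reads from the bus the
// DMA engine is using (see _oamBlockDMG/_oamBlockCGB) or from OAM itself
// return 0xFF; otherwise the address is dispatched on its top nibble.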
uint8_t GBLoad8(struct LR35902Core* cpu, uint16_t address) {
	struct GB* gb = (struct GB*) cpu->master;
	struct GBMemory* memory = &gb->memory;
	if (gb->memory.dmaRemaining) {
		const struct OAMBlock* block = gb->model < GB_MODEL_CGB ? _oamBlockDMG : _oamBlockCGB;
		block = &block[memory->dmaSource >> 13];
		if (address >= block->low && address < block->high) {
			return 0xFF;
		}
		if (address >= GB_BASE_OAM && address < GB_BASE_UNUSABLE) {
			return 0xFF;
		}
	}
	switch (address >> 12) {
	case GB_REGION_CART_BANK0:
	case GB_REGION_CART_BANK0 + 1:
	case GB_REGION_CART_BANK0 + 2:
	case GB_REGION_CART_BANK0 + 3:
		return memory->romBase[address & (GB_SIZE_CART_BANK0 - 1)];
	case GB_REGION_CART_BANK1:
	case GB_REGION_CART_BANK1 + 1:
	case GB_REGION_CART_BANK1 + 2:
	case GB_REGION_CART_BANK1 + 3:
		return memory->romBank[address & (GB_SIZE_CART_BANK0 - 1)];
	case GB_REGION_VRAM:
	case GB_REGION_VRAM + 1:
		return gb->video.vramBank[address & (GB_SIZE_VRAM_BANK0 - 1)];
	case GB_REGION_EXTERNAL_RAM:
	case GB_REGION_EXTERNAL_RAM + 1:
		if (memory->rtcAccess) {
			return memory->rtcRegs[memory->activeRtcReg];
		} else if (memory->mbcRead) {
			return memory->mbcRead(memory, address);
		} else if (memory->sramAccess) {
			return memory->sramBank[address & (GB_SIZE_EXTERNAL_RAM - 1)];
		} else if (memory->mbcType == GB_HuC3) {
			return 0x01; // TODO: Is this supposed to be the current SRAM bank?
		}
		return 0xFF;
	case GB_REGION_WORKING_RAM_BANK0:
	case GB_REGION_WORKING_RAM_BANK0 + 2:
		return memory->wram[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
	case GB_REGION_WORKING_RAM_BANK1:
		return memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
	default:
		if (address < GB_BASE_OAM) {
			return memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
		}
		if (address < GB_BASE_UNUSABLE) {
			if (gb->video.mode < 2) {
				return gb->video.oam.raw[address & 0xFF];
			}
			return 0xFF;
		}
		if (address < GB_BASE_IO) {
			mLOG(GB_MEM, GAME_ERROR, "Attempt to read from unusable memory: %04X", address);
			return 0xFF;
		}
		if (address < GB_BASE_HRAM) {
			return GBIORead(gb, address & (GB_SIZE_IO - 1));
		}
		if (address < GB_BASE_IE) {
			return memory->hram[address & GB_SIZE_HRAM];
		}
		return GBIORead(gb, REG_IE);
	}
}

void GBStore8(struct LR35902Core* cpu, uint16_t address, int8_t value) {
	struct GB* gb = (struct GB*) cpu->master;
	struct GBMemory* memory = &gb->memory;
	if (gb->memory.dmaRemaining) {
		const struct OAMBlock* block = gb->model < GB_MODEL_CGB ? _oamBlockDMG : _oamBlockCGB;
		block = &block[memory->dmaSource >> 13];
		if (address >= block->low && address < block->high) {
			return;
		}
		if (address >= GB_BASE_OAM && address < GB_BASE_UNUSABLE) {
			return;
		}
	}
	switch (address >> 12) {
	case GB_REGION_CART_BANK0:
	case GB_REGION_CART_BANK0 + 1:
	case GB_REGION_CART_BANK0 + 2:
	case GB_REGION_CART_BANK0 + 3:
	case GB_REGION_CART_BANK1:
	case GB_REGION_CART_BANK1 + 1:
	case GB_REGION_CART_BANK1 + 2:
	case GB_REGION_CART_BANK1 + 3:
		memory->mbcWrite(gb, address, value);
		cpu->memory.setActiveRegion(cpu, cpu->pc);
		return;
	case GB_REGION_VRAM:
	case GB_REGION_VRAM + 1:
		gb->video.renderer->writeVRAM(gb->video.renderer, (address & (GB_SIZE_VRAM_BANK0 - 1)) | (GB_SIZE_VRAM_BANK0 * gb->video.vramCurrentBank));
		gb->video.vramBank[address & (GB_SIZE_VRAM_BANK0 - 1)] = value;
		return;
	case GB_REGION_EXTERNAL_RAM:
	case GB_REGION_EXTERNAL_RAM + 1:
		if (memory->rtcAccess) {
			memory->rtcRegs[memory->activeRtcReg] = value;
		} else if (memory->sramAccess) {
			memory->sramBank[address & (GB_SIZE_EXTERNAL_RAM - 1)] = value;
		} else if (memory->mbcType == GB_MBC7) {
			GBMBC7Write(memory, address, value);
		}
		gb->sramDirty |= GB_SRAM_DIRT_NEW;
		return;
	case GB_REGION_WORKING_RAM_BANK0:
	case GB_REGION_WORKING_RAM_BANK0 + 2:
		memory->wram[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
		return;
	case GB_REGION_WORKING_RAM_BANK1:
		memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
		return;
	default:
		if (address < GB_BASE_OAM) {
			memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
		} else if (address < GB_BASE_UNUSABLE) {
			if (gb->video.mode < 2) {
				gb->video.oam.raw[address & 0xFF] = value;
				gb->video.renderer->writeOAM(gb->video.renderer, address & 0xFF);
			}
		} else if (address < GB_BASE_IO) {
			mLOG(GB_MEM, GAME_ERROR, "Attempt to write to unusable memory: %04X:%02X", address, value);
		} else if (address < GB_BASE_HRAM) {
			GBIOWrite(gb, address & (GB_SIZE_IO - 1), value);
		} else if (address < GB_BASE_IE) {
			memory->hram[address & GB_SIZE_HRAM] = value;
		} else {
			GBIOWrite(gb, REG_IE, value);
		}
	}
}

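// Report the bank ("segment") currently mapped at a given address, for the
// debugger and memory views; regions with no banking report segment 0.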
int GBCurrentSegment(struct LR35902Core* cpu, uint16_t address) {
	struct GB* gb = (struct GB*) cpu->master;
	struct GBMemory* memory = &gb->memory;
	switch (address >> 12) {
	case GB_REGION_CART_BANK0:
	case GB_REGION_CART_BANK0 + 1:
	case GB_REGION_CART_BANK0 + 2:
	case GB_REGION_CART_BANK0 + 3:
		return 0;
	case GB_REGION_CART_BANK1:
	case GB_REGION_CART_BANK1 + 1:
	case GB_REGION_CART_BANK1 + 2:
	case GB_REGION_CART_BANK1 + 3:
		return memory->currentBank;
	case GB_REGION_VRAM:
	case GB_REGION_VRAM + 1:
		return gb->video.vramCurrentBank;
	case GB_REGION_EXTERNAL_RAM:
	case GB_REGION_EXTERNAL_RAM + 1:
		return memory->sramCurrentBank;
	case GB_REGION_WORKING_RAM_BANK0:
	case GB_REGION_WORKING_RAM_BANK0 + 2:
		return 0;
	case GB_REGION_WORKING_RAM_BANK1:
		return memory->wramCurrentBank;
	default:
		return 0;
	}
}

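// Debugger read: like GBLoad8, but ignores OAM DMA blocking and can target an
// explicit bank via `segment` (negative means whichever bank is mapped now).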
uint8_t GBView8(struct LR35902Core* cpu, uint16_t address, int segment) {
	struct GB* gb = (struct GB*) cpu->master;
	struct GBMemory* memory = &gb->memory;
	switch (address >> 12) {
	case GB_REGION_CART_BANK0:
	case GB_REGION_CART_BANK0 + 1:
	case GB_REGION_CART_BANK0 + 2:
	case GB_REGION_CART_BANK0 + 3:
		return memory->romBase[address & (GB_SIZE_CART_BANK0 - 1)];
	case GB_REGION_CART_BANK1:
	case GB_REGION_CART_BANK1 + 1:
	case GB_REGION_CART_BANK1 + 2:
	case GB_REGION_CART_BANK1 + 3:
		if (segment < 0) {
			return memory->romBank[address & (GB_SIZE_CART_BANK0 - 1)];
		} else if ((size_t) segment * GB_SIZE_CART_BANK0 < memory->romSize) {
			return memory->rom[(address & (GB_SIZE_CART_BANK0 - 1)) + segment * GB_SIZE_CART_BANK0];
		} else {
			return 0xFF;
		}
	case GB_REGION_VRAM:
	case GB_REGION_VRAM + 1:
		if (segment < 0) {
			return gb->video.vramBank[address & (GB_SIZE_VRAM_BANK0 - 1)];
		} else if (segment < 2) {
			return gb->video.vram[(address & (GB_SIZE_VRAM_BANK0 - 1)) + segment * GB_SIZE_VRAM_BANK0];
		} else {
			return 0xFF;
		}
	case GB_REGION_EXTERNAL_RAM:
	case GB_REGION_EXTERNAL_RAM + 1:
		if (memory->rtcAccess) {
			return memory->rtcRegs[memory->activeRtcReg];
		} else if (memory->sramAccess) {
			if (segment < 0) {
				return memory->sramBank[address & (GB_SIZE_EXTERNAL_RAM - 1)];
			} else if ((size_t) segment * GB_SIZE_EXTERNAL_RAM < gb->sramSize) {
				return memory->sram[(address & (GB_SIZE_EXTERNAL_RAM - 1)) + segment * GB_SIZE_EXTERNAL_RAM];
			} else {
				return 0xFF;
			}
		} else if (memory->mbcRead) {
			return memory->mbcRead(memory, address);
		} else if (memory->mbcType == GB_HuC3) {
			return 0x01; // TODO: Is this supposed to be the current SRAM bank?
		}
		return 0xFF;
	case GB_REGION_WORKING_RAM_BANK0:
	case GB_REGION_WORKING_RAM_BANK0 + 2:
		return memory->wram[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
	case GB_REGION_WORKING_RAM_BANK1:
		if (segment < 0) {
			return memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
		} else if (segment < 8) {
			return memory->wram[(address & (GB_SIZE_WORKING_RAM_BANK0 - 1)) + segment * GB_SIZE_WORKING_RAM_BANK0];
		} else {
			return 0xFF;
		}
	default:
		if (address < GB_BASE_OAM) {
			return memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
		}
		if (address < GB_BASE_UNUSABLE) {
			if (gb->video.mode < 2) {
				return gb->video.oam.raw[address & 0xFF];
			}
			return 0xFF;
		}
		if (address < GB_BASE_IO) {
			mLOG(GB_MEM, GAME_ERROR, "Attempt to read from unusable memory: %04X", address);
			return 0xFF;
		}
		if (address < GB_BASE_HRAM) {
			return GBIORead(gb, address & (GB_SIZE_IO - 1));
		}
		if (address < GB_BASE_IE) {
			return memory->hram[address & GB_SIZE_HRAM];
		}
		return GBIORead(gb, REG_IE);
	}
}

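// Begin an OAM DMA transfer of 0xA0 bytes from `base`; the first byte is
// copied 8 cycles after the write to REG_DMA (see _GBMemoryDMAService).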
void GBMemoryDMA(struct GB* gb, uint16_t base) {
	if (base > 0xF100) {
		return;
	}
	mTimingSchedule(&gb->timing, &gb->memory.dmaEvent, 8);
	if (gb->cpu->cycles + 8 < gb->cpu->nextEvent) {
		gb->cpu->nextEvent = gb->cpu->cycles + 8;
	}
	gb->memory.dmaSource = base;
	gb->memory.dmaDest = 0;
	gb->memory.dmaRemaining = 0xA0;
}

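// A write to REG_HDMA5 latches the source and destination from HDMA1-HDMA4 and
// either kicks off a general-purpose DMA right away (bit 7 clear) or arms an
// H-Blank DMA (bit 7 set); VRAM addresses are rejected as sources.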
void GBMemoryWriteHDMA5(struct GB* gb, uint8_t value) {
	gb->memory.hdmaSource = gb->memory.io[REG_HDMA1] << 8;
	gb->memory.hdmaSource |= gb->memory.io[REG_HDMA2];
	gb->memory.hdmaDest = gb->memory.io[REG_HDMA3] << 8;
	gb->memory.hdmaDest |= gb->memory.io[REG_HDMA4];
	gb->memory.hdmaSource &= 0xFFF0;
	if (gb->memory.hdmaSource >= 0x8000 && gb->memory.hdmaSource < 0xA000) {
		mLOG(GB_MEM, GAME_ERROR, "Invalid HDMA source: %04X", gb->memory.hdmaSource);
		return;
	}
	gb->memory.hdmaDest &= 0x1FF0;
	gb->memory.hdmaDest |= 0x8000;
	bool wasHdma = gb->memory.isHdma;
	gb->memory.isHdma = value & 0x80;
	if ((!wasHdma && !gb->memory.isHdma) || gb->video.mode == 0) {
		gb->memory.hdmaRemaining = ((value & 0x7F) + 1) * 0x10;
		gb->cpuBlocked = true;
		mTimingSchedule(&gb->timing, &gb->memory.hdmaEvent, 0);
		gb->cpu->nextEvent = gb->cpu->cycles;
	}
}

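// Timing callback for OAM DMA: copies one byte to OAM and reschedules itself
// every 4 cycles until all 0xA0 bytes are done. dmaRemaining is cleared around
// the load so the DMA's own read is not treated as blocked.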
void _GBMemoryDMAService(struct mTiming* timing, void* context, uint32_t cyclesLate) {
	struct GB* gb = context;
	int dmaRemaining = gb->memory.dmaRemaining;
	gb->memory.dmaRemaining = 0;
	uint8_t b = GBLoad8(gb->cpu, gb->memory.dmaSource);
	// TODO: Can DMA write OAM during modes 2-3?
	gb->video.oam.raw[gb->memory.dmaDest] = b;
	gb->video.renderer->writeOAM(gb->video.renderer, gb->memory.dmaDest);
	++gb->memory.dmaSource;
	++gb->memory.dmaDest;
	gb->memory.dmaRemaining = dmaRemaining - 1;
	if (gb->memory.dmaRemaining) {
		mTimingSchedule(timing, &gb->memory.dmaEvent, 4 - cyclesLate);
	}
}

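// Timing callback for (H)DMA: copies one byte every 2 cycles with the CPU
// blocked, then writes the updated source/destination back into HDMA1-HDMA4
// and updates HDMA5 once the current block finishes.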
void _GBMemoryHDMAService(struct mTiming* timing, void* context, uint32_t cyclesLate) {
	struct GB* gb = context;
	gb->cpuBlocked = true;
	uint8_t b = gb->cpu->memory.load8(gb->cpu, gb->memory.hdmaSource);
	gb->cpu->memory.store8(gb->cpu, gb->memory.hdmaDest, b);
	++gb->memory.hdmaSource;
	++gb->memory.hdmaDest;
	--gb->memory.hdmaRemaining;
	if (gb->memory.hdmaRemaining) {
		mTimingDeschedule(timing, &gb->memory.hdmaEvent);
		mTimingSchedule(timing, &gb->memory.hdmaEvent, 2 - cyclesLate);
	} else {
		gb->cpuBlocked = false;
		gb->memory.io[REG_HDMA1] = gb->memory.hdmaSource >> 8;
		gb->memory.io[REG_HDMA2] = gb->memory.hdmaSource;
		gb->memory.io[REG_HDMA3] = gb->memory.hdmaDest >> 8;
		gb->memory.io[REG_HDMA4] = gb->memory.hdmaDest;
		if (gb->memory.isHdma) {
			--gb->memory.io[REG_HDMA5];
			if (gb->memory.io[REG_HDMA5] == 0xFF) {
				gb->memory.isHdma = false;
			}
		} else {
			gb->memory.io[REG_HDMA5] = 0xFF;
		}
	}
}

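// Patch a byte in place (used for cheats and the debugger), returning the
// previous value through `old`. Patching ROM first forces a copy-on-write of
// the memory-mapped image (_pristineCow).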
void GBPatch8(struct LR35902Core* cpu, uint16_t address, int8_t value, int8_t* old, int segment) {
	struct GB* gb = (struct GB*) cpu->master;
	struct GBMemory* memory = &gb->memory;
	int8_t oldValue = -1;

	switch (address >> 12) {
	case GB_REGION_CART_BANK0:
	case GB_REGION_CART_BANK0 + 1:
	case GB_REGION_CART_BANK0 + 2:
	case GB_REGION_CART_BANK0 + 3:
		_pristineCow(gb);
		oldValue = memory->romBase[address & (GB_SIZE_CART_BANK0 - 1)];
		memory->romBase[address & (GB_SIZE_CART_BANK0 - 1)] = value;
		break;
	case GB_REGION_CART_BANK1:
	case GB_REGION_CART_BANK1 + 1:
	case GB_REGION_CART_BANK1 + 2:
	case GB_REGION_CART_BANK1 + 3:
		_pristineCow(gb);
		if (segment < 0) {
			oldValue = memory->romBank[address & (GB_SIZE_CART_BANK0 - 1)];
			memory->romBank[address & (GB_SIZE_CART_BANK0 - 1)] = value;
		} else if ((size_t) segment * GB_SIZE_CART_BANK0 < memory->romSize) {
			oldValue = memory->rom[(address & (GB_SIZE_CART_BANK0 - 1)) + segment * GB_SIZE_CART_BANK0];
			memory->rom[(address & (GB_SIZE_CART_BANK0 - 1)) + segment * GB_SIZE_CART_BANK0] = value;
		} else {
			return;
		}
		break;
	case GB_REGION_VRAM:
	case GB_REGION_VRAM + 1:
		if (segment < 0) {
			oldValue = gb->video.vramBank[address & (GB_SIZE_VRAM_BANK0 - 1)];
			gb->video.vramBank[address & (GB_SIZE_VRAM_BANK0 - 1)] = value;
			gb->video.renderer->writeVRAM(gb->video.renderer, (address & (GB_SIZE_VRAM_BANK0 - 1)) + GB_SIZE_VRAM_BANK0 * gb->video.vramCurrentBank);
		} else if (segment < 2) {
			oldValue = gb->video.vram[(address & (GB_SIZE_VRAM_BANK0 - 1)) + segment * GB_SIZE_VRAM_BANK0];
			gb->video.vram[(address & (GB_SIZE_VRAM_BANK0 - 1)) + segment * GB_SIZE_VRAM_BANK0] = value;
			gb->video.renderer->writeVRAM(gb->video.renderer, (address & (GB_SIZE_VRAM_BANK0 - 1)) + segment * GB_SIZE_VRAM_BANK0);
		} else {
			return;
		}
		break;
	case GB_REGION_EXTERNAL_RAM:
	case GB_REGION_EXTERNAL_RAM + 1:
		mLOG(GB_MEM, STUB, "Unimplemented memory Patch8: 0x%08X", address);
		return;
	case GB_REGION_WORKING_RAM_BANK0:
	case GB_REGION_WORKING_RAM_BANK0 + 2:
		oldValue = memory->wram[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
		memory->wram[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
		break;
	case GB_REGION_WORKING_RAM_BANK1:
		if (segment < 0) {
			oldValue = memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
			memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
		} else if (segment < 8) {
			oldValue = memory->wram[(address & (GB_SIZE_WORKING_RAM_BANK0 - 1)) + segment * GB_SIZE_WORKING_RAM_BANK0];
			memory->wram[(address & (GB_SIZE_WORKING_RAM_BANK0 - 1)) + segment * GB_SIZE_WORKING_RAM_BANK0] = value;
		} else {
			return;
		}
		break;
	default:
		if (address < GB_BASE_OAM) {
			oldValue = memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
			memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
		} else if (address < GB_BASE_UNUSABLE) {
			oldValue = gb->video.oam.raw[address & 0xFF];
			gb->video.oam.raw[address & 0xFF] = value;
			gb->video.renderer->writeOAM(gb->video.renderer, address & 0xFF);
		} else if (address < GB_BASE_HRAM) {
			mLOG(GB_MEM, STUB, "Unimplemented memory Patch8: 0x%08X", address);
			return;
		} else if (address < GB_BASE_IE) {
			oldValue = memory->hram[address & GB_SIZE_HRAM];
			memory->hram[address & GB_SIZE_HRAM] = value;
		} else {
			mLOG(GB_MEM, STUB, "Unimplemented memory Patch8: 0x%08X", address);
			return;
		}
	}
	if (old) {
		*old = oldValue;
	}
}

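// Save-state helpers: multi-byte fields go through the STORE_/LOAD_*LE macros
// so states stay byte-for-byte identical regardless of host endianness.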
void GBMemorySerialize(const struct GB* gb, struct GBSerializedState* state) {
	const struct GBMemory* memory = &gb->memory;
	memcpy(state->wram, memory->wram, GB_SIZE_WORKING_RAM);
	memcpy(state->hram, memory->hram, GB_SIZE_HRAM);
	STORE_16LE(memory->currentBank, 0, &state->memory.currentBank);
	state->memory.wramCurrentBank = memory->wramCurrentBank;
	state->memory.sramCurrentBank = memory->sramCurrentBank;

	STORE_16LE(memory->dmaSource, 0, &state->memory.dmaSource);
	STORE_16LE(memory->dmaDest, 0, &state->memory.dmaDest);

	STORE_16LE(memory->hdmaSource, 0, &state->memory.hdmaSource);
	STORE_16LE(memory->hdmaDest, 0, &state->memory.hdmaDest);

	STORE_16LE(memory->hdmaRemaining, 0, &state->memory.hdmaRemaining);
	state->memory.dmaRemaining = memory->dmaRemaining;
	memcpy(state->memory.rtcRegs, memory->rtcRegs, sizeof(state->memory.rtcRegs));

	STORE_32LE(memory->dmaEvent.when - mTimingCurrentTime(&gb->timing), 0, &state->memory.dmaNext);
	STORE_32LE(memory->hdmaEvent.when - mTimingCurrentTime(&gb->timing), 0, &state->memory.hdmaNext);

	GBSerializedMemoryFlags flags = 0;
	flags = GBSerializedMemoryFlagsSetSramAccess(flags, memory->sramAccess);
	flags = GBSerializedMemoryFlagsSetRtcAccess(flags, memory->rtcAccess);
	flags = GBSerializedMemoryFlagsSetRtcLatched(flags, memory->rtcLatched);
	flags = GBSerializedMemoryFlagsSetIme(flags, memory->ime);
	flags = GBSerializedMemoryFlagsSetIsHdma(flags, memory->isHdma);
	flags = GBSerializedMemoryFlagsSetActiveRtcReg(flags, memory->activeRtcReg);
	STORE_16LE(flags, 0, &state->memory.flags);
}

void GBMemoryDeserialize(struct GB* gb, const struct GBSerializedState* state) {
	struct GBMemory* memory = &gb->memory;
	memcpy(memory->wram, state->wram, GB_SIZE_WORKING_RAM);
	memcpy(memory->hram, state->hram, GB_SIZE_HRAM);
	LOAD_16LE(memory->currentBank, 0, &state->memory.currentBank);
	memory->wramCurrentBank = state->memory.wramCurrentBank;
	memory->sramCurrentBank = state->memory.sramCurrentBank;

	GBMBCSwitchBank(gb, memory->currentBank);
	GBMemorySwitchWramBank(memory, memory->wramCurrentBank);
	GBMBCSwitchSramBank(gb, memory->sramCurrentBank);

	LOAD_16LE(memory->dmaSource, 0, &state->memory.dmaSource);
	LOAD_16LE(memory->dmaDest, 0, &state->memory.dmaDest);

	LOAD_16LE(memory->hdmaSource, 0, &state->memory.hdmaSource);
	LOAD_16LE(memory->hdmaDest, 0, &state->memory.hdmaDest);

	LOAD_16LE(memory->hdmaRemaining, 0, &state->memory.hdmaRemaining);
	memory->dmaRemaining = state->memory.dmaRemaining;
	memcpy(memory->rtcRegs, state->memory.rtcRegs, sizeof(state->memory.rtcRegs));

	uint32_t when;
	LOAD_32LE(when, 0, &state->memory.dmaNext);
	if (memory->dmaRemaining) {
		mTimingSchedule(&gb->timing, &memory->dmaEvent, when);
	}
	LOAD_32LE(when, 0, &state->memory.hdmaNext);
	if (memory->hdmaRemaining) {
		mTimingSchedule(&gb->timing, &memory->hdmaEvent, when);
	}

	GBSerializedMemoryFlags flags;
	LOAD_16LE(flags, 0, &state->memory.flags);
	memory->sramAccess = GBSerializedMemoryFlagsGetSramAccess(flags);
	memory->rtcAccess = GBSerializedMemoryFlagsGetRtcAccess(flags);
	memory->rtcLatched = GBSerializedMemoryFlagsGetRtcLatched(flags);
	memory->ime = GBSerializedMemoryFlagsGetIme(flags);
	memory->isHdma = GBSerializedMemoryFlagsGetIsHdma(flags);
	memory->activeRtcReg = GBSerializedMemoryFlagsGetActiveRtcReg(flags);
}

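// Copy-on-write of the ROM: while the ROM is still pristine (typically the
// memory-mapped ROM file), replace it with a private anonymous mapping padded
// to GB_SIZE_CART_MAX with 0xFF before the first patch touches it.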
void _pristineCow(struct GB* gb) {
	if (!gb->isPristine) {
		return;
	}
	void* newRom = anonymousMemoryMap(GB_SIZE_CART_MAX);
	memcpy(newRom, gb->memory.rom, gb->memory.romSize);
	memset(((uint8_t*) newRom) + gb->memory.romSize, 0xFF, GB_SIZE_CART_MAX - gb->memory.romSize);
	if (gb->memory.rom == gb->memory.romBase) {
		gb->memory.romBase = newRom;
	}
	gb->memory.rom = newRom;
	GBMBCSwitchBank(gb, gb->memory.currentBank);
	gb->isPristine = false;
}