src/gb/memory.c (view raw)
1/* Copyright (c) 2013-2016 Jeffrey Pfau
2 *
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6#include <mgba/internal/gb/memory.h>
7
8#include <mgba/core/interface.h>
9#include <mgba/internal/gb/gb.h>
10#include <mgba/internal/gb/io.h>
11#include <mgba/internal/gb/mbc.h>
12#include <mgba/internal/gb/serialize.h>
13#include <mgba/internal/lr35902/lr35902.h>
14
15#include <mgba-util/memory.h>
16
17mLOG_DEFINE_CATEGORY(GB_MEM, "GB Memory", "gb.memory");
18
19static void _pristineCow(struct GB* gba);
20
21static uint8_t GBFastLoad8(struct LR35902Core* cpu, uint16_t address) {
22 if (UNLIKELY(address >= cpu->memory.activeRegionEnd)) {
23 cpu->memory.setActiveRegion(cpu, address);
24 return cpu->memory.cpuLoad8(cpu, address);
25 }
26 return cpu->memory.activeRegion[address & cpu->memory.activeMask];
27}
28
29static void GBSetActiveRegion(struct LR35902Core* cpu, uint16_t address) {
30 struct GB* gb = (struct GB*) cpu->master;
31 struct GBMemory* memory = &gb->memory;
32 switch (address >> 12) {
33 case GB_REGION_CART_BANK0:
34 case GB_REGION_CART_BANK0 + 1:
35 case GB_REGION_CART_BANK0 + 2:
36 case GB_REGION_CART_BANK0 + 3:
37 cpu->memory.cpuLoad8 = GBFastLoad8;
38 cpu->memory.activeRegion = memory->romBase;
39 cpu->memory.activeRegionEnd = GB_BASE_CART_BANK1;
40 cpu->memory.activeMask = GB_SIZE_CART_BANK0 - 1;
41 break;
42 case GB_REGION_CART_BANK1:
43 case GB_REGION_CART_BANK1 + 1:
44 case GB_REGION_CART_BANK1 + 2:
45 case GB_REGION_CART_BANK1 + 3:
46 cpu->memory.cpuLoad8 = GBFastLoad8;
47 cpu->memory.activeRegion = memory->romBank;
48 cpu->memory.activeRegionEnd = GB_BASE_VRAM;
49 cpu->memory.activeMask = GB_SIZE_CART_BANK0 - 1;
50 break;
51 default:
52 cpu->memory.cpuLoad8 = GBLoad8;
53 break;
54 }
55}
56
57static void _GBMemoryDMAService(struct mTiming* timing, void* context, uint32_t cyclesLate);
58static void _GBMemoryHDMAService(struct mTiming* timing, void* context, uint32_t cyclesLate);
59
60void GBMemoryInit(struct GB* gb) {
61 struct LR35902Core* cpu = gb->cpu;
62 cpu->memory.cpuLoad8 = GBLoad8;
63 cpu->memory.load8 = GBLoad8;
64 cpu->memory.store8 = GBStore8;
65 cpu->memory.setActiveRegion = GBSetActiveRegion;
66
67 gb->memory.wram = 0;
68 gb->memory.wramBank = 0;
69 gb->memory.rom = 0;
70 gb->memory.romBank = 0;
71 gb->memory.romSize = 0;
72 gb->memory.sram = 0;
73 gb->memory.mbcType = GB_MBC_AUTODETECT;
74 gb->memory.mbc = 0;
75
76 gb->memory.rtc = NULL;
77
78 GBIOInit(gb);
79}
80
81void GBMemoryDeinit(struct GB* gb) {
82 mappedMemoryFree(gb->memory.wram, GB_SIZE_WORKING_RAM);
83 if (gb->memory.rom) {
84 mappedMemoryFree(gb->memory.rom, gb->memory.romSize);
85 }
86}
87
// Returns the memory subsystem to power-on state: remaps WRAM (with the CGB
// startup fill pattern), resets bank mappings and DMA/HDMA state, and
// re-registers the DMA timing events.
void GBMemoryReset(struct GB* gb) {
	if (gb->memory.wram) {
		mappedMemoryFree(gb->memory.wram, GB_SIZE_WORKING_RAM);
	}
	gb->memory.wram = anonymousMemoryMap(GB_SIZE_WORKING_RAM);
	if (gb->model >= GB_MODEL_CGB) {
		// Fill WRAM with an alternating startup pattern: it inverts every
		// 0x800 bytes, and within each 16-byte group the second half is the
		// complement of the first.
		uint32_t* base = (uint32_t*) gb->memory.wram;
		size_t i;
		uint32_t pattern = 0;
		for (i = 0; i < GB_SIZE_WORKING_RAM / 4; i += 4) {
			if ((i & 0x1FF) == 0) {
				pattern = ~pattern;
			}
			base[i + 0] = pattern;
			base[i + 1] = pattern;
			base[i + 2] = ~pattern;
			base[i + 3] = ~pattern;
		}
	}
	GBMemorySwitchWramBank(&gb->memory, 1);
	// NOTE(review): assumes a ROM is already loaded; rom == NULL here would
	// make this pointer invalid. Confirm callers guarantee the mapping.
	gb->memory.romBank = &gb->memory.rom[GB_SIZE_CART_BANK0];
	gb->memory.currentBank = 1;
	gb->memory.sramCurrentBank = 0;

	gb->memory.ime = false;
	gb->memory.ie = 0;

	// No OAM DMA or HDMA transfer is in flight after reset.
	gb->memory.dmaRemaining = 0;
	gb->memory.dmaSource = 0;
	gb->memory.dmaDest = 0;
	gb->memory.hdmaRemaining = 0;
	gb->memory.hdmaSource = 0;
	gb->memory.hdmaDest = 0;
	gb->memory.isHdma = false;


	// (Re-)register the timing events that drive byte-at-a-time DMA.
	gb->memory.dmaEvent.context = gb;
	gb->memory.dmaEvent.name = "GB DMA";
	gb->memory.dmaEvent.callback = _GBMemoryDMAService;
	gb->memory.dmaEvent.priority = 0x40;
	gb->memory.hdmaEvent.context = gb;
	gb->memory.hdmaEvent.name = "GB HDMA";
	gb->memory.hdmaEvent.callback = _GBMemoryHDMAService;
	gb->memory.hdmaEvent.priority = 0x41;

	gb->memory.sramAccess = false;
	gb->memory.rtcAccess = false;
	gb->memory.activeRtcReg = 0;
	gb->memory.rtcLatched = false;
	memset(&gb->memory.rtcRegs, 0, sizeof(gb->memory.rtcRegs));

	memset(&gb->memory.hram, 0, sizeof(gb->memory.hram));
	memset(&gb->memory.mbcState, 0, sizeof(gb->memory.mbcState));

	GBMBCInit(gb);
	gb->memory.sramBank = gb->memory.sram;

	// If the WRAM map failed, tear everything down rather than continue with
	// a null WRAM pointer.
	if (!gb->memory.wram) {
		GBMemoryDeinit(gb);
	}
}
149
150void GBMemorySwitchWramBank(struct GBMemory* memory, int bank) {
151 bank &= 7;
152 if (!bank) {
153 bank = 1;
154 }
155 memory->wramBank = &memory->wram[GB_SIZE_WORKING_RAM_BANK0 * bank];
156 memory->wramCurrentBank = bank;
157}
158
// Generic bus read decoding the full 16-bit address space. Installed as the
// slow-path load handler; GBFastLoad8 short-circuits ROM fetches.
uint8_t GBLoad8(struct LR35902Core* cpu, uint16_t address) {
	struct GB* gb = (struct GB*) cpu->master;
	struct GBMemory* memory = &gb->memory;
	switch (address >> 12) {
	case GB_REGION_CART_BANK0:
	case GB_REGION_CART_BANK0 + 1:
	case GB_REGION_CART_BANK0 + 2:
	case GB_REGION_CART_BANK0 + 3:
		// 0x0000-0x3FFF: fixed ROM bank (via romBase).
		return memory->romBase[address & (GB_SIZE_CART_BANK0 - 1)];
	case GB_REGION_CART_BANK1:
	case GB_REGION_CART_BANK1 + 1:
	case GB_REGION_CART_BANK1 + 2:
	case GB_REGION_CART_BANK1 + 3:
		// 0x4000-0x7FFF: switchable ROM bank.
		return memory->romBank[address & (GB_SIZE_CART_BANK0 - 1)];
	case GB_REGION_VRAM:
	case GB_REGION_VRAM + 1:
		// 0x8000-0x9FFF: VRAM (currently mapped bank on CGB).
		return gb->video.vramBank[address & (GB_SIZE_VRAM_BANK0 - 1)];
	case GB_REGION_EXTERNAL_RAM:
	case GB_REGION_EXTERNAL_RAM + 1:
		// 0xA000-0xBFFF: cartridge RAM, RTC registers, or MBC peripherals,
		// depending on what the MBC currently exposes.
		if (memory->rtcAccess) {
			return memory->rtcRegs[memory->activeRtcReg];
		} else if (memory->sramAccess) {
			return memory->sramBank[address & (GB_SIZE_EXTERNAL_RAM - 1)];
		} else if (memory->mbcType == GB_MBC7) {
			return GBMBC7Read(memory, address);
		} else if (memory->mbcType == GB_HuC3) {
			return 0x01; // TODO: Is this supposed to be the current SRAM bank?
		}
		// Disabled external RAM reads as open bus.
		return 0xFF;
	case GB_REGION_WORKING_RAM_BANK0:
	case GB_REGION_WORKING_RAM_BANK0 + 2:
		// 0xC000-0xCFFF and its 0xE000 echo: WRAM bank 0.
		return memory->wram[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
	case GB_REGION_WORKING_RAM_BANK1:
		// 0xD000-0xDFFF: switchable WRAM bank.
		return memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
	default:
		if (address < GB_BASE_OAM) {
			// 0xF000-0xFDFF: echo of the switchable WRAM bank.
			return memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
		}
		if (address < GB_BASE_UNUSABLE) {
			// 0xFE00-0xFE9F: OAM is only CPU-readable in modes 0 and 1.
			if (gb->video.mode < 2) {
				return gb->video.oam.raw[address & 0xFF];
			}
			return 0xFF;
		}
		if (address < GB_BASE_IO) {
			mLOG(GB_MEM, GAME_ERROR, "Attempt to read from unusable memory: %04X", address);
			return 0xFF;
		}
		if (address < GB_BASE_HRAM) {
			// 0xFF00-0xFF7F: memory-mapped I/O registers.
			return GBIORead(gb, address & (GB_SIZE_IO - 1));
		}
		if (address < GB_BASE_IE) {
			// 0xFF80-0xFFFE: high RAM.
			return memory->hram[address & GB_SIZE_HRAM];
		}
		// 0xFFFF: interrupt enable register.
		return GBIORead(gb, REG_IE);
	}
}
216
// Generic bus write decoding the full 16-bit address space.
// `value` is int8_t to match the LR35902 store8 callback signature.
void GBStore8(struct LR35902Core* cpu, uint16_t address, int8_t value) {
	struct GB* gb = (struct GB*) cpu->master;
	struct GBMemory* memory = &gb->memory;
	switch (address >> 12) {
	case GB_REGION_CART_BANK0:
	case GB_REGION_CART_BANK0 + 1:
	case GB_REGION_CART_BANK0 + 2:
	case GB_REGION_CART_BANK0 + 3:
	case GB_REGION_CART_BANK1:
	case GB_REGION_CART_BANK1 + 1:
	case GB_REGION_CART_BANK1 + 2:
	case GB_REGION_CART_BANK1 + 3:
		// 0x0000-0x7FFF: ROM writes are MBC control commands. The MBC may
		// have remapped banks, so refresh the fast-fetch region cache.
		memory->mbc(gb, address, value);
		cpu->memory.setActiveRegion(cpu, cpu->pc);
		return;
	case GB_REGION_VRAM:
	case GB_REGION_VRAM + 1:
		// 0x8000-0x9FFF: VRAM; notify the renderer of the dirtied address.
		gb->video.renderer->writeVRAM(gb->video.renderer, (address & (GB_SIZE_VRAM_BANK0 - 1)) | (GB_SIZE_VRAM_BANK0 * gb->video.vramCurrentBank));
		gb->video.vramBank[address & (GB_SIZE_VRAM_BANK0 - 1)] = value;
		return;
	case GB_REGION_EXTERNAL_RAM:
	case GB_REGION_EXTERNAL_RAM + 1:
		// 0xA000-0xBFFF: cartridge RAM / RTC / MBC7 EEPROM.
		if (memory->rtcAccess) {
			memory->rtcRegs[memory->activeRtcReg] = value;
		} else if (memory->sramAccess) {
			memory->sramBank[address & (GB_SIZE_EXTERNAL_RAM - 1)] = value;
		} else if (memory->mbcType == GB_MBC7) {
			GBMBC7Write(memory, address, value);
		}
		// Flag the save data as dirty so the frontend flushes it.
		gb->sramDirty |= GB_SRAM_DIRT_NEW;
		return;
	case GB_REGION_WORKING_RAM_BANK0:
	case GB_REGION_WORKING_RAM_BANK0 + 2:
		// 0xC000-0xCFFF and its 0xE000 echo: WRAM bank 0.
		memory->wram[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
		return;
	case GB_REGION_WORKING_RAM_BANK1:
		memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
		return;
	default:
		if (address < GB_BASE_OAM) {
			// 0xF000-0xFDFF: echo of the switchable WRAM bank.
			memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
		} else if (address < GB_BASE_UNUSABLE) {
			// OAM is only CPU-writable in modes 0 and 1.
			if (gb->video.mode < 2) {
				gb->video.oam.raw[address & 0xFF] = value;
				gb->video.renderer->writeOAM(gb->video.renderer, address & 0xFF);
			}
		} else if (address < GB_BASE_IO) {
			mLOG(GB_MEM, GAME_ERROR, "Attempt to write to unusable memory: %04X:%02X", address, value);
		} else if (address < GB_BASE_HRAM) {
			GBIOWrite(gb, address & (GB_SIZE_IO - 1), value);
		} else if (address < GB_BASE_IE) {
			memory->hram[address & GB_SIZE_HRAM] = value;
		} else {
			GBIOWrite(gb, REG_IE, value);
		}
	}
}
// Debugger/inspection read. Like GBLoad8, but with an explicit bank
// selector: segment < 0 reads the currently mapped bank; segment >= 0
// indexes a specific bank, reading 0xFF when out of range.
uint8_t GBView8(struct LR35902Core* cpu, uint16_t address, int segment) {
	struct GB* gb = (struct GB*) cpu->master;
	struct GBMemory* memory = &gb->memory;
	switch (address >> 12) {
	case GB_REGION_CART_BANK0:
	case GB_REGION_CART_BANK0 + 1:
	case GB_REGION_CART_BANK0 + 2:
	case GB_REGION_CART_BANK0 + 3:
		return memory->romBase[address & (GB_SIZE_CART_BANK0 - 1)];
	case GB_REGION_CART_BANK1:
	case GB_REGION_CART_BANK1 + 1:
	case GB_REGION_CART_BANK1 + 2:
	case GB_REGION_CART_BANK1 + 3:
		// Banked ROM: segment selects which ROM bank to inspect.
		if (segment < 0) {
			return memory->romBank[address & (GB_SIZE_CART_BANK0 - 1)];
		} else if ((size_t) segment * GB_SIZE_CART_BANK0 < memory->romSize) {
			return memory->rom[(address & (GB_SIZE_CART_BANK0 - 1)) + segment * GB_SIZE_CART_BANK0];
		} else {
			return 0xFF;
		}
	case GB_REGION_VRAM:
	case GB_REGION_VRAM + 1:
		// VRAM has at most two banks (CGB).
		if (segment < 0) {
			return gb->video.vramBank[address & (GB_SIZE_VRAM_BANK0 - 1)];
		} else if (segment < 2) {
			return gb->video.vram[(address & (GB_SIZE_VRAM_BANK0 - 1)) + segment *GB_SIZE_VRAM_BANK0];
		} else {
			return 0xFF;
		}
	case GB_REGION_EXTERNAL_RAM:
	case GB_REGION_EXTERNAL_RAM + 1:
		if (memory->rtcAccess) {
			return memory->rtcRegs[memory->activeRtcReg];
		} else if (memory->sramAccess) {
			if (segment < 0) {
				return memory->sramBank[address & (GB_SIZE_EXTERNAL_RAM - 1)];
			} else if ((size_t) segment * GB_SIZE_EXTERNAL_RAM < gb->sramSize) {
				return memory->sram[(address & (GB_SIZE_EXTERNAL_RAM - 1)) + segment *GB_SIZE_EXTERNAL_RAM];
			} else {
				return 0xFF;
			}
		} else if (memory->mbcType == GB_MBC7) {
			return GBMBC7Read(memory, address);
		} else if (memory->mbcType == GB_HuC3) {
			return 0x01; // TODO: Is this supposed to be the current SRAM bank?
		}
		return 0xFF;
	case GB_REGION_WORKING_RAM_BANK0:
	case GB_REGION_WORKING_RAM_BANK0 + 2:
		return memory->wram[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
	case GB_REGION_WORKING_RAM_BANK1:
		// WRAM has 8 banks; bank 0 here views the raw array start.
		if (segment < 0) {
			return memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
		} else if (segment < 8) {
			return memory->wram[(address & (GB_SIZE_WORKING_RAM_BANK0 - 1)) + segment *GB_SIZE_WORKING_RAM_BANK0];
		} else {
			return 0xFF;
		}
	default:
		if (address < GB_BASE_OAM) {
			return memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
		}
		if (address < GB_BASE_UNUSABLE) {
			// OAM views are still gated on the video mode, like real reads.
			if (gb->video.mode < 2) {
				return gb->video.oam.raw[address & 0xFF];
			}
			return 0xFF;
		}
		if (address < GB_BASE_IO) {
			mLOG(GB_MEM, GAME_ERROR, "Attempt to read from unusable memory: %04X", address);
			return 0xFF;
		}
		if (address < GB_BASE_HRAM) {
			// NOTE(review): GBIORead may have side effects on some registers;
			// verify that is acceptable for a debugger view.
			return GBIORead(gb, address & (GB_SIZE_IO - 1));
		}
		if (address < GB_BASE_IE) {
			return memory->hram[address & GB_SIZE_HRAM];
		}
		return GBIORead(gb, REG_IE);
	}
}
355
// Begins an OAM DMA transfer from `base` (the REG_DMA value << 8). Installs
// restricted bus handlers for the duration of the transfer.
void GBMemoryDMA(struct GB* gb, uint16_t base) {
	// NOTE(review): sources above 0xF100 are silently ignored; confirm this
	// cutoff against hardware tests.
	if (base > 0xF100) {
		return;
	}
	// While DMA runs, the CPU sees the bus through the GBDMA* handlers.
	gb->cpu->memory.store8 = GBDMAStore8;
	gb->cpu->memory.load8 = GBDMALoad8;
	gb->cpu->memory.cpuLoad8 = GBDMALoad8;
	// First byte is copied after an 8-cycle startup delay; make sure the CPU
	// yields in time for the event to fire.
	mTimingSchedule(&gb->timing, &gb->memory.dmaEvent, 8);
	if (gb->cpu->cycles + 8 < gb->cpu->nextEvent) {
		gb->cpu->nextEvent = gb->cpu->cycles + 8;
	}
	gb->memory.dmaSource = base;
	gb->memory.dmaDest = 0;
	gb->memory.dmaRemaining = 0xA0; // 160 bytes: the whole of OAM.
}
371
// Handles writes to REG_HDMA5: latches source/destination from HDMA1-4 and
// starts a general-purpose (GDMA) or HBlank-paced (HDMA) transfer.
void GBMemoryWriteHDMA5(struct GB* gb, uint8_t value) {
	// Latch source and destination; low nybbles are ignored.
	gb->memory.hdmaSource = gb->memory.io[REG_HDMA1] << 8;
	gb->memory.hdmaSource |= gb->memory.io[REG_HDMA2];
	gb->memory.hdmaDest = gb->memory.io[REG_HDMA3] << 8;
	gb->memory.hdmaDest |= gb->memory.io[REG_HDMA4];
	gb->memory.hdmaSource &= 0xFFF0;
	// Sourcing from VRAM is invalid; drop the request.
	if (gb->memory.hdmaSource >= 0x8000 && gb->memory.hdmaSource < 0xA000) {
		mLOG(GB_MEM, GAME_ERROR, "Invalid HDMA source: %04X", gb->memory.hdmaSource);
		return;
	}
	// Destination is forced into VRAM: 0x8000-0x9FF0.
	gb->memory.hdmaDest &= 0x1FF0;
	gb->memory.hdmaDest |= 0x8000;
	bool wasHdma = gb->memory.isHdma;
	gb->memory.isHdma = value & 0x80; // Bit 7 selects HBlank mode.
	// A GDMA request (neither old nor new mode is HBlank-paced), or any
	// request arriving during HBlank (mode 0), starts transferring now,
	// blocking the CPU. Length is ((value & 0x7F) + 1) 16-byte blocks.
	// NOTE(review): clearing bit 7 while HDMA is active should cancel the
	// transfer on hardware; this path appears to restart it -- verify.
	if ((!wasHdma && !gb->memory.isHdma) || gb->video.mode == 0) {
		gb->memory.hdmaRemaining = ((value & 0x7F) + 1) * 0x10;
		gb->cpuBlocked = true;
		mTimingSchedule(&gb->timing, &gb->memory.hdmaEvent, 0);
		gb->cpu->nextEvent = gb->cpu->cycles;
	}
}
393
// Timing callback: copies one byte of an active OAM DMA transfer, then
// reschedules itself until the 160-byte transfer completes.
void _GBMemoryDMAService(struct mTiming* timing, void* context, uint32_t cyclesLate) {
	struct GB* gb = context;
	uint8_t b = GBLoad8(gb->cpu, gb->memory.dmaSource);
	// TODO: Can DMA write OAM during modes 2-3?
	gb->video.oam.raw[gb->memory.dmaDest] = b;
	gb->video.renderer->writeOAM(gb->video.renderer, gb->memory.dmaDest);
	++gb->memory.dmaSource;
	++gb->memory.dmaDest;
	--gb->memory.dmaRemaining;
	if (gb->memory.dmaRemaining) {
		// One byte is transferred every 4 cycles.
		mTimingSchedule(timing, &gb->memory.dmaEvent, 4 - cyclesLate);
	} else {
		// Transfer done: restore the normal bus handlers.
		// NOTE(review): cpuLoad8 still points at GBDMALoad8 until the next
		// setActiveRegion call -- confirm this is intentional.
		gb->cpu->memory.store8 = GBStore8;
		gb->cpu->memory.load8 = GBLoad8;
	}
}
410
// Timing callback: copies one byte of an active GDMA/HDMA transfer, keeping
// the CPU blocked until the current block of the transfer finishes.
void _GBMemoryHDMAService(struct mTiming* timing, void* context, uint32_t cyclesLate) {
	struct GB* gb = context;
	gb->cpuBlocked = true;
	uint8_t b = gb->cpu->memory.load8(gb->cpu, gb->memory.hdmaSource);
	gb->cpu->memory.store8(gb->cpu, gb->memory.hdmaDest, b);
	++gb->memory.hdmaSource;
	++gb->memory.hdmaDest;
	--gb->memory.hdmaRemaining;
	if (gb->memory.hdmaRemaining) {
		// One byte every 2 cycles; deschedule first in case the event is
		// already pending.
		mTimingDeschedule(timing, &gb->memory.hdmaEvent);
		mTimingSchedule(timing, &gb->memory.hdmaEvent, 2 - cyclesLate);
	} else {
		gb->cpuBlocked = false;
		// Expose the advanced source/destination back through HDMA1-4.
		gb->memory.io[REG_HDMA1] = gb->memory.hdmaSource >> 8;
		gb->memory.io[REG_HDMA2] = gb->memory.hdmaSource;
		gb->memory.io[REG_HDMA3] = gb->memory.hdmaDest >> 8;
		gb->memory.io[REG_HDMA4] = gb->memory.hdmaDest;
		if (gb->memory.isHdma) {
			// HBlank mode: HDMA5 counts down one block per HBlank; reaching
			// 0xFF (underflow) marks the whole transfer as complete.
			--gb->memory.io[REG_HDMA5];
			if (gb->memory.io[REG_HDMA5] == 0xFF) {
				gb->memory.isHdma = false;
			}
		} else {
			// GDMA: report completion immediately.
			gb->memory.io[REG_HDMA5] = 0xFF;
		}
	}
}
438
// During OAM DMA, CPU accesses to the bus in use by the DMA engine are
// blocked. Each table is indexed by dmaSource >> 13 (8 KiB granularity) and
// gives the half-open [low, high) address range blocked for the CPU.
struct OAMBlock {
	uint16_t low;
	uint16_t high;
};

// DMG: DMA from ROM or WRAM blocks 0xA000-0xFE00; DMA from VRAM
// (source region 4: 0x8000-0x9FFF) blocks only 0x8000-0xA000.
static const struct OAMBlock _oamBlockDMG[] = {
	{ 0xA000, 0xFE00 },
	{ 0xA000, 0xFE00 },
	{ 0xA000, 0xFE00 },
	{ 0xA000, 0xFE00 },
	{ 0x8000, 0xA000 },
	{ 0xA000, 0xFE00 },
	{ 0xA000, 0xFE00 },
	{ 0xA000, 0xFE00 },
};

// CGB: the external bus (cart RAM) and WRAM are separate buses, so the
// blocked window depends more finely on the source region.
static const struct OAMBlock _oamBlockCGB[] = {
	{ 0xA000, 0xC000 },
	{ 0xA000, 0xC000 },
	{ 0xA000, 0xC000 },
	{ 0xA000, 0xC000 },
	{ 0x8000, 0xA000 },
	{ 0xA000, 0xC000 },
	{ 0xC000, 0xFE00 },
	{ 0xA000, 0xC000 },
};
465
466uint8_t GBDMALoad8(struct LR35902Core* cpu, uint16_t address) {
467 struct GB* gb = (struct GB*) cpu->master;
468 struct GBMemory* memory = &gb->memory;
469 const struct OAMBlock* block = gb->model < GB_MODEL_CGB ? _oamBlockDMG : _oamBlockCGB;
470 block = &block[memory->dmaSource >> 13];
471 if (address >= block->low && address < block->high) {
472 return 0xFF;
473 }
474 if (address >= GB_BASE_OAM && address < GB_BASE_UNUSABLE) {
475 return 0xFF;
476 }
477 return GBLoad8(cpu, address);
478}
479
480void GBDMAStore8(struct LR35902Core* cpu, uint16_t address, int8_t value) {
481 struct GB* gb = (struct GB*) cpu->master;
482 struct GBMemory* memory = &gb->memory;
483 const struct OAMBlock* block = gb->model < GB_MODEL_CGB ? _oamBlockDMG : _oamBlockCGB;
484 block = &block[memory->dmaSource >> 13];
485 if (address >= block->low && address < block->high) {
486 return;
487 }
488 if (address >= GB_BASE_OAM && address < GB_BASE_UNUSABLE) {
489 return;
490 }
491 GBStore8(cpu, address, value);
492}
493
494void GBPatch8(struct LR35902Core* cpu, uint16_t address, int8_t value, int8_t* old, int segment) {
495 struct GB* gb = (struct GB*) cpu->master;
496 struct GBMemory* memory = &gb->memory;
497 int8_t oldValue = -1;
498
499 switch (address >> 12) {
500 case GB_REGION_CART_BANK0:
501 case GB_REGION_CART_BANK0 + 1:
502 case GB_REGION_CART_BANK0 + 2:
503 case GB_REGION_CART_BANK0 + 3:
504 _pristineCow(gb);
505 oldValue = memory->romBase[address & (GB_SIZE_CART_BANK0 - 1)];
506 memory->romBase[address & (GB_SIZE_CART_BANK0 - 1)] = value;
507 break;
508 case GB_REGION_CART_BANK1:
509 case GB_REGION_CART_BANK1 + 1:
510 case GB_REGION_CART_BANK1 + 2:
511 case GB_REGION_CART_BANK1 + 3:
512 _pristineCow(gb);
513 if (segment < 0) {
514 oldValue = memory->romBank[address & (GB_SIZE_CART_BANK0 - 1)];
515 memory->romBank[address & (GB_SIZE_CART_BANK0 - 1)] = value;
516 } else if ((size_t) segment * GB_SIZE_CART_BANK0 < memory->romSize) {
517 oldValue = memory->rom[(address & (GB_SIZE_CART_BANK0 - 1)) + segment * GB_SIZE_CART_BANK0];
518 memory->rom[(address & (GB_SIZE_CART_BANK0 - 1)) + segment * GB_SIZE_CART_BANK0] = value;
519 } else {
520 return;
521 }
522 break;
523 case GB_REGION_VRAM:
524 case GB_REGION_VRAM + 1:
525 if (segment < 0) {
526 oldValue = gb->video.vramBank[address & (GB_SIZE_VRAM_BANK0 - 1)];
527 gb->video.vramBank[address & (GB_SIZE_VRAM_BANK0 - 1)] = value;
528 gb->video.renderer->writeVRAM(gb->video.renderer, (address & (GB_SIZE_VRAM_BANK0 - 1)) + GB_SIZE_VRAM_BANK0 * gb->video.vramCurrentBank);
529 } else if (segment < 2) {
530 oldValue = gb->video.vram[(address & (GB_SIZE_VRAM_BANK0 - 1)) + segment * GB_SIZE_VRAM_BANK0];
531 gb->video.vramBank[(address & (GB_SIZE_VRAM_BANK0 - 1)) + segment * GB_SIZE_VRAM_BANK0] = value;
532 gb->video.renderer->writeVRAM(gb->video.renderer, (address & (GB_SIZE_VRAM_BANK0 - 1)) + segment * GB_SIZE_VRAM_BANK0);
533 } else {
534 return;
535 }
536 break;
537 case GB_REGION_EXTERNAL_RAM:
538 case GB_REGION_EXTERNAL_RAM + 1:
539 mLOG(GB_MEM, STUB, "Unimplemented memory Patch8: 0x%08X", address);
540 return;
541 case GB_REGION_WORKING_RAM_BANK0:
542 case GB_REGION_WORKING_RAM_BANK0 + 2:
543 oldValue = memory->wram[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
544 memory->wram[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
545 break;
546 case GB_REGION_WORKING_RAM_BANK1:
547 if (segment < 0) {
548 oldValue = memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
549 memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
550 } else if (segment < 8) {
551 oldValue = memory->wram[(address & (GB_SIZE_WORKING_RAM_BANK0 - 1)) + segment * GB_SIZE_WORKING_RAM_BANK0];
552 memory->wram[(address & (GB_SIZE_WORKING_RAM_BANK0 - 1)) + segment * GB_SIZE_WORKING_RAM_BANK0] = value;
553 } else {
554 return;
555 }
556 break;
557 default:
558 if (address < GB_BASE_OAM) {
559 oldValue = memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
560 memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
561 } else if (address < GB_BASE_UNUSABLE) {
562 oldValue = gb->video.oam.raw[address & 0xFF];
563 gb->video.oam.raw[address & 0xFF] = value;
564 gb->video.renderer->writeOAM(gb->video.renderer, address & 0xFF);
565 } else if (address < GB_BASE_HRAM) {
566 mLOG(GB_MEM, STUB, "Unimplemented memory Patch8: 0x%08X", address);
567 return;
568 } else if (address < GB_BASE_IE) {
569 oldValue = memory->hram[address & GB_SIZE_HRAM];
570 memory->hram[address & GB_SIZE_HRAM] = value;
571 } else {
572 mLOG(GB_MEM, STUB, "Unimplemented memory Patch8: 0x%08X", address);
573 return;
574 }
575 }
576 if (old) {
577 *old = oldValue;
578 }
579}
580
// Copies the memory subsystem's state into a savestate buffer. Multi-byte
// fields are stored little-endian so states are portable across hosts.
void GBMemorySerialize(const struct GB* gb, struct GBSerializedState* state) {
	const struct GBMemory* memory = &gb->memory;
	memcpy(state->wram, memory->wram, GB_SIZE_WORKING_RAM);
	memcpy(state->hram, memory->hram, GB_SIZE_HRAM);
	STORE_16LE(memory->currentBank, 0, &state->memory.currentBank);
	state->memory.wramCurrentBank = memory->wramCurrentBank;
	state->memory.sramCurrentBank = memory->sramCurrentBank;

	STORE_16LE(memory->dmaSource, 0, &state->memory.dmaSource);
	STORE_16LE(memory->dmaDest, 0, &state->memory.dmaDest);

	STORE_16LE(memory->hdmaSource, 0, &state->memory.hdmaSource);
	STORE_16LE(memory->hdmaDest, 0, &state->memory.hdmaDest);

	STORE_16LE(memory->hdmaRemaining, 0, &state->memory.hdmaRemaining);
	state->memory.dmaRemaining = memory->dmaRemaining;
	memcpy(state->memory.rtcRegs, memory->rtcRegs, sizeof(state->memory.rtcRegs));

	// Pending event times are stored relative to the current time so they
	// can be rescheduled on load.
	STORE_32LE(memory->dmaEvent.when - mTimingCurrentTime(&gb->timing), 0, &state->memory.dmaNext);
	STORE_32LE(memory->hdmaEvent.when - mTimingCurrentTime(&gb->timing), 0, &state->memory.hdmaNext);

	// Pack the boolean/bitfield state into a single flags word.
	GBSerializedMemoryFlags flags = 0;
	flags = GBSerializedMemoryFlagsSetSramAccess(flags, memory->sramAccess);
	flags = GBSerializedMemoryFlagsSetRtcAccess(flags, memory->rtcAccess);
	flags = GBSerializedMemoryFlagsSetRtcLatched(flags, memory->rtcLatched);
	flags = GBSerializedMemoryFlagsSetIme(flags, memory->ime);
	flags = GBSerializedMemoryFlagsSetIsHdma(flags, memory->isHdma);
	flags = GBSerializedMemoryFlagsSetActiveRtcReg(flags, memory->activeRtcReg);
	STORE_16LE(flags, 0, &state->memory.flags);
}
611
// Restores the memory subsystem from a savestate: loads raw RAM contents,
// re-applies the bank mappings through the MBC helpers, and reschedules any
// DMA/HDMA events that were in flight.
void GBMemoryDeserialize(struct GB* gb, const struct GBSerializedState* state) {
	struct GBMemory* memory = &gb->memory;
	memcpy(memory->wram, state->wram, GB_SIZE_WORKING_RAM);
	memcpy(memory->hram, state->hram, GB_SIZE_HRAM);
	LOAD_16LE(memory->currentBank, 0, &state->memory.currentBank);
	memory->wramCurrentBank = state->memory.wramCurrentBank;
	memory->sramCurrentBank = state->memory.sramCurrentBank;

	// Re-derive the bank pointers from the loaded bank numbers.
	GBMBCSwitchBank(gb, memory->currentBank);
	GBMemorySwitchWramBank(memory, memory->wramCurrentBank);
	GBMBCSwitchSramBank(gb, memory->sramCurrentBank);

	LOAD_16LE(memory->dmaSource, 0, &state->memory.dmaSource);
	LOAD_16LE(memory->dmaDest, 0, &state->memory.dmaDest);

	LOAD_16LE(memory->hdmaSource, 0, &state->memory.hdmaSource);
	LOAD_16LE(memory->hdmaDest, 0, &state->memory.hdmaDest);

	LOAD_16LE(memory->hdmaRemaining, 0, &state->memory.hdmaRemaining);
	memory->dmaRemaining = state->memory.dmaRemaining;
	memcpy(memory->rtcRegs, state->memory.rtcRegs, sizeof(state->memory.rtcRegs));

	// Reschedule DMA events only if a transfer was actually in progress.
	uint32_t when;
	LOAD_32LE(when, 0, &state->memory.dmaNext);
	if (memory->dmaRemaining) {
		mTimingSchedule(&gb->timing, &memory->dmaEvent, when);
	}
	LOAD_32LE(when, 0, &state->memory.hdmaNext);
	if (memory->hdmaRemaining) {
		mTimingSchedule(&gb->timing, &memory->hdmaEvent, when);
	}

	// Unpack the flags word back into the individual fields.
	GBSerializedMemoryFlags flags;
	LOAD_16LE(flags, 0, &state->memory.flags);
	memory->sramAccess = GBSerializedMemoryFlagsGetSramAccess(flags);
	memory->rtcAccess = GBSerializedMemoryFlagsGetRtcAccess(flags);
	memory->rtcLatched = GBSerializedMemoryFlagsGetRtcLatched(flags);
	memory->ime = GBSerializedMemoryFlagsGetIme(flags);
	memory->isHdma = GBSerializedMemoryFlagsGetIsHdma(flags);
	memory->activeRtcReg = GBSerializedMemoryFlagsGetActiveRtcReg(flags);
}
653
654void _pristineCow(struct GB* gb) {
655 if (!gb->isPristine) {
656 return;
657 }
658 void* newRom = anonymousMemoryMap(GB_SIZE_CART_MAX);
659 memcpy(newRom, gb->memory.rom, gb->memory.romSize);
660 memset(((uint8_t*) newRom) + gb->memory.romSize, 0xFF, GB_SIZE_CART_MAX - gb->memory.romSize);
661 if (gb->memory.rom == gb->memory.romBase) {
662 gb->memory.romBase = newRom;
663 }
664 gb->memory.rom = newRom;
665 GBMBCSwitchBank(gb, gb->memory.currentBank);
666}