src/gb/memory.c (view raw)
1/* Copyright (c) 2013-2016 Jeffrey Pfau
2 *
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6#include <mgba/internal/gb/memory.h>
7
8#include <mgba/core/interface.h>
9#include <mgba/internal/gb/gb.h>
10#include <mgba/internal/gb/io.h>
11#include <mgba/internal/gb/mbc.h>
12#include <mgba/internal/gb/serialize.h>
13#include <mgba/internal/sm83/sm83.h>
14
15#include <mgba-util/memory.h>
16
mLOG_DEFINE_CATEGORY(GB_MEM, "GB Memory", "gb.memory");

// Open-bus filler returned when the CPU fetches past the end of an
// undersized ROM image.
static const uint8_t _yankBuffer[] = { 0xFF };

// Coarse bus classification used to emulate OAM DMA bus conflicts: while a
// DMA is in flight, CPU accesses on the same bus as the DMA source read as
// open bus.
enum GBBus {
	GB_BUS_CPU,
	GB_BUS_MAIN,
	GB_BUS_VRAM,
	GB_BUS_RAM
};

// Bus map for DMG-family models, indexed by address >> 13 (one entry per
// 8 KiB block).
static const enum GBBus _oamBlockDMG[] = {
	GB_BUS_MAIN, // 0x0000
	GB_BUS_MAIN, // 0x2000
	GB_BUS_MAIN, // 0x4000
	GB_BUS_MAIN, // 0x6000
	GB_BUS_VRAM, // 0x8000
	GB_BUS_MAIN, // 0xA000
	GB_BUS_MAIN, // 0xC000
	GB_BUS_CPU, // 0xE000
};

// Bus map for CGB models; unlike DMG, WRAM (0xC000) sits on its own bus.
static const enum GBBus _oamBlockCGB[] = {
	GB_BUS_MAIN, // 0x0000
	GB_BUS_MAIN, // 0x2000
	GB_BUS_MAIN, // 0x4000
	GB_BUS_MAIN, // 0x6000
	GB_BUS_VRAM, // 0x8000
	GB_BUS_MAIN, // 0xA000
	GB_BUS_RAM, // 0xC000
	GB_BUS_CPU // 0xE000
};

// Region substituted for the active fetch window while it is blocked by DMA.
static const uint8_t _blockedRegion[1] = { 0xFF };

// Copy-on-write: detach the ROM from its pristine (memory-mapped) backing
// before patching it. Defined at the bottom of this file.
static void _pristineCow(struct GB* gba);
53
54static uint8_t GBFastLoad8(struct SM83Core* cpu, uint16_t address) {
55 if (UNLIKELY(address >= cpu->memory.activeRegionEnd)) {
56 cpu->memory.setActiveRegion(cpu, address);
57 return cpu->memory.cpuLoad8(cpu, address);
58 }
59 return cpu->memory.activeRegion[address & cpu->memory.activeMask];
60}
61
// Recompute the CPU's fast-fetch window for the region containing `address`.
// ROM regions get a direct pointer (GBFastLoad8); everything else falls back
// to the slow GBLoad8 path. Must be re-run whenever banking changes.
static void GBSetActiveRegion(struct SM83Core* cpu, uint16_t address) {
	struct GB* gb = (struct GB*) cpu->master;
	struct GBMemory* memory = &gb->memory;
	switch (address >> 12) {
	case GB_REGION_CART_BANK0:
	case GB_REGION_CART_BANK0 + 1:
	case GB_REGION_CART_BANK0 + 2:
	case GB_REGION_CART_BANK0 + 3:
		// Fixed bank 0 (0x0000-0x3FFF): fetch directly from romBase.
		cpu->memory.cpuLoad8 = GBFastLoad8;
		cpu->memory.activeRegion = memory->romBase;
		cpu->memory.activeRegionEnd = GB_BASE_CART_BANK1;
		cpu->memory.activeMask = GB_SIZE_CART_BANK0 - 1;
		if (gb->memory.romSize < GB_SIZE_CART_BANK0) {
			// ROM smaller than one bank: past-the-end fetches read open bus.
			if (address >= gb->memory.romSize) {
				cpu->memory.activeRegion = _yankBuffer;
				cpu->memory.activeMask = 0;
			} else {
				cpu->memory.activeRegionEnd = gb->memory.romSize;
			}
		}
		break;
	case GB_REGION_CART_BANK1:
	case GB_REGION_CART_BANK1 + 1:
	case GB_REGION_CART_BANK1 + 2:
	case GB_REGION_CART_BANK1 + 3:
		// Switchable bank (0x4000-0x7FFF).
		if ((gb->memory.mbcType & GB_UNL_BBD) == GB_UNL_BBD) {
			// BBD-family bootleg mappers scramble data on read; no fast path.
			cpu->memory.cpuLoad8 = GBLoad8;
			break;
		}
		cpu->memory.cpuLoad8 = GBFastLoad8;
		if (gb->memory.mbcType != GB_MBC6) {
			cpu->memory.activeRegion = memory->romBank;
			cpu->memory.activeRegionEnd = GB_BASE_VRAM;
			cpu->memory.activeMask = GB_SIZE_CART_BANK0 - 1;
		} else {
			// MBC6 splits this region into two independently-banked halves.
			cpu->memory.activeMask = GB_SIZE_CART_HALFBANK - 1;
			if (address & 0x2000) {
				cpu->memory.activeRegion = memory->mbcState.mbc6.romBank1;
				cpu->memory.activeRegionEnd = GB_BASE_VRAM;
			} else {
				cpu->memory.activeRegion = memory->romBank;
				cpu->memory.activeRegionEnd = GB_BASE_CART_BANK1 + 0x2000;
			}
		}
		if (gb->memory.romSize < GB_SIZE_CART_BANK0 * 2) {
			if (address >= gb->memory.romSize) {
				cpu->memory.activeRegion = _yankBuffer;
				cpu->memory.activeMask = 0;
			} else {
				cpu->memory.activeRegionEnd = gb->memory.romSize;
			}
		}
		break;
	default:
		// Non-ROM regions always go through the slow loader.
		cpu->memory.cpuLoad8 = GBLoad8;
		break;
	}
	if (gb->memory.dmaRemaining) {
		// OAM DMA in progress: if the fetch would collide with the DMA bus
		// (or lands in OAM itself), substitute the blocked region so fetches
		// read 0xFF. Mirrors the checks in GBLoad8/GBStore8.
		const enum GBBus* block = gb->model < GB_MODEL_CGB ? _oamBlockDMG : _oamBlockCGB;
		enum GBBus dmaBus = block[memory->dmaSource >> 13];
		enum GBBus accessBus = block[address >> 13];
		if ((dmaBus != GB_BUS_CPU && dmaBus == accessBus) || (address >= GB_BASE_OAM && address < GB_BASE_UNUSABLE)) {
			cpu->memory.activeRegion = _blockedRegion;
			cpu->memory.activeMask = 0;
		}
	}
}
129
130static void _GBMemoryDMAService(struct mTiming* timing, void* context, uint32_t cyclesLate);
131static void _GBMemoryHDMAService(struct mTiming* timing, void* context, uint32_t cyclesLate);
132
133void GBMemoryInit(struct GB* gb) {
134 struct SM83Core* cpu = gb->cpu;
135 cpu->memory.cpuLoad8 = GBLoad8;
136 cpu->memory.load8 = GBLoad8;
137 cpu->memory.store8 = GBStore8;
138 cpu->memory.currentSegment = GBCurrentSegment;
139 cpu->memory.setActiveRegion = GBSetActiveRegion;
140
141 gb->memory.wram = 0;
142 gb->memory.wramBank = 0;
143 gb->memory.rom = 0;
144 gb->memory.romBank = 0;
145 gb->memory.romSize = 0;
146 gb->memory.sram = 0;
147 gb->memory.mbcType = GB_MBC_AUTODETECT;
148 gb->memory.mbcRead = NULL;
149 gb->memory.mbcWrite = NULL;
150
151 gb->memory.rtc = NULL;
152 gb->memory.rotation = NULL;
153 gb->memory.rumble = NULL;
154 gb->memory.cam = NULL;
155
156 GBIOInit(gb);
157}
158
159void GBMemoryDeinit(struct GB* gb) {
160 mappedMemoryFree(gb->memory.wram, GB_SIZE_WORKING_RAM);
161 if (gb->memory.rom) {
162 mappedMemoryFree(gb->memory.rom, gb->memory.romSize);
163 }
164}
165
// Reset the memory subsystem to power-on state: reallocate WRAM (filling it
// with the CGB startup pattern on CGB+ models), clear DMA/HDMA state,
// register the timing events, and reset the MBC.
void GBMemoryReset(struct GB* gb) {
	if (gb->memory.wram) {
		mappedMemoryFree(gb->memory.wram, GB_SIZE_WORKING_RAM);
	}
	gb->memory.wram = anonymousMemoryMap(GB_SIZE_WORKING_RAM);
	if (gb->model >= GB_MODEL_CGB) {
		// CGB WRAM powers up with an alternating fill pattern; written here
		// in 32-bit words: two words of `pattern` followed by two of its
		// complement, with the polarity flipping every 0x200 words.
		// NOTE(review): the loop strides by 4 words and writes 4 words per
		// iteration, so all of WRAM is covered exactly once.
		uint32_t* base = (uint32_t*) gb->memory.wram;
		size_t i;
		uint32_t pattern = 0;
		for (i = 0; i < GB_SIZE_WORKING_RAM / 4; i += 4) {
			if ((i & 0x1FF) == 0) {
				pattern = ~pattern;
			}
			base[i + 0] = pattern;
			base[i + 1] = pattern;
			base[i + 2] = ~pattern;
			base[i + 3] = ~pattern;
		}
	}
	// Bank 0 is fixed; the switchable window starts on bank 1.
	GBMemorySwitchWramBank(&gb->memory, 1);
	gb->memory.ime = false;
	gb->memory.ie = 0;

	gb->memory.dmaRemaining = 0;
	gb->memory.dmaSource = 0;
	gb->memory.dmaDest = 0;
	gb->memory.hdmaRemaining = 0;
	gb->memory.hdmaSource = 0;
	gb->memory.hdmaDest = 0;
	gb->memory.isHdma = false;


	gb->memory.dmaEvent.context = gb;
	gb->memory.dmaEvent.name = "GB DMA";
	gb->memory.dmaEvent.callback = _GBMemoryDMAService;
	gb->memory.dmaEvent.priority = 0x40;
	gb->memory.hdmaEvent.context = gb;
	gb->memory.hdmaEvent.name = "GB HDMA";
	gb->memory.hdmaEvent.callback = _GBMemoryHDMAService;
	gb->memory.hdmaEvent.priority = 0x41;

	memset(&gb->memory.hram, 0, sizeof(gb->memory.hram));

	GBMBCReset(gb);
}
211
212void GBMemorySwitchWramBank(struct GBMemory* memory, int bank) {
213 bank &= 7;
214 if (!bank) {
215 bank = 1;
216 }
217 memory->wramBank = &memory->wram[GB_SIZE_WORKING_RAM_BANK0 * bank];
218 memory->wramCurrentBank = bank;
219}
220
221uint8_t GBLoad8(struct SM83Core* cpu, uint16_t address) {
222 struct GB* gb = (struct GB*) cpu->master;
223 struct GBMemory* memory = &gb->memory;
224 if (gb->memory.dmaRemaining) {
225 const enum GBBus* block = gb->model < GB_MODEL_CGB ? _oamBlockDMG : _oamBlockCGB;
226 enum GBBus dmaBus = block[memory->dmaSource >> 13];
227 enum GBBus accessBus = block[address >> 13];
228 if (dmaBus != GB_BUS_CPU && dmaBus == accessBus) {
229 return 0xFF;
230 }
231 if (address >= GB_BASE_OAM && address < GB_BASE_IO) {
232 return 0xFF;
233 }
234 }
235 switch (address >> 12) {
236 case GB_REGION_CART_BANK0:
237 case GB_REGION_CART_BANK0 + 1:
238 case GB_REGION_CART_BANK0 + 2:
239 case GB_REGION_CART_BANK0 + 3:
240 if (address >= memory->romSize) {
241 return 0xFF;
242 }
243 return memory->romBase[address & (GB_SIZE_CART_BANK0 - 1)];
244 case GB_REGION_CART_BANK1 + 2:
245 case GB_REGION_CART_BANK1 + 3:
246 if (memory->mbcType == GB_MBC6) {
247 return memory->mbcState.mbc6.romBank1[address & (GB_SIZE_CART_HALFBANK - 1)];
248 }
249 // Fall through
250 case GB_REGION_CART_BANK1:
251 case GB_REGION_CART_BANK1 + 1:
252 if (address >= memory->romSize) {
253 return 0xFF;
254 }
255 if ((memory->mbcType & GB_UNL_BBD) == GB_UNL_BBD) {
256 return memory->mbcRead(memory, address);
257 }
258 return memory->romBank[address & (GB_SIZE_CART_BANK0 - 1)];
259 case GB_REGION_VRAM:
260 case GB_REGION_VRAM + 1:
261 if (gb->video.mode != 3) {
262 return gb->video.vramBank[address & (GB_SIZE_VRAM_BANK0 - 1)];
263 }
264 return 0xFF;
265 case GB_REGION_EXTERNAL_RAM:
266 case GB_REGION_EXTERNAL_RAM + 1:
267 if (memory->rtcAccess) {
268 return memory->rtcRegs[memory->activeRtcReg];
269 } else if (memory->mbcRead) {
270 return memory->mbcRead(memory, address);
271 } else if (memory->sramAccess && memory->sram) {
272 return memory->sramBank[address & (GB_SIZE_EXTERNAL_RAM - 1)];
273 } else if (memory->mbcType == GB_HuC3) {
274 return 0x01; // TODO: Is this supposed to be the current SRAM bank?
275 }
276 return 0xFF;
277 case GB_REGION_WORKING_RAM_BANK0:
278 case GB_REGION_WORKING_RAM_BANK0 + 2:
279 return memory->wram[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
280 case GB_REGION_WORKING_RAM_BANK1:
281 return memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
282 default:
283 if (address < GB_BASE_OAM) {
284 return memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
285 }
286 if (address < GB_BASE_UNUSABLE) {
287 if (gb->video.mode < 2) {
288 return gb->video.oam.raw[address & 0xFF];
289 }
290 return 0xFF;
291 }
292 if (address < GB_BASE_IO) {
293 mLOG(GB_MEM, GAME_ERROR, "Attempt to read from unusable memory: %04X", address);
294 return 0xFF;
295 }
296 if (address < GB_BASE_HRAM) {
297 return GBIORead(gb, address & (GB_SIZE_IO - 1));
298 }
299 if (address < GB_BASE_IE) {
300 return memory->hram[address & GB_SIZE_HRAM];
301 }
302 return GBIORead(gb, GB_REG_IE);
303 }
304}
305
// Slow-path bus write. Mirrors GBLoad8's region handling: DMA bus conflicts
// silently drop the write, ROM writes are routed to the MBC, and VRAM/OAM
// writes are gated on PPU mode.
void GBStore8(struct SM83Core* cpu, uint16_t address, int8_t value) {
	struct GB* gb = (struct GB*) cpu->master;
	struct GBMemory* memory = &gb->memory;
	if (gb->memory.dmaRemaining) {
		// Writes that collide with the in-flight OAM DMA bus are lost.
		const enum GBBus* block = gb->model < GB_MODEL_CGB ? _oamBlockDMG : _oamBlockCGB;
		enum GBBus dmaBus = block[memory->dmaSource >> 13];
		enum GBBus accessBus = block[address >> 13];
		if (dmaBus != GB_BUS_CPU && dmaBus == accessBus) {
			return;
		}
		if (address >= GB_BASE_OAM && address < GB_BASE_UNUSABLE) {
			return;
		}
	}
	switch (address >> 12) {
	case GB_REGION_CART_BANK0:
	case GB_REGION_CART_BANK0 + 1:
	case GB_REGION_CART_BANK0 + 2:
	case GB_REGION_CART_BANK0 + 3:
	case GB_REGION_CART_BANK1:
	case GB_REGION_CART_BANK1 + 1:
	case GB_REGION_CART_BANK1 + 2:
	case GB_REGION_CART_BANK1 + 3:
		// ROM-range writes are MBC register writes; banking may change, so
		// the fast-fetch window must be recomputed.
		memory->mbcWrite(gb, address, value);
		cpu->memory.setActiveRegion(cpu, cpu->pc);
		return;
	case GB_REGION_VRAM:
	case GB_REGION_VRAM + 1:
		// VRAM is write-locked during PPU mode 3.
		if (gb->video.mode != 3) {
			gb->video.renderer->writeVRAM(gb->video.renderer, (address & (GB_SIZE_VRAM_BANK0 - 1)) | (GB_SIZE_VRAM_BANK0 * gb->video.vramCurrentBank));
			gb->video.vramBank[address & (GB_SIZE_VRAM_BANK0 - 1)] = value;
		}
		return;
	case GB_REGION_EXTERNAL_RAM:
	case GB_REGION_EXTERNAL_RAM + 1:
		if (memory->rtcAccess) {
			memory->rtcRegs[memory->activeRtcReg] = value;
		} else if (memory->sramAccess && memory->sram && memory->directSramAccess) {
			memory->sramBank[address & (GB_SIZE_EXTERNAL_RAM - 1)] = value;
		} else {
			// MBCs without direct SRAM access mediate the write themselves.
			memory->mbcWrite(gb, address, value);
		}
		// Mark SRAM dirty so the frontend can flush battery-backed saves.
		gb->sramDirty |= GB_SRAM_DIRT_NEW;
		return;
	case GB_REGION_WORKING_RAM_BANK0:
	case GB_REGION_WORKING_RAM_BANK0 + 2: // 0xE000: echo of WRAM bank 0
		memory->wram[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
		return;
	case GB_REGION_WORKING_RAM_BANK1:
		memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
		return;
	default:
		// 0xF000-0xFFFF: echo RAM tail, OAM, unusable, I/O, HRAM, IE.
		if (address < GB_BASE_OAM) {
			memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
		} else if (address < GB_BASE_UNUSABLE) {
			// OAM writable only outside PPU modes 2/3.
			if (gb->video.mode < 2) {
				gb->video.oam.raw[address & 0xFF] = value;
				gb->video.renderer->writeOAM(gb->video.renderer, address & 0xFF);
			}
		} else if (address < GB_BASE_IO) {
			mLOG(GB_MEM, GAME_ERROR, "Attempt to write to unusable memory: %04X:%02X", address, value);
		} else if (address < GB_BASE_HRAM) {
			GBIOWrite(gb, address & (GB_SIZE_IO - 1), value);
		} else if (address < GB_BASE_IE) {
			memory->hram[address & GB_SIZE_HRAM] = value;
		} else {
			GBIOWrite(gb, GB_REG_IE, value);
		}
	}
}
376
377int GBCurrentSegment(struct SM83Core* cpu, uint16_t address) {
378 struct GB* gb = (struct GB*) cpu->master;
379 struct GBMemory* memory = &gb->memory;
380 switch (address >> 12) {
381 case GB_REGION_CART_BANK0:
382 case GB_REGION_CART_BANK0 + 1:
383 case GB_REGION_CART_BANK0 + 2:
384 case GB_REGION_CART_BANK0 + 3:
385 return 0;
386 case GB_REGION_CART_BANK1:
387 case GB_REGION_CART_BANK1 + 1:
388 case GB_REGION_CART_BANK1 + 2:
389 case GB_REGION_CART_BANK1 + 3:
390 return memory->currentBank;
391 case GB_REGION_VRAM:
392 case GB_REGION_VRAM + 1:
393 return gb->video.vramCurrentBank;
394 case GB_REGION_EXTERNAL_RAM:
395 case GB_REGION_EXTERNAL_RAM + 1:
396 return memory->sramCurrentBank;
397 case GB_REGION_WORKING_RAM_BANK0:
398 case GB_REGION_WORKING_RAM_BANK0 + 2:
399 return 0;
400 case GB_REGION_WORKING_RAM_BANK1:
401 return memory->wramCurrentBank;
402 default:
403 return 0;
404 }
405}
406
// Debugger-side read: like GBLoad8 but side-effect-light and able to inspect
// an explicit bank via `segment` (negative segment = whatever is currently
// mapped). Does not model DMA bus conflicts.
uint8_t GBView8(struct SM83Core* cpu, uint16_t address, int segment) {
	struct GB* gb = (struct GB*) cpu->master;
	struct GBMemory* memory = &gb->memory;
	switch (address >> 12) {
	case GB_REGION_CART_BANK0:
	case GB_REGION_CART_BANK0 + 1:
	case GB_REGION_CART_BANK0 + 2:
	case GB_REGION_CART_BANK0 + 3:
		return memory->romBase[address & (GB_SIZE_CART_BANK0 - 1)];
	case GB_REGION_CART_BANK1:
	case GB_REGION_CART_BANK1 + 1:
	case GB_REGION_CART_BANK1 + 2:
	case GB_REGION_CART_BANK1 + 3:
		if (segment < 0) {
			return memory->romBank[address & (GB_SIZE_CART_BANK0 - 1)];
		} else if ((size_t) segment * GB_SIZE_CART_BANK0 < memory->romSize) {
			// Index the requested bank directly in the flat ROM image.
			return memory->rom[(address & (GB_SIZE_CART_BANK0 - 1)) + segment * GB_SIZE_CART_BANK0];
		} else {
			return 0xFF;
		}
	case GB_REGION_VRAM:
	case GB_REGION_VRAM + 1:
		if (segment < 0) {
			return gb->video.vramBank[address & (GB_SIZE_VRAM_BANK0 - 1)];
		} else if (segment < 2) {
			return gb->video.vram[(address & (GB_SIZE_VRAM_BANK0 - 1)) + segment *GB_SIZE_VRAM_BANK0];
		} else {
			return 0xFF;
		}
	case GB_REGION_EXTERNAL_RAM:
	case GB_REGION_EXTERNAL_RAM + 1:
		if (memory->rtcAccess) {
			return memory->rtcRegs[memory->activeRtcReg];
		} else if (memory->sramAccess) {
			if (segment < 0 && memory->sram) {
				return memory->sramBank[address & (GB_SIZE_EXTERNAL_RAM - 1)];
			} else if ((size_t) segment * GB_SIZE_EXTERNAL_RAM < gb->sramSize) {
				return memory->sram[(address & (GB_SIZE_EXTERNAL_RAM - 1)) + segment *GB_SIZE_EXTERNAL_RAM];
			} else {
				return 0xFF;
			}
		} else if (memory->mbcRead) {
			return memory->mbcRead(memory, address);
		} else if (memory->mbcType == GB_HuC3) {
			return 0x01; // TODO: Is this supposed to be the current SRAM bank?
		}
		return 0xFF;
	case GB_REGION_WORKING_RAM_BANK0:
	case GB_REGION_WORKING_RAM_BANK0 + 2: // 0xE000: echo of WRAM bank 0
		return memory->wram[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
	case GB_REGION_WORKING_RAM_BANK1:
		if (segment < 0) {
			return memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
		} else if (segment < 8) {
			return memory->wram[(address & (GB_SIZE_WORKING_RAM_BANK0 - 1)) + segment *GB_SIZE_WORKING_RAM_BANK0];
		} else {
			return 0xFF;
		}
	default:
		// 0xF000-0xFFFF: echo RAM tail, OAM, unusable, I/O, HRAM, IE.
		if (address < GB_BASE_OAM) {
			return memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
		}
		if (address < GB_BASE_UNUSABLE) {
			if (gb->video.mode < 2) {
				return gb->video.oam.raw[address & 0xFF];
			}
			return 0xFF;
		}
		if (address < GB_BASE_IO) {
			mLOG(GB_MEM, GAME_ERROR, "Attempt to read from unusable memory: %04X", address);
			// Model-specific open-bus behavior for the unusable region.
			if (gb->video.mode < 2) {
				switch (gb->model) {
				case GB_MODEL_AGB:
					return (address & 0xF0) | ((address >> 4) & 0xF);
				case GB_MODEL_CGB:
					// TODO: R/W behavior
					return 0x00;
				default:
					return 0x00;
				}
			}
			return 0xFF;
		}
		if (address < GB_BASE_HRAM) {
			return GBIORead(gb, address & (GB_SIZE_IO - 1));
		}
		if (address < GB_BASE_IE) {
			return memory->hram[address & GB_SIZE_HRAM];
		}
		return GBIORead(gb, GB_REG_IE);
	}
}
499
500void GBMemoryDMA(struct GB* gb, uint16_t base) {
501 if (base >= 0xE000) {
502 base &= 0xDFFF;
503 }
504 mTimingDeschedule(&gb->timing, &gb->memory.dmaEvent);
505 mTimingSchedule(&gb->timing, &gb->memory.dmaEvent, 8 * (2 - gb->doubleSpeed));
506 gb->memory.dmaSource = base;
507 gb->memory.dmaDest = 0;
508 gb->memory.dmaRemaining = 0xA0;
509}
510
// Handle a write to the CGB HDMA5 register: latch source/destination from
// HDMA1-4 and start either a general-purpose DMA (bit 7 clear) or an HBlank
// DMA (bit 7 set). Returns the value to store back into HDMA5.
uint8_t GBMemoryWriteHDMA5(struct GB* gb, uint8_t value) {
	gb->memory.hdmaSource = gb->memory.io[GB_REG_HDMA1] << 8;
	gb->memory.hdmaSource |= gb->memory.io[GB_REG_HDMA2];
	gb->memory.hdmaDest = gb->memory.io[GB_REG_HDMA3] << 8;
	gb->memory.hdmaDest |= gb->memory.io[GB_REG_HDMA4];
	// Source is aligned to 16 bytes; VRAM cannot be an HDMA source.
	gb->memory.hdmaSource &= 0xFFF0;
	if (gb->memory.hdmaSource >= 0x8000 && gb->memory.hdmaSource < 0xA000) {
		mLOG(GB_MEM, GAME_ERROR, "Invalid HDMA source: %04X", gb->memory.hdmaSource);
		return value | 0x80;
	}
	// Destination is forced into VRAM (0x8000-0x9FF0), 16-byte aligned.
	gb->memory.hdmaDest &= 0x1FF0;
	gb->memory.hdmaDest |= 0x8000;
	bool wasHdma = gb->memory.isHdma;
	gb->memory.isHdma = value & 0x80;
	// Start immediately for GDMA, or for HDMA when already in HBlank (mode 0)
	// with the LCD on. Writing with bit 7 clear while HDMA was active instead
	// cancels it (handled by the else-if below when the LCD is off).
	if ((!wasHdma && !gb->memory.isHdma) || (GBRegisterLCDCIsEnable(gb->memory.io[GB_REG_LCDC]) && gb->video.mode == 0)) {
		if (gb->memory.isHdma) {
			gb->memory.hdmaRemaining = 0x10; // One 16-byte chunk per HBlank
		} else {
			// GDMA copies everything at once: (length+1) * 16 bytes.
			gb->memory.hdmaRemaining = ((value & 0x7F) + 1) * 0x10;
		}
		gb->cpuBlocked = true;
		mTimingSchedule(&gb->timing, &gb->memory.hdmaEvent, 0);
	} else if (gb->memory.isHdma && !GBRegisterLCDCIsEnable(gb->memory.io[GB_REG_LCDC])) {
		// NOTE(review): with the LCD disabled, HDMA stalls; the register
		// reads back as busy with the pending length.
		return 0x80 | ((value + 1) & 0x7F);
	}
	return value & 0x7F;
}
538
// Timing callback: copy one byte of the in-flight OAM DMA and reschedule
// until all 0xA0 bytes are done.
void _GBMemoryDMAService(struct mTiming* timing, void* context, uint32_t cyclesLate) {
	struct GB* gb = context;
	// Temporarily clear dmaRemaining so GBLoad8 doesn't apply the DMA bus
	// conflict rules to the DMA's own source read.
	int dmaRemaining = gb->memory.dmaRemaining;
	gb->memory.dmaRemaining = 0;
	uint8_t b = GBLoad8(gb->cpu, gb->memory.dmaSource);
	// TODO: Can DMA write OAM during modes 2-3?
	gb->video.oam.raw[gb->memory.dmaDest] = b;
	gb->video.renderer->writeOAM(gb->video.renderer, gb->memory.dmaDest);
	++gb->memory.dmaSource;
	++gb->memory.dmaDest;
	gb->memory.dmaRemaining = dmaRemaining - 1;
	if (gb->memory.dmaRemaining) {
		// One byte every 4 cycles (2 in double speed).
		mTimingSchedule(timing, &gb->memory.dmaEvent, 4 * (2 - gb->doubleSpeed) - cyclesLate);
	}
}
554
// Timing callback: copy one byte of the in-flight HDMA/GDMA transfer. The
// CPU stays blocked for the duration of the current chunk.
void _GBMemoryHDMAService(struct mTiming* timing, void* context, uint32_t cyclesLate) {
	struct GB* gb = context;
	gb->cpuBlocked = true;
	uint8_t b = gb->cpu->memory.load8(gb->cpu, gb->memory.hdmaSource);
	gb->cpu->memory.store8(gb->cpu, gb->memory.hdmaDest, b);
	++gb->memory.hdmaSource;
	++gb->memory.hdmaDest;
	--gb->memory.hdmaRemaining;
	if (gb->memory.hdmaRemaining) {
		mTimingDeschedule(timing, &gb->memory.hdmaEvent);
		mTimingSchedule(timing, &gb->memory.hdmaEvent, 4 - cyclesLate);
	} else {
		// Chunk complete: unblock the CPU and write the advanced source/dest
		// back into the HDMA registers, as the hardware does.
		gb->cpuBlocked = false;
		gb->memory.io[GB_REG_HDMA1] = gb->memory.hdmaSource >> 8;
		gb->memory.io[GB_REG_HDMA2] = gb->memory.hdmaSource;
		gb->memory.io[GB_REG_HDMA3] = gb->memory.hdmaDest >> 8;
		gb->memory.io[GB_REG_HDMA4] = gb->memory.hdmaDest;
		if (gb->memory.isHdma) {
			// HDMA5 counts down one 16-byte block per HBlank; 0xFF means done.
			--gb->memory.io[GB_REG_HDMA5];
			if (gb->memory.io[GB_REG_HDMA5] == 0xFF) {
				gb->memory.isHdma = false;
			}
		} else {
			gb->memory.io[GB_REG_HDMA5] = 0xFF;
		}
	}
}
582
583void GBPatch8(struct SM83Core* cpu, uint16_t address, int8_t value, int8_t* old, int segment) {
584 struct GB* gb = (struct GB*) cpu->master;
585 struct GBMemory* memory = &gb->memory;
586 int8_t oldValue = -1;
587
588 switch (address >> 12) {
589 case GB_REGION_CART_BANK0:
590 case GB_REGION_CART_BANK0 + 1:
591 case GB_REGION_CART_BANK0 + 2:
592 case GB_REGION_CART_BANK0 + 3:
593 _pristineCow(gb);
594 oldValue = memory->romBase[address & (GB_SIZE_CART_BANK0 - 1)];
595 memory->romBase[address & (GB_SIZE_CART_BANK0 - 1)] = value;
596 break;
597 case GB_REGION_CART_BANK1:
598 case GB_REGION_CART_BANK1 + 1:
599 case GB_REGION_CART_BANK1 + 2:
600 case GB_REGION_CART_BANK1 + 3:
601 _pristineCow(gb);
602 if (segment < 0) {
603 oldValue = memory->romBank[address & (GB_SIZE_CART_BANK0 - 1)];
604 memory->romBank[address & (GB_SIZE_CART_BANK0 - 1)] = value;
605 } else if ((size_t) segment * GB_SIZE_CART_BANK0 < memory->romSize) {
606 oldValue = memory->rom[(address & (GB_SIZE_CART_BANK0 - 1)) + segment * GB_SIZE_CART_BANK0];
607 memory->rom[(address & (GB_SIZE_CART_BANK0 - 1)) + segment * GB_SIZE_CART_BANK0] = value;
608 } else {
609 return;
610 }
611 break;
612 case GB_REGION_VRAM:
613 case GB_REGION_VRAM + 1:
614 if (segment < 0) {
615 oldValue = gb->video.vramBank[address & (GB_SIZE_VRAM_BANK0 - 1)];
616 gb->video.vramBank[address & (GB_SIZE_VRAM_BANK0 - 1)] = value;
617 gb->video.renderer->writeVRAM(gb->video.renderer, (address & (GB_SIZE_VRAM_BANK0 - 1)) + GB_SIZE_VRAM_BANK0 * gb->video.vramCurrentBank);
618 } else if (segment < 2) {
619 oldValue = gb->video.vram[(address & (GB_SIZE_VRAM_BANK0 - 1)) + segment * GB_SIZE_VRAM_BANK0];
620 gb->video.vramBank[(address & (GB_SIZE_VRAM_BANK0 - 1)) + segment * GB_SIZE_VRAM_BANK0] = value;
621 gb->video.renderer->writeVRAM(gb->video.renderer, (address & (GB_SIZE_VRAM_BANK0 - 1)) + segment * GB_SIZE_VRAM_BANK0);
622 } else {
623 return;
624 }
625 break;
626 case GB_REGION_EXTERNAL_RAM:
627 case GB_REGION_EXTERNAL_RAM + 1:
628 if (memory->rtcAccess) {
629 memory->rtcRegs[memory->activeRtcReg] = value;
630 } else if (memory->sramAccess && memory->sram && memory->mbcType != GB_MBC2) {
631 // TODO: Remove sramAccess check?
632 memory->sramBank[address & (GB_SIZE_EXTERNAL_RAM - 1)] = value;
633 } else {
634 memory->mbcWrite(gb, address, value);
635 }
636 gb->sramDirty |= GB_SRAM_DIRT_NEW;
637 return;
638 case GB_REGION_WORKING_RAM_BANK0:
639 case GB_REGION_WORKING_RAM_BANK0 + 2:
640 oldValue = memory->wram[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
641 memory->wram[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
642 break;
643 case GB_REGION_WORKING_RAM_BANK1:
644 if (segment < 0) {
645 oldValue = memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
646 memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
647 } else if (segment < 8) {
648 oldValue = memory->wram[(address & (GB_SIZE_WORKING_RAM_BANK0 - 1)) + segment * GB_SIZE_WORKING_RAM_BANK0];
649 memory->wram[(address & (GB_SIZE_WORKING_RAM_BANK0 - 1)) + segment * GB_SIZE_WORKING_RAM_BANK0] = value;
650 } else {
651 return;
652 }
653 break;
654 default:
655 if (address < GB_BASE_OAM) {
656 oldValue = memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)];
657 memory->wramBank[address & (GB_SIZE_WORKING_RAM_BANK0 - 1)] = value;
658 } else if (address < GB_BASE_UNUSABLE) {
659 oldValue = gb->video.oam.raw[address & 0xFF];
660 gb->video.oam.raw[address & 0xFF] = value;
661 gb->video.renderer->writeOAM(gb->video.renderer, address & 0xFF);
662 } else if (address < GB_BASE_HRAM) {
663 mLOG(GB_MEM, STUB, "Unimplemented memory Patch8: 0x%08X", address);
664 return;
665 } else if (address < GB_BASE_IE) {
666 oldValue = memory->hram[address & GB_SIZE_HRAM];
667 memory->hram[address & GB_SIZE_HRAM] = value;
668 } else {
669 mLOG(GB_MEM, STUB, "Unimplemented memory Patch8: 0x%08X", address);
670 return;
671 }
672 }
673 if (old) {
674 *old = oldValue;
675 }
676}
677
// Serialize the memory subsystem into a savestate, little-endian on disk.
// Event times are stored relative to the current timing cursor.
void GBMemorySerialize(const struct GB* gb, struct GBSerializedState* state) {
	const struct GBMemory* memory = &gb->memory;
	memcpy(state->wram, memory->wram, GB_SIZE_WORKING_RAM);
	memcpy(state->hram, memory->hram, GB_SIZE_HRAM);
	STORE_16LE(memory->currentBank, 0, &state->memory.currentBank);
	state->memory.wramCurrentBank = memory->wramCurrentBank;
	state->memory.sramCurrentBank = memory->sramCurrentBank;

	STORE_16LE(memory->dmaSource, 0, &state->memory.dmaSource);
	STORE_16LE(memory->dmaDest, 0, &state->memory.dmaDest);

	STORE_16LE(memory->hdmaSource, 0, &state->memory.hdmaSource);
	STORE_16LE(memory->hdmaDest, 0, &state->memory.hdmaDest);

	STORE_16LE(memory->hdmaRemaining, 0, &state->memory.hdmaRemaining);
	state->memory.dmaRemaining = memory->dmaRemaining;
	memcpy(state->memory.rtcRegs, memory->rtcRegs, sizeof(state->memory.rtcRegs));

	// Store the events' remaining time, not their absolute timestamps.
	STORE_32LE(memory->dmaEvent.when - mTimingCurrentTime(&gb->timing), 0, &state->memory.dmaNext);
	STORE_32LE(memory->hdmaEvent.when - mTimingCurrentTime(&gb->timing), 0, &state->memory.hdmaNext);

	// Pack the boolean/small-field state into a single flags word.
	GBSerializedMemoryFlags flags = 0;
	flags = GBSerializedMemoryFlagsSetSramAccess(flags, memory->sramAccess);
	flags = GBSerializedMemoryFlagsSetRtcAccess(flags, memory->rtcAccess);
	flags = GBSerializedMemoryFlagsSetRtcLatched(flags, memory->rtcLatched);
	flags = GBSerializedMemoryFlagsSetIme(flags, memory->ime);
	flags = GBSerializedMemoryFlagsSetIsHdma(flags, memory->isHdma);
	flags = GBSerializedMemoryFlagsSetActiveRtcReg(flags, memory->activeRtcReg);
	STORE_16LE(flags, 0, &state->memory.flags);

	// Mapper-specific state; only the active MBC's fields are meaningful.
	switch (memory->mbcType) {
	case GB_MBC1:
		state->memory.mbc1.mode = memory->mbcState.mbc1.mode;
		state->memory.mbc1.multicartStride = memory->mbcState.mbc1.multicartStride;
		state->memory.mbc1.bankLo = memory->mbcState.mbc1.bankLo;
		state->memory.mbc1.bankHi = memory->mbcState.mbc1.bankHi;
		break;
	case GB_MBC3_RTC:
		STORE_64LE(gb->memory.rtcLastLatch, 0, &state->memory.rtc.lastLatch);
		break;
	case GB_MBC7:
		state->memory.mbc7.state = memory->mbcState.mbc7.state;
		state->memory.mbc7.eeprom = memory->mbcState.mbc7.eeprom;
		state->memory.mbc7.address = memory->mbcState.mbc7.address;
		state->memory.mbc7.access = memory->mbcState.mbc7.access;
		state->memory.mbc7.latch = memory->mbcState.mbc7.latch;
		state->memory.mbc7.srBits = memory->mbcState.mbc7.srBits;
		STORE_16LE(memory->mbcState.mbc7.sr, 0, &state->memory.mbc7.sr);
		STORE_32LE(memory->mbcState.mbc7.writable, 0, &state->memory.mbc7.writable);
		break;
	case GB_MMM01:
		state->memory.mmm01.locked = memory->mbcState.mmm01.locked;
		state->memory.mmm01.bank0 = memory->mbcState.mmm01.currentBank0;
		break;
	case GB_UNL_BBD:
	case GB_UNL_HITEK:
		state->memory.bbd.dataSwapMode = memory->mbcState.bbd.dataSwapMode;
		state->memory.bbd.bankSwapMode = memory->mbcState.bbd.bankSwapMode;
		break;
	default:
		break;
	}
}
741
// Restore the memory subsystem from a savestate: load the buffers and
// registers, then rebuild the derived state (bank pointers, pending events,
// mapper internals).
void GBMemoryDeserialize(struct GB* gb, const struct GBSerializedState* state) {
	struct GBMemory* memory = &gb->memory;
	memcpy(memory->wram, state->wram, GB_SIZE_WORKING_RAM);
	memcpy(memory->hram, state->hram, GB_SIZE_HRAM);
	LOAD_16LE(memory->currentBank, 0, &state->memory.currentBank);
	memory->wramCurrentBank = state->memory.wramCurrentBank;
	memory->sramCurrentBank = state->memory.sramCurrentBank;

	// Rebuild the bank pointers from the restored bank numbers.
	GBMBCSwitchBank(gb, memory->currentBank);
	GBMemorySwitchWramBank(memory, memory->wramCurrentBank);
	GBMBCSwitchSramBank(gb, memory->sramCurrentBank);

	LOAD_16LE(memory->dmaSource, 0, &state->memory.dmaSource);
	LOAD_16LE(memory->dmaDest, 0, &state->memory.dmaDest);

	LOAD_16LE(memory->hdmaSource, 0, &state->memory.hdmaSource);
	LOAD_16LE(memory->hdmaDest, 0, &state->memory.hdmaDest);

	LOAD_16LE(memory->hdmaRemaining, 0, &state->memory.hdmaRemaining);
	memory->dmaRemaining = state->memory.dmaRemaining;
	memcpy(memory->rtcRegs, state->memory.rtcRegs, sizeof(state->memory.rtcRegs));

	// Re-arm pending DMA/HDMA events; if none was pending, only restore the
	// event's nominal timestamp without scheduling it.
	uint32_t when;
	LOAD_32LE(when, 0, &state->memory.dmaNext);
	if (memory->dmaRemaining) {
		mTimingSchedule(&gb->timing, &memory->dmaEvent, when);
	} else {
		memory->dmaEvent.when = when + mTimingCurrentTime(&gb->timing);
	}
	LOAD_32LE(when, 0, &state->memory.hdmaNext);
	if (memory->hdmaRemaining) {
		mTimingSchedule(&gb->timing, &memory->hdmaEvent, when);
	} else {
		memory->hdmaEvent.when = when + mTimingCurrentTime(&gb->timing);
	}

	// Unpack the flags word written by GBMemorySerialize.
	GBSerializedMemoryFlags flags;
	LOAD_16LE(flags, 0, &state->memory.flags);
	memory->sramAccess = GBSerializedMemoryFlagsGetSramAccess(flags);
	memory->rtcAccess = GBSerializedMemoryFlagsGetRtcAccess(flags);
	memory->rtcLatched = GBSerializedMemoryFlagsGetRtcLatched(flags);
	memory->ime = GBSerializedMemoryFlagsGetIme(flags);
	memory->isHdma = GBSerializedMemoryFlagsGetIsHdma(flags);
	memory->activeRtcReg = GBSerializedMemoryFlagsGetActiveRtcReg(flags);

	// Mapper-specific state restore; values from the state are sanitized
	// where they index into fixed-size structures.
	switch (memory->mbcType) {
	case GB_MBC1:
		memory->mbcState.mbc1.mode = state->memory.mbc1.mode;
		memory->mbcState.mbc1.multicartStride = state->memory.mbc1.multicartStride;
		memory->mbcState.mbc1.bankLo = state->memory.mbc1.bankLo;
		memory->mbcState.mbc1.bankHi = state->memory.mbc1.bankHi;
		if (!(memory->mbcState.mbc1.bankLo || memory->mbcState.mbc1.bankHi)) {
			// Backwards compat: older states lack bankLo/bankHi; derive them
			// from the combined bank number.
			memory->mbcState.mbc1.bankLo = memory->currentBank & ((1 << memory->mbcState.mbc1.multicartStride) - 1);
			memory->mbcState.mbc1.bankHi = memory->currentBank >> memory->mbcState.mbc1.multicartStride;
		}
		if (memory->mbcState.mbc1.mode) {
			GBMBCSwitchBank0(gb, memory->mbcState.mbc1.bankHi);
		}
		break;
	case GB_MBC3_RTC:
		LOAD_64LE(gb->memory.rtcLastLatch, 0, &state->memory.rtc.lastLatch);
		break;
	case GB_MBC7:
		memory->mbcState.mbc7.state = state->memory.mbc7.state;
		memory->mbcState.mbc7.eeprom = state->memory.mbc7.eeprom;
		memory->mbcState.mbc7.address = state->memory.mbc7.address & 0x7F;
		memory->mbcState.mbc7.access = state->memory.mbc7.access;
		memory->mbcState.mbc7.latch = state->memory.mbc7.latch;
		memory->mbcState.mbc7.srBits = state->memory.mbc7.srBits;
		LOAD_16LE(memory->mbcState.mbc7.sr, 0, &state->memory.mbc7.sr);
		LOAD_32LE(memory->mbcState.mbc7.writable, 0, &state->memory.mbc7.writable);
		break;
	case GB_MMM01:
		memory->mbcState.mmm01.locked = state->memory.mmm01.locked;
		memory->mbcState.mmm01.currentBank0 = state->memory.mmm01.bank0;
		if (memory->mbcState.mmm01.locked) {
			GBMBCSwitchBank0(gb, memory->mbcState.mmm01.currentBank0);
		} else {
			// Unlocked MMM01 boots from the menu banks at the end of the ROM.
			GBMBCSwitchBank0(gb, gb->memory.romSize / GB_SIZE_CART_BANK0 - 2);
		}
		break;
	case GB_UNL_BBD:
	case GB_UNL_HITEK:
		memory->mbcState.bbd.dataSwapMode = state->memory.bbd.dataSwapMode & 0x7;
		memory->mbcState.bbd.bankSwapMode = state->memory.bbd.bankSwapMode & 0x7;
		break;
	default:
		break;
	}
}
833
// Copy-on-write for ROM patching: if the ROM is still the pristine (directly
// memory-mapped) image, replace it with a private, writable copy padded to
// GB_SIZE_CART_MAX with 0xFF, and repoint romBase/romBank at the copy.
void _pristineCow(struct GB* gb) {
	if (!gb->isPristine) {
		return;
	}
	void* newRom = anonymousMemoryMap(GB_SIZE_CART_MAX);
	memcpy(newRom, gb->memory.rom, gb->memory.romSize);
	// Pad the unused tail so out-of-range banks read as open bus.
	memset(((uint8_t*) newRom) + gb->memory.romSize, 0xFF, GB_SIZE_CART_MAX - gb->memory.romSize);
	// romBase may alias rom (no remapped bank 0); keep them aliased.
	if (gb->memory.rom == gb->memory.romBase) {
		gb->memory.romBase = newRom;
	}
	gb->memory.rom = newRom;
	// Recompute romBank, which still points into the old mapping.
	GBMBCSwitchBank(gb, gb->memory.currentBank);
	gb->isPristine = false;
}