src/gb/gb.c
/* Copyright (c) 2013-2016 Jeffrey Pfau
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include <mgba/internal/gb/gb.h>

#include <mgba/internal/gb/io.h>
#include <mgba/internal/gb/mbc.h>
#include <mgba/internal/sm83/sm83.h>

#include <mgba/core/core.h>
#include <mgba/core/cheats.h>
#include <mgba-util/crc32.h>
#include <mgba-util/memory.h>
#include <mgba-util/math.h>
#include <mgba-util/patch.h>
#include <mgba-util/vfs.h>

#define CLEANUP_THRESHOLD 15

const uint32_t CGB_SM83_FREQUENCY = 0x800000;
const uint32_t SGB_SM83_FREQUENCY = 0x418B1E;

const uint32_t GB_COMPONENT_MAGIC = 0x400000;

static const uint8_t _knownHeader[4] = { 0xCE, 0xED, 0x66, 0x66};

#define DMG_BIOS_CHECKSUM 0xC2F5CC97
#define DMG_2_BIOS_CHECKSUM 0x59C8598E
#define MGB_BIOS_CHECKSUM 0xE6920754
#define SGB_BIOS_CHECKSUM 0xEC8A83B9
#define SGB2_BIOS_CHECKSUM 0x53D0DD63
#define CGB_BIOS_CHECKSUM 0x41884E46

mLOG_DEFINE_CATEGORY(GB, "GB", "gb");

static void GBInit(void* cpu, struct mCPUComponent* component);
static void GBDeinit(struct mCPUComponent* component);
static void GBInterruptHandlerInit(struct SM83InterruptHandler* irqh);
static void GBProcessEvents(struct SM83Core* cpu);
static void GBSetInterrupts(struct SM83Core* cpu, bool enable);
static uint16_t GBIRQVector(struct SM83Core* cpu);
static void GBIllegal(struct SM83Core* cpu);
static void GBStop(struct SM83Core* cpu);

static void _enableInterrupts(struct mTiming* timing, void* user, uint32_t cyclesLate);

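// GBCreate only wires up the mCPUComponent hooks; the real setup happens in
// GBInit once the core attaches this component to the SM83 CPU, and GBDeinit
// mirrors it by tearing down the timing state.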
void GBCreate(struct GB* gb) {
	gb->d.id = GB_COMPONENT_MAGIC;
	gb->d.init = GBInit;
	gb->d.deinit = GBDeinit;
}

static void GBInit(void* cpu, struct mCPUComponent* component) {
	struct GB* gb = (struct GB*) component;
	gb->cpu = cpu;
	gb->sync = NULL;

	GBInterruptHandlerInit(&gb->cpu->irqh);
	GBMemoryInit(gb);

	gb->video.p = gb;
	GBVideoInit(&gb->video);

	gb->audio.p = gb;
	GBAudioInit(&gb->audio, 2048, &gb->memory.io[GB_REG_NR52], GB_AUDIO_DMG); // TODO: Remove magic constant

	gb->sio.p = gb;
	GBSIOInit(&gb->sio);

	gb->timer.p = gb;

	gb->model = GB_MODEL_AUTODETECT;

	gb->biosVf = NULL;
	gb->romVf = NULL;
	gb->sramVf = NULL;
	gb->sramRealVf = NULL;

	gb->isPristine = false;
	gb->pristineRomSize = 0;
	gb->yankedRomSize = 0;

	mCoreCallbacksListInit(&gb->coreCallbacks, 0);
	gb->stream = NULL;

	mTimingInit(&gb->timing, &gb->cpu->cycles, &gb->cpu->nextEvent);
	gb->audio.timing = &gb->timing;

	gb->eiPending.name = "GB EI";
	gb->eiPending.callback = _enableInterrupts;
	gb->eiPending.context = gb;
	gb->eiPending.priority = 0;
}

static void GBDeinit(struct mCPUComponent* component) {
	struct GB* gb = (struct GB*) component;
	mTimingDeinit(&gb->timing);
}

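// Maps the ROM read-only straight from the VFile, records its CRC32, and lets
// GBMBCInit pick a memory bank controller from the header; only a failed map
// reports failure (see the TODO below about missing error checks).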
bool GBLoadROM(struct GB* gb, struct VFile* vf) {
	if (!vf) {
		return false;
	}
	GBUnloadROM(gb);
	gb->romVf = vf;
	gb->pristineRomSize = vf->size(vf);
	vf->seek(vf, 0, SEEK_SET);
	gb->isPristine = true;
	gb->memory.rom = vf->map(vf, gb->pristineRomSize, MAP_READ);
	if (!gb->memory.rom) {
		return false;
	}
	gb->yankedRomSize = 0;
	gb->memory.romBase = gb->memory.rom;
	gb->memory.romSize = gb->pristineRomSize;
	gb->romCrc32 = doCrc32(gb->memory.rom, gb->memory.romSize);
	GBMBCInit(gb);

	if (gb->cpu) {
		GBMBCSwitchBank(gb, 0);
		struct SM83Core* cpu = gb->cpu;
		cpu->memory.setActiveRegion(cpu, cpu->pc);
	}

	// TODO: error check
	return true;
}

void GBYankROM(struct GB* gb) {
	gb->yankedRomSize = gb->memory.romSize;
	gb->yankedMbc = gb->memory.mbcType;
	gb->memory.romSize = 0;
	gb->memory.mbcType = GB_MBC_NONE;
	gb->memory.sramAccess = false;

	if (gb->cpu) {
		struct SM83Core* cpu = gb->cpu;
		cpu->memory.setActiveRegion(cpu, cpu->pc);
	}
}

static void GBSramDeinit(struct GB* gb) {
	if (gb->sramVf) {
		gb->sramVf->unmap(gb->sramVf, gb->memory.sram, gb->sramSize);
		if (gb->memory.mbcType == GB_MBC3_RTC && gb->sramVf == gb->sramRealVf) {
			GBMBCRTCWrite(gb);
		}
		gb->sramVf = NULL;
	} else if (gb->memory.sram) {
		mappedMemoryFree(gb->memory.sram, gb->sramSize);
	}
	gb->memory.sram = 0;
}

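// Attaches the real save file. If the SRAM size is already known, the file is
// mapped right away; otherwise GBResizeSram does it later once the MBC has
// decided how much SRAM exists. The return value only reports whether a file
// was supplied.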
bool GBLoadSave(struct GB* gb, struct VFile* vf) {
	GBSramDeinit(gb);
	gb->sramVf = vf;
	gb->sramRealVf = vf;
	if (gb->sramSize) {
		GBResizeSram(gb, gb->sramSize);
		GBMBCSwitchSramBank(gb, gb->memory.sramCurrentBank);

		if (gb->memory.mbcType == GB_MBC3_RTC) {
			GBMBCRTCRead(gb);
		}
	}
	return vf;
}

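// Grows the backing save to at least `size` bytes. When the real save file has
// a tail that is not SRAM (size not a multiple of 0x100, e.g. appended RTC
// state), up to 0xFF trailing bytes are carried across the truncate and
// rewritten after the new SRAM area; newly added space is filled with 0xFF.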
void GBResizeSram(struct GB* gb, size_t size) {
	if (gb->memory.sram && size <= gb->sramSize) {
		return;
	}
	struct VFile* vf = gb->sramVf;
	if (vf) {
		if (vf == gb->sramRealVf) {
			ssize_t vfSize = vf->size(vf);
			if (vfSize >= 0 && (size_t) vfSize < size) {
				uint8_t extdataBuffer[0x100];
				if (vfSize & 0xFF) {
					vf->seek(vf, -(vfSize & 0xFF), SEEK_END);
					vf->read(vf, extdataBuffer, vfSize & 0xFF);
				}
				if (gb->memory.sram) {
					vf->unmap(vf, gb->memory.sram, gb->sramSize);
				}
				vf->truncate(vf, size + (vfSize & 0xFF));
				if (vfSize & 0xFF) {
					vf->seek(vf, size, SEEK_SET);
					vf->write(vf, extdataBuffer, vfSize & 0xFF);
				}
				gb->memory.sram = vf->map(vf, size, MAP_WRITE);
				memset(&gb->memory.sram[vfSize], 0xFF, size - vfSize);
			} else if (size > gb->sramSize || !gb->memory.sram) {
				if (gb->memory.sram) {
					vf->unmap(vf, gb->memory.sram, gb->sramSize);
				}
				gb->memory.sram = vf->map(vf, size, MAP_WRITE);
			}
		} else {
			if (gb->memory.sram) {
				vf->unmap(vf, gb->memory.sram, gb->sramSize);
			}
			gb->memory.sram = vf->map(vf, size, MAP_READ);
		}
		if (gb->memory.sram == (void*) -1) {
			gb->memory.sram = NULL;
		}
	} else if (size) {
		uint8_t* newSram = anonymousMemoryMap(size);
		if (gb->memory.sram) {
			if (size > gb->sramSize) {
				memcpy(newSram, gb->memory.sram, gb->sramSize);
				memset(&newSram[gb->sramSize], 0xFF, size - gb->sramSize);
			} else {
				memcpy(newSram, gb->memory.sram, size);
			}
			mappedMemoryFree(gb->memory.sram, gb->sramSize);
		} else {
			memset(newSram, 0xFF, size);
		}
		gb->memory.sram = newSram;
	}
	if (gb->sramSize < size) {
		gb->sramSize = size;
	}
}

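// Called once per frame: fresh writes restart an aging counter, and only after
// CLEANUP_THRESHOLD quiet frames is the mapped SRAM synced back to disk and
// the savedataUpdated callbacks fired.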
void GBSramClean(struct GB* gb, uint32_t frameCount) {
	// TODO: Share with GBASavedataClean
	if (!gb->sramVf) {
		return;
	}
	if (gb->sramDirty & GB_SRAM_DIRT_NEW) {
		gb->sramDirtAge = frameCount;
		gb->sramDirty &= ~GB_SRAM_DIRT_NEW;
		if (!(gb->sramDirty & GB_SRAM_DIRT_SEEN)) {
			gb->sramDirty |= GB_SRAM_DIRT_SEEN;
		}
	} else if ((gb->sramDirty & GB_SRAM_DIRT_SEEN) && frameCount - gb->sramDirtAge > CLEANUP_THRESHOLD) {
		if (gb->sramMaskWriteback) {
			GBSavedataUnmask(gb);
		}
		if (gb->memory.mbcType == GB_MBC3_RTC) {
			GBMBCRTCWrite(gb);
		}
		gb->sramDirty = 0;
		if (gb->memory.sram && gb->sramVf->sync(gb->sramVf, gb->memory.sram, gb->sramSize)) {
			mLOG(GB_MEM, INFO, "Savedata synced");
		} else {
			mLOG(GB_MEM, INFO, "Savedata failed to sync!");
		}

		size_t c;
		for (c = 0; c < mCoreCallbacksListSize(&gb->coreCallbacks); ++c) {
			struct mCoreCallbacks* callbacks = mCoreCallbacksListGetPointer(&gb->coreCallbacks, c);
			if (callbacks->savedataUpdated) {
				callbacks->savedataUpdated(callbacks->context);
			}
		}
	}
}

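// Temporarily swaps the SRAM backing for a read-only view of vf (used for
// things like loading savestates with embedded savedata); GBSavedataUnmask
// restores the real file and, if writeback was requested, copies the masked
// contents into it.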
void GBSavedataMask(struct GB* gb, struct VFile* vf, bool writeback) {
	struct VFile* oldVf = gb->sramVf;
	GBSramDeinit(gb);
	if (oldVf && oldVf != gb->sramRealVf) {
		oldVf->close(oldVf);
	}
	gb->sramVf = vf;
	gb->sramMaskWriteback = writeback;
	gb->memory.sram = vf->map(vf, gb->sramSize, MAP_READ);
	GBMBCSwitchSramBank(gb, gb->memory.sramCurrentBank);
}

void GBSavedataUnmask(struct GB* gb) {
	if (!gb->sramRealVf || gb->sramVf == gb->sramRealVf) {
		return;
	}
	struct VFile* vf = gb->sramVf;
	GBSramDeinit(gb);
	gb->sramVf = gb->sramRealVf;
	gb->memory.sram = gb->sramVf->map(gb->sramVf, gb->sramSize, MAP_WRITE);
	if (gb->sramMaskWriteback) {
		vf->seek(vf, 0, SEEK_SET);
		vf->read(vf, gb->memory.sram, gb->sramSize);
		gb->sramMaskWriteback = false;
	}
	GBMBCSwitchSramBank(gb, gb->memory.sramCurrentBank);
	vf->close(vf);
}

void GBUnloadROM(struct GB* gb) {
	// TODO: Share with GBAUnloadROM
	if (gb->memory.rom && gb->memory.romBase != gb->memory.rom && !gb->isPristine) {
		free(gb->memory.romBase);
	}
	if (gb->memory.rom && !gb->isPristine) {
		if (gb->yankedRomSize) {
			gb->yankedRomSize = 0;
		}
		mappedMemoryFree(gb->memory.rom, GB_SIZE_CART_MAX);
	}

	if (gb->romVf) {
#ifndef FIXED_ROM_BUFFER
		gb->romVf->unmap(gb->romVf, gb->memory.rom, gb->pristineRomSize);
#endif
		gb->romVf->close(gb->romVf);
		gb->romVf = NULL;
	}
	gb->memory.rom = NULL;
	gb->memory.mbcType = GB_MBC_AUTODETECT;
	gb->isPristine = false;

	gb->sramMaskWriteback = false;
	GBSramDeinit(gb);
	if (gb->sramRealVf) {
		gb->sramRealVf->close(gb->sramRealVf);
	}
	gb->sramRealVf = NULL;
	gb->sramVf = NULL;
	if (gb->memory.cam && gb->memory.cam->stopRequestImage) {
		gb->memory.cam->stopRequestImage(gb->memory.cam);
	}
}

void GBSynthesizeROM(struct VFile* vf) {
	if (!vf) {
		return;
	}
	const struct GBCartridge cart = {
		.logo = { _knownHeader[0], _knownHeader[1], _knownHeader[2], _knownHeader[3]}
	};

	vf->seek(vf, 0x100, SEEK_SET);
	vf->write(vf, &cart, sizeof(cart));
}

void GBLoadBIOS(struct GB* gb, struct VFile* vf) {
	gb->biosVf = vf;
}

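// Applies a ROM patch into a fresh GB_SIZE_CART_MAX anonymous mapping, then
// swaps it in for the pristine file-backed ROM and recomputes the CRC32.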
void GBApplyPatch(struct GB* gb, struct Patch* patch) {
	size_t patchedSize = patch->outputSize(patch, gb->memory.romSize);
	if (!patchedSize) {
		return;
	}
	if (patchedSize > GB_SIZE_CART_MAX) {
		patchedSize = GB_SIZE_CART_MAX;
	}
	void* newRom = anonymousMemoryMap(GB_SIZE_CART_MAX);
	if (!patch->applyPatch(patch, gb->memory.rom, gb->pristineRomSize, newRom, patchedSize)) {
		mappedMemoryFree(newRom, GB_SIZE_CART_MAX);
		return;
	}
	if (gb->romVf) {
#ifndef FIXED_ROM_BUFFER
		gb->romVf->unmap(gb->romVf, gb->memory.rom, gb->pristineRomSize);
#endif
		gb->romVf->close(gb->romVf);
		gb->romVf = NULL;
	}
	gb->isPristine = false;
	if (gb->memory.romBase == gb->memory.rom) {
		gb->memory.romBase = newRom;
	}
	gb->memory.rom = newRom;
	gb->memory.romSize = patchedSize;
	gb->romCrc32 = doCrc32(gb->memory.rom, gb->memory.romSize);
	gb->cpu->memory.setActiveRegion(gb->cpu, gb->cpu->pc);
}

void GBDestroy(struct GB* gb) {
	GBUnloadROM(gb);

	if (gb->biosVf) {
		gb->biosVf->close(gb->biosVf);
		gb->biosVf = 0;
	}

	GBMemoryDeinit(gb);
	GBAudioDeinit(&gb->audio);
	GBVideoDeinit(&gb->video);
	GBSIODeinit(&gb->sio);
	mCoreCallbacksListDeinit(&gb->coreCallbacks);
}

void GBInterruptHandlerInit(struct SM83InterruptHandler* irqh) {
	irqh->reset = GBReset;
	irqh->processEvents = GBProcessEvents;
	irqh->setInterrupts = GBSetInterrupts;
	irqh->irqVector = GBIRQVector;
	irqh->hitIllegal = GBIllegal;
	irqh->stop = GBStop;
	irqh->halt = GBHalt;
}

static uint32_t _GBBiosCRC32(struct VFile* vf) {
	ssize_t size = vf->size(vf);
	if (size <= 0 || size > GB_SIZE_CART_BANK0) {
		return 0;
	}
	void* bios = vf->map(vf, size, MAP_READ);
	uint32_t biosCrc = doCrc32(bios, size);
	vf->unmap(vf, bios, size);
	return biosCrc;
}

bool GBIsBIOS(struct VFile* vf) {
	switch (_GBBiosCRC32(vf)) {
	case DMG_BIOS_CHECKSUM:
	case DMG_2_BIOS_CHECKSUM:
	case MGB_BIOS_CHECKSUM:
	case SGB_BIOS_CHECKSUM:
	case SGB2_BIOS_CHECKSUM:
	case CGB_BIOS_CHECKSUM:
		return true;
	default:
		return false;
	}
}

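// Full power-on reset. With a verified boot ROM mapped, the CPU starts at
// address 0 with cleared registers; otherwise GBSkipBIOS below fakes the
// post-boot state.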
void GBReset(struct SM83Core* cpu) {
	struct GB* gb = (struct GB*) cpu->master;
	gb->memory.romBase = gb->memory.rom;
	GBDetectModel(gb);

	cpu->b = 0;
	cpu->d = 0;

	gb->timer.internalDiv = 0;

	gb->cpuBlocked = false;
	gb->earlyExit = false;
	gb->doubleSpeed = 0;

	if (gb->yankedRomSize) {
		gb->memory.romSize = gb->yankedRomSize;
		gb->memory.mbcType = gb->yankedMbc;
		gb->yankedRomSize = 0;
	}

	gb->sgbBit = -1;
	gb->sgbControllers = 0;
	gb->sgbCurrentController = 0;
	gb->currentSgbBits = 0;
	gb->sgbIncrement = false;
	memset(gb->sgbPacket, 0, sizeof(gb->sgbPacket));

	mTimingClear(&gb->timing);

	GBMemoryReset(gb);

	if (gb->biosVf) {
		if (!GBIsBIOS(gb->biosVf)) {
			gb->biosVf->close(gb->biosVf);
			gb->biosVf = NULL;
		} else {
			GBMapBIOS(gb);
			cpu->a = 0;
			cpu->f.packed = 0;
			cpu->c = 0;
			cpu->e = 0;
			cpu->h = 0;
			cpu->l = 0;
			cpu->sp = 0;
			cpu->pc = 0;
		}
	}

	GBVideoReset(&gb->video);
	GBTimerReset(&gb->timer);
	GBIOReset(gb);
	if (!gb->biosVf && gb->memory.rom) {
		GBSkipBIOS(gb);
	} else {
		mTimingSchedule(&gb->timing, &gb->timer.event, 0);
	}

	GBAudioReset(&gb->audio);
	GBSIOReset(&gb->sio);

	cpu->memory.setActiveRegion(cpu, cpu->pc);

	gb->sramMaskWriteback = false;
	GBSavedataUnmask(gb);
}

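// Seeds the registers, DIV, and a handful of IO registers with the values the
// boot ROM would have left for each model (the CGB case also drops into DMG
// compatibility mode when the cartridge is not CGB-aware), then starts
// execution at 0x100.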
void GBSkipBIOS(struct GB* gb) {
	struct SM83Core* cpu = gb->cpu;
	const struct GBCartridge* cart = (const struct GBCartridge*) &gb->memory.rom[0x100];
	int nextDiv = 0;

	switch (gb->model) {
	case GB_MODEL_AUTODETECT: // Silence warnings
		gb->model = GB_MODEL_DMG;
		// Fall through
	case GB_MODEL_DMG:
		cpu->a = 1;
		cpu->f.packed = 0xB0;
		cpu->c = 0x13;
		cpu->e = 0xD8;
		cpu->h = 1;
		cpu->l = 0x4D;
		gb->timer.internalDiv = 0xABC;
		nextDiv = 4;
		break;
	case GB_MODEL_SGB:
		cpu->a = 1;
		cpu->f.packed = 0x00;
		cpu->c = 0x14;
		cpu->e = 0x00;
		cpu->h = 0xC0;
		cpu->l = 0x60;
		gb->timer.internalDiv = 0xD85;
		nextDiv = 8;
		break;
	case GB_MODEL_MGB:
		cpu->a = 0xFF;
		cpu->f.packed = 0xB0;
		cpu->c = 0x13;
		cpu->e = 0xD8;
		cpu->h = 1;
		cpu->l = 0x4D;
		gb->timer.internalDiv = 0xABC;
		nextDiv = 4;
		break;
	case GB_MODEL_SGB2:
		cpu->a = 0xFF;
		cpu->f.packed = 0x00;
		cpu->c = 0x14;
		cpu->e = 0x00;
		cpu->h = 0xC0;
		cpu->l = 0x60;
		gb->timer.internalDiv = 0xD84;
		nextDiv = 8;
		break;
	case GB_MODEL_AGB:
		cpu->b = 1;
		// Fall through
	case GB_MODEL_CGB:
		cpu->a = 0x11;
		if (gb->model == GB_MODEL_AGB) {
			cpu->f.packed = 0x00;
		} else {
			cpu->f.packed = 0x80;
		}
		cpu->c = 0;
		cpu->h = 0;
		if (cart->cgb & 0x80) {
			cpu->d = 0xFF;
			cpu->e = 0x56;
			cpu->l = 0x0D;
			gb->timer.internalDiv = 0x2F0;
		} else {
			cpu->e = 0x08;
			cpu->l = 0x7C;
			gb->timer.internalDiv = 0x260;
			gb->model = GB_MODEL_DMG;
			gb->memory.io[GB_REG_KEY1] = 0xFF;
			gb->memory.io[GB_REG_BCPS] = 0x88; // Faked writing 4 BG palette entries
			gb->memory.io[GB_REG_OCPS] = 0x90; // Faked writing 8 OBJ palette entries
			gb->memory.io[GB_REG_SVBK] = 0xFF;
			GBVideoDisableCGB(&gb->video);
		}
		nextDiv = 0xC;
		break;
	}

	cpu->sp = 0xFFFE;
	cpu->pc = 0x100;

	gb->timer.nextDiv = GB_DMG_DIV_PERIOD * (16 - nextDiv);

	mTimingDeschedule(&gb->timing, &gb->timer.event);
	mTimingSchedule(&gb->timing, &gb->timer.event, gb->timer.nextDiv);

	GBIOWrite(gb, GB_REG_LCDC, 0x91);
	GBVideoSkipBIOS(&gb->video);

	if (gb->biosVf) {
		GBUnmapBIOS(gb);
	}
}

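// Replaces bank 0 with a copy of the boot ROM, padding the remainder from the
// original bank and restoring the cartridge header at 0x100 so header reads
// still work while the BIOS is mapped.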
void GBMapBIOS(struct GB* gb) {
	gb->biosVf->seek(gb->biosVf, 0, SEEK_SET);
	uint8_t* oldRomBase = gb->memory.romBase;
	gb->memory.romBase = malloc(GB_SIZE_CART_BANK0);
	ssize_t size = gb->biosVf->read(gb->biosVf, gb->memory.romBase, GB_SIZE_CART_BANK0);
	memcpy(&gb->memory.romBase[size], &oldRomBase[size], GB_SIZE_CART_BANK0 - size);
	if (size > 0x100) {
		memcpy(&gb->memory.romBase[0x100], &oldRomBase[0x100], sizeof(struct GBCartridge));
	}
}

void GBUnmapBIOS(struct GB* gb) {
	if (gb->memory.romBase < gb->memory.rom || gb->memory.romBase > &gb->memory.rom[gb->memory.romSize - 1]) {
		free(gb->memory.romBase);
		if (gb->memory.mbcType == GB_MMM01) {
			GBMBCSwitchBank0(gb, gb->memory.romSize / GB_SIZE_CART_BANK0 - 2);
		} else {
			GBMBCSwitchBank0(gb, 0);
		}
	}
	// XXX: Force AGB registers for AGB-mode
	if (gb->model == GB_MODEL_AGB && gb->cpu->pc == 0x100) {
		gb->cpu->b = 1;
	}
}

void GBDetectModel(struct GB* gb) {
	if (gb->model != GB_MODEL_AUTODETECT) {
		return;
	}
	if (gb->biosVf) {
		switch (_GBBiosCRC32(gb->biosVf)) {
		case DMG_BIOS_CHECKSUM:
		case DMG_2_BIOS_CHECKSUM:
			gb->model = GB_MODEL_DMG;
			break;
		case MGB_BIOS_CHECKSUM:
			gb->model = GB_MODEL_MGB;
			break;
		case SGB_BIOS_CHECKSUM:
			gb->model = GB_MODEL_SGB;
			break;
		case SGB2_BIOS_CHECKSUM:
			gb->model = GB_MODEL_SGB2;
			break;
		case CGB_BIOS_CHECKSUM:
			gb->model = GB_MODEL_CGB;
			break;
		default:
			gb->biosVf->close(gb->biosVf);
			gb->biosVf = NULL;
		}
	}
	if (gb->model == GB_MODEL_AUTODETECT && gb->memory.rom) {
		const struct GBCartridge* cart = (const struct GBCartridge*) &gb->memory.rom[0x100];
		if (cart->cgb & 0x80) {
			gb->model = GB_MODEL_CGB;
		} else if (cart->sgb == 0x03 && cart->oldLicensee == 0x33) {
			gb->model = GB_MODEL_SGB;
		} else {
			gb->model = GB_MODEL_DMG;
		}
	}

	switch (gb->model) {
	case GB_MODEL_DMG:
	case GB_MODEL_SGB:
	case GB_MODEL_AUTODETECT: // Silence warnings
		gb->audio.style = GB_AUDIO_DMG;
		break;
	case GB_MODEL_MGB:
	case GB_MODEL_SGB2:
		gb->audio.style = GB_AUDIO_MGB;
		break;
	case GB_MODEL_AGB:
	case GB_MODEL_CGB:
		gb->audio.style = GB_AUDIO_CGB;
		break;
	}
}

int GBValidModels(const uint8_t* bank0) {
	const struct GBCartridge* cart = (const struct GBCartridge*) &bank0[0x100];
	int models;
	if (cart->cgb == 0x80) {
		models = GB_MODEL_CGB | GB_MODEL_MGB;
	} else if (cart->cgb == 0xC0) {
		models = GB_MODEL_CGB;
	} else {
		models = GB_MODEL_MGB;
	}
	if (cart->sgb == 0x03 && cart->oldLicensee == 0x33) {
		models |= GB_MODEL_SGB;
	}
	return models;
}

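// Recomputes the pending-IRQ state from IE & IF. A pending interrupt always
// wakes HALT, but the IRQ is only raised on the core when IME is set.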
void GBUpdateIRQs(struct GB* gb) {
	int irqs = gb->memory.ie & gb->memory.io[GB_REG_IF] & 0x1F;
	if (!irqs) {
		gb->cpu->irqPending = false;
		return;
	}
	gb->cpu->halted = false;

	if (!gb->memory.ime) {
		gb->cpu->irqPending = false;
		return;
	}
	if (gb->cpu->irqPending) {
		return;
	}
	SM83RaiseIRQ(gb->cpu);
}

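// Drains the banked-up cycles into mTimingTick, looping while the CPU is
// blocked, then hands the resulting next-event time back to the SM83 core;
// a halted CPU skips straight to that event.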
void GBProcessEvents(struct SM83Core* cpu) {
	struct GB* gb = (struct GB*) cpu->master;
	do {
		int32_t cycles = cpu->cycles;
		int32_t nextEvent;

		cpu->cycles = 0;
		cpu->nextEvent = INT_MAX;

		nextEvent = cycles;
		do {
#ifdef USE_DEBUGGERS
			gb->timing.globalCycles += nextEvent;
#endif
			nextEvent = mTimingTick(&gb->timing, nextEvent);
		} while (gb->cpuBlocked);
		// This loop cannot early exit until the SM83 run loop properly handles mid-M-cycle-exits
		cpu->nextEvent = nextEvent;

		if (cpu->halted) {
			cpu->cycles = cpu->nextEvent;
			if (!gb->memory.ie || !gb->memory.ime) {
				break;
			}
		}
		if (gb->earlyExit) {
			break;
		}
	} while (cpu->cycles >= cpu->nextEvent);
	gb->earlyExit = false;
	if (gb->cpuBlocked) {
		cpu->cycles = cpu->nextEvent;
	}
}

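// DI takes effect immediately; EI is deferred via the eiPending timing event
// (4 cycles scaled by tMultiplier), which sets IME in _enableInterrupts below.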
void GBSetInterrupts(struct SM83Core* cpu, bool enable) {
	struct GB* gb = (struct GB*) cpu->master;
	mTimingDeschedule(&gb->timing, &gb->eiPending);
	if (!enable) {
		gb->memory.ime = false;
		GBUpdateIRQs(gb);
	} else {
		mTimingSchedule(&gb->timing, &gb->eiPending, 4 * cpu->tMultiplier);
	}
}

uint16_t GBIRQVector(struct SM83Core* cpu) {
	struct GB* gb = (struct GB*) cpu->master;
	int irqs = gb->memory.ie & gb->memory.io[GB_REG_IF];

	if (irqs & (1 << GB_IRQ_VBLANK)) {
		gb->memory.io[GB_REG_IF] &= ~(1 << GB_IRQ_VBLANK);
		return GB_VECTOR_VBLANK;
	}
	if (irqs & (1 << GB_IRQ_LCDSTAT)) {
		gb->memory.io[GB_REG_IF] &= ~(1 << GB_IRQ_LCDSTAT);
		return GB_VECTOR_LCDSTAT;
	}
	if (irqs & (1 << GB_IRQ_TIMER)) {
		gb->memory.io[GB_REG_IF] &= ~(1 << GB_IRQ_TIMER);
		return GB_VECTOR_TIMER;
	}
	if (irqs & (1 << GB_IRQ_SIO)) {
		gb->memory.io[GB_REG_IF] &= ~(1 << GB_IRQ_SIO);
		return GB_VECTOR_SIO;
	}
	if (irqs & (1 << GB_IRQ_KEYPAD)) {
		gb->memory.io[GB_REG_IF] &= ~(1 << GB_IRQ_KEYPAD);
		return GB_VECTOR_KEYPAD;
	}
	return 0;
}

static void _enableInterrupts(struct mTiming* timing, void* user, uint32_t cyclesLate) {
	UNUSED(timing);
	UNUSED(cyclesLate);
	struct GB* gb = user;
	gb->memory.ime = true;
	GBUpdateIRQs(gb);
}

void GBHalt(struct SM83Core* cpu) {
	struct GB* gb = (struct GB*) cpu->master;
	if (!(gb->memory.ie & gb->memory.io[GB_REG_IF] & 0x1F)) {
		cpu->cycles = cpu->nextEvent;
		cpu->halted = true;
	} else if (!gb->memory.ime) {
		mLOG(GB, GAME_ERROR, "HALT bug");
		cpu->executionState = SM83_CORE_HALT_BUG;
	}
}

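// STOP doubles as the speed switch on CGB and later models: if KEY1 has been
// armed, the clock multiplier is toggled instead of stopping. Otherwise the
// frontend callbacks decide between sleep and shutdown based on the JOYP
// selection bits.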
void GBStop(struct SM83Core* cpu) {
	struct GB* gb = (struct GB*) cpu->master;
	if (gb->model >= GB_MODEL_CGB && gb->memory.io[GB_REG_KEY1] & 1) {
		gb->doubleSpeed ^= 1;
		gb->cpu->tMultiplier = 2 - gb->doubleSpeed;
		gb->memory.io[GB_REG_KEY1] = 0;
		gb->memory.io[GB_REG_KEY1] |= gb->doubleSpeed << 7;
	} else {
		int sleep = ~(gb->memory.io[GB_REG_JOYP] & 0x30);
		size_t c;
		for (c = 0; c < mCoreCallbacksListSize(&gb->coreCallbacks); ++c) {
			struct mCoreCallbacks* callbacks = mCoreCallbacksListGetPointer(&gb->coreCallbacks, c);
			if (sleep && callbacks->sleep) {
				callbacks->sleep(callbacks->context);
			} else if (callbacks->shutdown) {
				callbacks->shutdown(callbacks->context);
			}
		}
	}
}

void GBIllegal(struct SM83Core* cpu) {
	struct GB* gb = (struct GB*) cpu->master;
	mLOG(GB, GAME_ERROR, "Hit illegal opcode at address %04X:%02X", cpu->pc, cpu->bus);
#ifdef USE_DEBUGGERS
	if (cpu->components && cpu->components[CPU_COMPONENT_DEBUGGER]) {
		struct mDebuggerEntryInfo info = {
			.address = cpu->pc,
			.type.bp.opcode = cpu->bus
		};
		mDebuggerEnter((struct mDebugger*) cpu->components[CPU_COMPONENT_DEBUGGER], DEBUGGER_ENTER_ILLEGAL_OP, &info);
	}
#endif
	// Hang forever
	gb->memory.ime = 0;
	--cpu->pc;
}

bool GBIsROM(struct VFile* vf) {
	if (!vf) {
		return false;
	}
	vf->seek(vf, 0x104, SEEK_SET);
	uint8_t header[4];

	if (vf->read(vf, &header, sizeof(header)) < (ssize_t) sizeof(header)) {
		return false;
	}
	if (memcmp(header, _knownHeader, sizeof(header))) {
		return false;
	}
	return true;
}

void GBGetGameTitle(const struct GB* gb, char* out) {
	const struct GBCartridge* cart = NULL;
	if (gb->memory.rom) {
		cart = (const struct GBCartridge*) &gb->memory.rom[0x100];
	}
	if (!cart) {
		return;
	}
	if (cart->oldLicensee != 0x33) {
		memcpy(out, cart->titleLong, 16);
	} else {
		memcpy(out, cart->titleShort, 11);
	}
}

void GBGetGameCode(const struct GB* gb, char* out) {
	memset(out, 0, 8);
	const struct GBCartridge* cart = NULL;
	if (gb->memory.rom) {
		cart = (const struct GBCartridge*) &gb->memory.rom[0x100];
	}
	if (!cart) {
		return;
	}
	if (cart->cgb == 0xC0) {
		memcpy(out, "CGB-????", 8);
	} else {
		memcpy(out, "DMG-????", 8);
	}
	if (cart->oldLicensee == 0x33) {
		memcpy(&out[4], cart->maker, 4);
	}
}

void GBFrameStarted(struct GB* gb) {
	GBTestKeypadIRQ(gb);

	size_t c;
	for (c = 0; c < mCoreCallbacksListSize(&gb->coreCallbacks); ++c) {
		struct mCoreCallbacks* callbacks = mCoreCallbacksListGetPointer(&gb->coreCallbacks, c);
		if (callbacks->videoFrameStarted) {
			callbacks->videoFrameStarted(callbacks->context);
		}
	}
}

void GBFrameEnded(struct GB* gb) {
	GBSramClean(gb, gb->video.frameCounter);

	if (gb->cpu->components && gb->cpu->components[CPU_COMPONENT_CHEAT_DEVICE]) {
		struct mCheatDevice* device = (struct mCheatDevice*) gb->cpu->components[CPU_COMPONENT_CHEAT_DEVICE];
		size_t i;
		for (i = 0; i < mCheatSetsSize(&device->cheats); ++i) {
			struct mCheatSet* cheats = *mCheatSetsGetPointer(&device->cheats, i);
			mCheatRefresh(device, cheats);
		}
	}

	// TODO: Move to common code
	if (gb->stream && gb->stream->postVideoFrame) {
		const color_t* pixels;
		size_t stride;
		gb->video.renderer->getPixels(gb->video.renderer, &stride, (const void**) &pixels);
		gb->stream->postVideoFrame(gb->stream, pixels, stride);
	}

	size_t c;
	for (c = 0; c < mCoreCallbacksListSize(&gb->coreCallbacks); ++c) {
		struct mCoreCallbacks* callbacks = mCoreCallbacksListGetPointer(&gb->coreCallbacks, c);
		if (callbacks->videoFrameEnded) {
			callbacks->videoFrameEnded(callbacks->context);
		}
	}
}

enum GBModel GBNameToModel(const char* model) {
	if (strcasecmp(model, "DMG") == 0) {
		return GB_MODEL_DMG;
	} else if (strcasecmp(model, "CGB") == 0) {
		return GB_MODEL_CGB;
	} else if (strcasecmp(model, "AGB") == 0) {
		return GB_MODEL_AGB;
	} else if (strcasecmp(model, "SGB") == 0) {
		return GB_MODEL_SGB;
	} else if (strcasecmp(model, "MGB") == 0) {
		return GB_MODEL_MGB;
	} else if (strcasecmp(model, "SGB2") == 0) {
		return GB_MODEL_SGB2;
	}
	return GB_MODEL_AUTODETECT;
}

const char* GBModelToName(enum GBModel model) {
	switch (model) {
	case GB_MODEL_DMG:
		return "DMG";
	case GB_MODEL_SGB:
		return "SGB";
	case GB_MODEL_MGB:
		return "MGB";
	case GB_MODEL_SGB2:
		return "SGB2";
	case GB_MODEL_CGB:
		return "CGB";
	case GB_MODEL_AGB:
		return "AGB";
	default:
	case GB_MODEL_AUTODETECT:
		return NULL;
	}
}