src/gb/gb.c (view raw)
1/* Copyright (c) 2013-2016 Jeffrey Pfau
2 *
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6#include <mgba/internal/gb/gb.h>
7
8#include <mgba/internal/gb/io.h>
9#include <mgba/internal/gb/mbc.h>
10#include <mgba/internal/sm83/sm83.h>
11
12#include <mgba/core/core.h>
13#include <mgba/core/cheats.h>
14#include <mgba-util/crc32.h>
15#include <mgba-util/memory.h>
16#include <mgba-util/math.h>
17#include <mgba-util/patch.h>
18#include <mgba-util/vfs.h>
19
20#define CLEANUP_THRESHOLD 15
21
22const uint32_t CGB_SM83_FREQUENCY = 0x800000;
23const uint32_t SGB_SM83_FREQUENCY = 0x418B1E;
24
25const uint32_t GB_COMPONENT_MAGIC = 0x400000;
26
27static const uint8_t _knownHeader[4] = { 0xCE, 0xED, 0x66, 0x66};
28
29#define DMG_BIOS_CHECKSUM 0xC2F5CC97
30#define DMG_2_BIOS_CHECKSUM 0x59C8598E
31#define MGB_BIOS_CHECKSUM 0xE6920754
32#define SGB_BIOS_CHECKSUM 0xEC8A83B9
33#define SGB2_BIOS_CHECKSUM 0X53D0DD63
34#define CGB_BIOS_CHECKSUM 0x41884E46
35
36mLOG_DEFINE_CATEGORY(GB, "GB", "gb");
37
38static void GBInit(void* cpu, struct mCPUComponent* component);
39static void GBDeinit(struct mCPUComponent* component);
40static void GBInterruptHandlerInit(struct SM83InterruptHandler* irqh);
41static void GBProcessEvents(struct SM83Core* cpu);
42static void GBSetInterrupts(struct SM83Core* cpu, bool enable);
43static uint16_t GBIRQVector(struct SM83Core* cpu);
44static void GBIllegal(struct SM83Core* cpu);
45static void GBStop(struct SM83Core* cpu);
46
47static void _enableInterrupts(struct mTiming* timing, void* user, uint32_t cyclesLate);
48
49void GBCreate(struct GB* gb) {
50 gb->d.id = GB_COMPONENT_MAGIC;
51 gb->d.init = GBInit;
52 gb->d.deinit = GBDeinit;
53}
54
static void GBInit(void* cpu, struct mCPUComponent* component) {
	// Bind the GB to its CPU and bring every subsystem to a known baseline.
	// Invoked once by the core framework after GBCreate.
	struct GB* gb = (struct GB*) component;
	gb->cpu = cpu;
	gb->sync = NULL;

	GBInterruptHandlerInit(&gb->cpu->irqh);
	GBMemoryInit(gb);

	gb->video.p = gb;
	GBVideoInit(&gb->video);

	gb->audio.p = gb;
	GBAudioInit(&gb->audio, 2048, &gb->memory.io[GB_REG_NR52], GB_AUDIO_DMG); // TODO: Remove magic constant

	gb->sio.p = gb;
	GBSIOInit(&gb->sio);

	gb->timer.p = gb;

	gb->model = GB_MODEL_AUTODETECT;

	// No media attached yet; GBLoadROM/GBLoadSave/GBLoadBIOS fill these in.
	gb->biosVf = NULL;
	gb->romVf = NULL;
	gb->sramVf = NULL;
	gb->sramRealVf = NULL;

	gb->isPristine = false;
	gb->pristineRomSize = 0;
	gb->yankedRomSize = 0;

	mCoreCallbacksListInit(&gb->coreCallbacks, 0);
	gb->stream = NULL;

	// The timing wheel is driven off the CPU's cycle/nextEvent counters.
	mTimingInit(&gb->timing, &gb->cpu->cycles, &gb->cpu->nextEvent);
	gb->audio.timing = &gb->timing;

	// Deferred-EI event: EI takes effect one instruction later
	// (scheduled by GBSetInterrupts, fired by _enableInterrupts).
	gb->eiPending.name = "GB EI";
	gb->eiPending.callback = _enableInterrupts;
	gb->eiPending.context = gb;
	gb->eiPending.priority = 0;
}
96
97static void GBDeinit(struct mCPUComponent* component) {
98 struct GB* gb = (struct GB*) component;
99 mTimingDeinit(&gb->timing);
100}
101
102bool GBLoadROM(struct GB* gb, struct VFile* vf) {
103 if (!vf) {
104 return false;
105 }
106 GBUnloadROM(gb);
107 gb->romVf = vf;
108 gb->pristineRomSize = vf->size(vf);
109 vf->seek(vf, 0, SEEK_SET);
110 gb->isPristine = true;
111 gb->memory.rom = vf->map(vf, gb->pristineRomSize, MAP_READ);
112 if (!gb->memory.rom) {
113 return false;
114 }
115 gb->yankedRomSize = 0;
116 gb->memory.romSize = gb->pristineRomSize;
117 gb->romCrc32 = doCrc32(gb->memory.rom, gb->memory.romSize);
118 memset(&gb->memory.mbcState, 0, sizeof(gb->memory.mbcState));
119 GBMBCReset(gb);
120
121 if (gb->cpu) {
122 struct SM83Core* cpu = gb->cpu;
123 cpu->memory.setActiveRegion(cpu, cpu->pc);
124 }
125
126 // TODO: error check
127 return true;
128}
129
130void GBYankROM(struct GB* gb) {
131 gb->yankedRomSize = gb->memory.romSize;
132 gb->yankedMbc = gb->memory.mbcType;
133 gb->memory.romSize = 0;
134 gb->memory.mbcType = GB_MBC_NONE;
135 GBMBCReset(gb);
136
137 if (gb->cpu) {
138 struct SM83Core* cpu = gb->cpu;
139 cpu->memory.setActiveRegion(cpu, cpu->pc);
140 }
141}
142
143static void GBSramDeinit(struct GB* gb) {
144 if (gb->sramVf) {
145 gb->sramVf->unmap(gb->sramVf, gb->memory.sram, gb->sramSize);
146 if (gb->memory.mbcType == GB_MBC3_RTC && gb->sramVf == gb->sramRealVf) {
147 GBMBCRTCWrite(gb);
148 }
149 gb->sramVf = NULL;
150 } else if (gb->memory.sram) {
151 mappedMemoryFree(gb->memory.sram, gb->sramSize);
152 }
153 gb->memory.sram = 0;
154}
155
156bool GBLoadSave(struct GB* gb, struct VFile* vf) {
157 GBSramDeinit(gb);
158 gb->sramVf = vf;
159 gb->sramRealVf = vf;
160 if (gb->sramSize) {
161 GBResizeSram(gb, gb->sramSize);
162 GBMBCSwitchSramBank(gb, gb->memory.sramCurrentBank);
163
164 if (gb->memory.mbcType == GB_MBC3_RTC) {
165 GBMBCRTCRead(gb);
166 }
167 }
168 return vf;
169}
170
void GBResizeSram(struct GB* gb, size_t size) {
	// Grow (never shrink) the SRAM backing store to at least `size` bytes.
	if (gb->memory.sram && size <= gb->sramSize) {
		return;
	}
	struct VFile* vf = gb->sramVf;
	if (vf) {
		if (vf == gb->sramRealVf) {
			ssize_t vfSize = vf->size(vf);
			if (vfSize >= 0 && (size_t) vfSize < size) {
				// The save file is smaller than requested: extend it, taking
				// care to preserve any sub-0x100-byte trailer (extra data
				// such as RTC state) at the end of the file.
				uint8_t extdataBuffer[0x100];
				if (vfSize & 0xFF) {
					vf->seek(vf, -(vfSize & 0xFF), SEEK_END);
					vf->read(vf, extdataBuffer, vfSize & 0xFF);
				}
				if (gb->memory.sram) {
					vf->unmap(vf, gb->memory.sram, gb->sramSize);
				}
				vf->truncate(vf, size + (vfSize & 0xFF));
				if (vfSize & 0xFF) {
					// Re-append the trailer after the enlarged SRAM area.
					vf->seek(vf, size, SEEK_SET);
					vf->write(vf, extdataBuffer, vfSize & 0xFF);
				}
				gb->memory.sram = vf->map(vf, size, MAP_WRITE);
				// NOTE(review): a failed map may yield (void*) -1, which is
				// only normalized below, after this memset — confirm map
				// cannot fail on a freshly truncated file.
				memset(&gb->memory.sram[vfSize], 0xFF, size - vfSize);
			} else if (size > gb->sramSize || !gb->memory.sram) {
				// File is already large enough; (re)map a bigger window.
				if (gb->memory.sram) {
					vf->unmap(vf, gb->memory.sram, gb->sramSize);
				}
				gb->memory.sram = vf->map(vf, size, MAP_WRITE);
			}
		} else {
			// Masked (temporary) save: map read-only, never resize the file.
			if (gb->memory.sram) {
				vf->unmap(vf, gb->memory.sram, gb->sramSize);
			}
			gb->memory.sram = vf->map(vf, size, MAP_READ);
		}
		if (gb->memory.sram == (void*) -1) {
			gb->memory.sram = NULL;
		}
	} else if (size) {
		// No backing file: use anonymous memory, preserving old contents and
		// filling fresh bytes with 0xFF (erased-memory convention).
		uint8_t* newSram = anonymousMemoryMap(size);
		if (gb->memory.sram) {
			if (size > gb->sramSize) {
				memcpy(newSram, gb->memory.sram, gb->sramSize);
				memset(&newSram[gb->sramSize], 0xFF, size - gb->sramSize);
			} else {
				memcpy(newSram, gb->memory.sram, size);
			}
			mappedMemoryFree(gb->memory.sram, gb->sramSize);
		} else {
			memset(newSram, 0xFF, size);
		}
		gb->memory.sram = newSram;
	}
	if (gb->sramSize < size) {
		gb->sramSize = size;
	}
}
229
void GBSramClean(struct GB* gb, uint32_t frameCount) {
	// TODO: Share with GBASavedataClean
	// Debounced save flushing: writes mark the SRAM dirty, and only after
	// CLEANUP_THRESHOLD quiet frames is the file actually synced to disk.
	if (!gb->sramVf) {
		return;
	}
	if (gb->sramDirty & GB_SRAM_DIRT_NEW) {
		// Fresh dirt: restart the age counter.
		gb->sramDirtAge = frameCount;
		gb->sramDirty &= ~GB_SRAM_DIRT_NEW;
		if (!(gb->sramDirty & GB_SRAM_DIRT_SEEN)) {
			gb->sramDirty |= GB_SRAM_DIRT_SEEN;
		}
	} else if ((gb->sramDirty & GB_SRAM_DIRT_SEEN) && frameCount - gb->sramDirtAge > CLEANUP_THRESHOLD) {
		// Quiet long enough: restore the real save if masked, flush RTC
		// state if present, then sync the mapped region.
		if (gb->sramMaskWriteback) {
			GBSavedataUnmask(gb);
		}
		if (gb->memory.mbcType == GB_MBC3_RTC) {
			GBMBCRTCWrite(gb);
		}
		gb->sramDirty = 0;
		if (gb->memory.sram && gb->sramVf->sync(gb->sramVf, gb->memory.sram, gb->sramSize)) {
			mLOG(GB_MEM, INFO, "Savedata synced");
		} else {
			mLOG(GB_MEM, INFO, "Savedata failed to sync!");
		}

		// Notify listeners that the on-disk save changed.
		size_t c;
		for (c = 0; c < mCoreCallbacksListSize(&gb->coreCallbacks); ++c) {
			struct mCoreCallbacks* callbacks = mCoreCallbacksListGetPointer(&gb->coreCallbacks, c);
			if (callbacks->savedataUpdated) {
				callbacks->savedataUpdated(callbacks->context);
			}
		}
	}
}
264
void GBSavedataMask(struct GB* gb, struct VFile* vf, bool writeback) {
	// Temporarily replace the active save with vf (e.g. while a savestate's
	// embedded save is in use). If writeback is set, changes made while
	// masked are copied back to the real save on unmask.
	struct VFile* oldVf = gb->sramVf;
	GBSramDeinit(gb);
	if (oldVf && oldVf != gb->sramRealVf) {
		// A previous mask was still active; dispose of it.
		oldVf->close(oldVf);
	}
	gb->sramVf = vf;
	gb->sramMaskWriteback = writeback;
	// Masked saves are mapped read-only; GBResizeSram never resizes them.
	gb->memory.sram = vf->map(vf, gb->sramSize, MAP_READ);
	GBMBCSwitchSramBank(gb, gb->memory.sramCurrentBank);
}
276
void GBSavedataUnmask(struct GB* gb) {
	// Restore the real save file after GBSavedataMask. No-op when no mask is
	// currently active.
	if (!gb->sramRealVf || gb->sramVf == gb->sramRealVf) {
		return;
	}
	struct VFile* vf = gb->sramVf;
	GBSramDeinit(gb);
	gb->sramVf = gb->sramRealVf;
	gb->memory.sram = gb->sramVf->map(gb->sramVf, gb->sramSize, MAP_WRITE);
	if (gb->sramMaskWriteback) {
		// Copy the masked contents over the real save before discarding it.
		vf->seek(vf, 0, SEEK_SET);
		vf->read(vf, gb->memory.sram, gb->sramSize);
		gb->sramMaskWriteback = false;
	}
	GBMBCSwitchSramBank(gb, gb->memory.sramCurrentBank);
	vf->close(vf);
}
293
void GBUnloadROM(struct GB* gb) {
	// TODO: Share with GBAUnloadROM
	// If romBase points into the ROM buffer we are about to release, clear
	// it so nothing dangles.
	off_t romBase = gb->memory.romBase - gb->memory.rom;
	if (romBase >= 0 && ((size_t) romBase < gb->memory.romSize || (size_t) romBase < gb->yankedRomSize)) {
		gb->memory.romBase = NULL;
	}
	if (gb->memory.rom && !gb->isPristine) {
		// Non-pristine ROMs (patched/synthesized) live in an anonymous
		// GB_SIZE_CART_MAX mapping that this code owns.
		if (gb->yankedRomSize) {
			gb->yankedRomSize = 0;
		}
		mappedMemoryFree(gb->memory.rom, GB_SIZE_CART_MAX);
	}

	if (gb->romVf) {
#ifndef FIXED_ROM_BUFFER
		gb->romVf->unmap(gb->romVf, gb->memory.rom, gb->pristineRomSize);
#endif
		gb->romVf->close(gb->romVf);
		gb->romVf = NULL;
	}
	gb->memory.rom = NULL;
	gb->memory.mbcType = GB_MBC_AUTODETECT;
	gb->isPristine = false;

	// Drop any savestate mask WITHOUT writing it back, then release saves.
	gb->sramMaskWriteback = false;
	GBSavedataUnmask(gb);
	GBSramDeinit(gb);
	if (gb->sramRealVf) {
		gb->sramRealVf->close(gb->sramRealVf);
	}
	gb->sramRealVf = NULL;
	gb->sramVf = NULL;
	// Stop any in-flight camera capture tied to the unloaded cart.
	if (gb->memory.cam && gb->memory.cam->stopRequestImage) {
		gb->memory.cam->stopRequestImage(gb->memory.cam);
	}
}
330
331void GBSynthesizeROM(struct VFile* vf) {
332 if (!vf) {
333 return;
334 }
335 const struct GBCartridge cart = {
336 .logo = { _knownHeader[0], _knownHeader[1], _knownHeader[2], _knownHeader[3]}
337 };
338
339 vf->seek(vf, 0x100, SEEK_SET);
340 vf->write(vf, &cart, sizeof(cart));
341}
342
// Attach a BIOS image. Ownership of vf transfers to the GB: it is closed by
// GBDestroy, or during GBReset if the image fails the checksum validation.
void GBLoadBIOS(struct GB* gb, struct VFile* vf) {
	gb->biosVf = vf;
}
346
347void GBApplyPatch(struct GB* gb, struct Patch* patch) {
348 size_t patchedSize = patch->outputSize(patch, gb->memory.romSize);
349 if (!patchedSize) {
350 return;
351 }
352 if (patchedSize > GB_SIZE_CART_MAX) {
353 patchedSize = GB_SIZE_CART_MAX;
354 }
355 void* newRom = anonymousMemoryMap(GB_SIZE_CART_MAX);
356 if (!patch->applyPatch(patch, gb->memory.rom, gb->pristineRomSize, newRom, patchedSize)) {
357 mappedMemoryFree(newRom, GB_SIZE_CART_MAX);
358 return;
359 }
360 if (gb->romVf) {
361#ifndef FIXED_ROM_BUFFER
362 gb->romVf->unmap(gb->romVf, gb->memory.rom, gb->pristineRomSize);
363#endif
364 gb->romVf->close(gb->romVf);
365 gb->romVf = NULL;
366 }
367 gb->isPristine = false;
368 if (gb->memory.romBase == gb->memory.rom) {
369 gb->memory.romBase = newRom;
370 }
371 gb->memory.rom = newRom;
372 gb->memory.romSize = patchedSize;
373 gb->romCrc32 = doCrc32(gb->memory.rom, gb->memory.romSize);
374 gb->cpu->memory.setActiveRegion(gb->cpu, gb->cpu->pc);
375}
376
377void GBDestroy(struct GB* gb) {
378 GBUnmapBIOS(gb);
379 GBUnloadROM(gb);
380
381 if (gb->biosVf) {
382 gb->biosVf->close(gb->biosVf);
383 gb->biosVf = 0;
384 }
385
386 GBMemoryDeinit(gb);
387 GBAudioDeinit(&gb->audio);
388 GBVideoDeinit(&gb->video);
389 GBSIODeinit(&gb->sio);
390 mCoreCallbacksListDeinit(&gb->coreCallbacks);
391}
392
393void GBInterruptHandlerInit(struct SM83InterruptHandler* irqh) {
394 irqh->reset = GBReset;
395 irqh->processEvents = GBProcessEvents;
396 irqh->setInterrupts = GBSetInterrupts;
397 irqh->irqVector = GBIRQVector;
398 irqh->hitIllegal = GBIllegal;
399 irqh->stop = GBStop;
400 irqh->halt = GBHalt;
401}
402
403static uint32_t _GBBiosCRC32(struct VFile* vf) {
404 ssize_t size = vf->size(vf);
405 if (size <= 0 || size > GB_SIZE_CART_BANK0) {
406 return 0;
407 }
408 void* bios = vf->map(vf, size, MAP_READ);
409 uint32_t biosCrc = doCrc32(bios, size);
410 vf->unmap(vf, bios, size);
411 return biosCrc;
412}
413
414bool GBIsBIOS(struct VFile* vf) {
415 switch (_GBBiosCRC32(vf)) {
416 case DMG_BIOS_CHECKSUM:
417 case DMG_2_BIOS_CHECKSUM:
418 case MGB_BIOS_CHECKSUM:
419 case SGB_BIOS_CHECKSUM:
420 case SGB2_BIOS_CHECKSUM:
421 case CGB_BIOS_CHECKSUM:
422 return true;
423 default:
424 return false;
425 }
426}
427
void GBReset(struct SM83Core* cpu) {
	// Full power-on reset: restore the ROM mapping, detect the model, clear
	// CPU and subsystem state, then either run the BIOS or skip it.
	struct GB* gb = (struct GB*) cpu->master;
	gb->memory.romBase = gb->memory.rom;
	GBDetectModel(gb);

	cpu->b = 0;
	cpu->d = 0;

	gb->timer.internalDiv = 0;

	gb->cpuBlocked = false;
	gb->earlyExit = false;
	gb->doubleSpeed = 0;

	// Restore a ROM that was temporarily pulled (see GBYankROM).
	if (gb->yankedRomSize) {
		gb->memory.romSize = gb->yankedRomSize;
		gb->memory.mbcType = gb->yankedMbc;
		gb->yankedRomSize = 0;
	}

	// Reset Super Game Boy packet-transfer state.
	gb->sgbBit = -1;
	gb->sgbControllers = 0;
	gb->sgbCurrentController = 0;
	gb->currentSgbBits = 0;
	gb->sgbIncrement = false;
	memset(gb->sgbPacket, 0, sizeof(gb->sgbPacket));

	mTimingClear(&gb->timing);

	GBMemoryReset(gb);

	if (gb->biosVf) {
		if (!GBIsBIOS(gb->biosVf)) {
			// Unrecognized BIOS image: drop it and boot without one.
			gb->biosVf->close(gb->biosVf);
			gb->biosVf = NULL;
		} else {
			GBMapBIOS(gb);
			// Zero the registers so the BIOS starts from a clean state.
			cpu->a = 0;
			cpu->f.packed = 0;
			cpu->c = 0;
			cpu->e = 0;
			cpu->h = 0;
			cpu->l = 0;
			cpu->sp = 0;
			cpu->pc = 0;
		}
	}

	GBVideoReset(&gb->video);
	GBTimerReset(&gb->timer);
	GBIOReset(gb);
	if (!gb->biosVf && gb->memory.rom) {
		// No BIOS available: fake the post-BIOS machine state directly.
		GBSkipBIOS(gb);
	} else {
		mTimingSchedule(&gb->timing, &gb->timer.event, 0);
	}

	GBAudioReset(&gb->audio);
	GBSIOReset(&gb->sio);

	cpu->memory.setActiveRegion(cpu, cpu->pc);

	// A reset never writes a masked save back to the real file.
	gb->sramMaskWriteback = false;
	GBSavedataUnmask(gb);
}
493
void GBSkipBIOS(struct GB* gb) {
	// Emulate the machine state the boot ROM leaves behind for the detected
	// model (registers, DIV counter, I/O), then jump straight to the
	// cartridge entry point at 0x100.
	struct SM83Core* cpu = gb->cpu;
	const struct GBCartridge* cart = (const struct GBCartridge*) &gb->memory.rom[0x100];
	int nextDiv = 0;

	switch (gb->model) {
	case GB_MODEL_AUTODETECT: // Silence warnings
		gb->model = GB_MODEL_DMG;
		// Fall through
	case GB_MODEL_DMG:
		cpu->a = 1;
		cpu->f.packed = 0xB0;
		cpu->c = 0x13;
		cpu->e = 0xD8;
		cpu->h = 1;
		cpu->l = 0x4D;
		gb->timer.internalDiv = 0xABC;
		nextDiv = 4;
		break;
	case GB_MODEL_SGB:
		cpu->a = 1;
		cpu->f.packed = 0x00;
		cpu->c = 0x14;
		cpu->e = 0x00;
		cpu->h = 0xC0;
		cpu->l = 0x60;
		gb->timer.internalDiv = 0xD85;
		nextDiv = 8;
		break;
	case GB_MODEL_MGB:
		cpu->a = 0xFF;
		cpu->f.packed = 0xB0;
		cpu->c = 0x13;
		cpu->e = 0xD8;
		cpu->h = 1;
		cpu->l = 0x4D;
		gb->timer.internalDiv = 0xABC;
		nextDiv = 4;
		break;
	case GB_MODEL_SGB2:
		cpu->a = 0xFF;
		cpu->f.packed = 0x00;
		cpu->c = 0x14;
		cpu->e = 0x00;
		cpu->h = 0xC0;
		cpu->l = 0x60;
		gb->timer.internalDiv = 0xD84;
		nextDiv = 8;
		break;
	case GB_MODEL_AGB:
		cpu->b = 1; // B=1 is how software tells AGB apart from CGB
		// Fall through
	case GB_MODEL_CGB:
		cpu->a = 0x11;
		if (gb->model == GB_MODEL_AGB) {
			cpu->f.packed = 0x00;
		} else {
			cpu->f.packed = 0x80;
		}
		cpu->c = 0;
		cpu->h = 0;
		if (cart->cgb & 0x80) {
			// CGB-aware cartridge: stay in CGB mode.
			cpu->d = 0xFF;
			cpu->e = 0x56;
			cpu->l = 0x0D;
			gb->timer.internalDiv = 0x2F0;
		} else {
			// DMG-only cartridge on CGB hardware: drop to compatibility
			// mode, with the I/O registers the boot ROM would leave set.
			cpu->e = 0x08;
			cpu->l = 0x7C;
			gb->timer.internalDiv = 0x260;
			gb->model = GB_MODEL_DMG;
			gb->memory.io[GB_REG_KEY1] = 0xFF;
			gb->memory.io[GB_REG_BCPS] = 0x88; // Faked writing 4 BG palette entries
			gb->memory.io[GB_REG_OCPS] = 0x90; // Faked writing 8 OBJ palette entries
			gb->memory.io[GB_REG_SVBK] = 0xFF;
			GBVideoDisableCGB(&gb->video);
		}
		nextDiv = 0xC;
		break;
	}

	cpu->sp = 0xFFFE;
	cpu->pc = 0x100;

	gb->timer.nextDiv = GB_DMG_DIV_PERIOD * (16 - nextDiv);

	// Realign the DIV tick with the model-specific boot timing.
	mTimingDeschedule(&gb->timing, &gb->timer.event);
	mTimingSchedule(&gb->timing, &gb->timer.event, gb->timer.nextDiv);

	GBIOWrite(gb, GB_REG_LCDC, 0x91);
	gb->memory.io[GB_REG_BANK] = 0x1; // Mark the boot ROM as disabled
	GBVideoSkipBIOS(&gb->video);

	if (gb->biosVf) {
		GBUnmapBIOS(gb);
	}
}
591
592void GBMapBIOS(struct GB* gb) {
593 gb->biosVf->seek(gb->biosVf, 0, SEEK_SET);
594 gb->memory.romBase = malloc(GB_SIZE_CART_BANK0);
595 ssize_t size = gb->biosVf->read(gb->biosVf, gb->memory.romBase, GB_SIZE_CART_BANK0);
596 if (gb->memory.rom) {
597 memcpy(&gb->memory.romBase[size], &gb->memory.rom[size], GB_SIZE_CART_BANK0 - size);
598 if (size > 0x100) {
599 memcpy(&gb->memory.romBase[0x100], &gb->memory.rom[0x100], sizeof(struct GBCartridge));
600 }
601 }
602}
603
void GBUnmapBIOS(struct GB* gb) {
	// Free the private BIOS copy made by GBMapBIOS and restore bank 0 from
	// the cartridge. GB_REG_BANK == 0xFF indicates the boot ROM is still
	// mapped (GBSkipBIOS writes 0x1 once it is disabled).
	if (gb->memory.io[GB_REG_BANK] == 0xFF && gb->memory.romBase != gb->memory.rom) {
		free(gb->memory.romBase);
		if (gb->memory.mbcType == GB_MMM01) {
			// MMM01 boots from the end of the ROM image, not bank 0.
			GBMBCSwitchBank0(gb, gb->memory.romSize / GB_SIZE_CART_BANK0 - 2);
		} else {
			GBMBCSwitchBank0(gb, 0);
		}
	}
	// XXX: Force AGB registers for AGB-mode
	if (gb->model == GB_MODEL_AGB && gb->cpu->pc == 0x100) {
		gb->cpu->b = 1;
	}
}
618
void GBDetectModel(struct GB* gb) {
	// Resolve GB_MODEL_AUTODETECT into a concrete model: first by the loaded
	// BIOS checksum, then by cartridge header flags, defaulting to DMG.
	// Also selects the matching audio quirk profile.
	if (gb->model != GB_MODEL_AUTODETECT) {
		return;
	}
	if (gb->biosVf) {
		switch (_GBBiosCRC32(gb->biosVf)) {
		case DMG_BIOS_CHECKSUM:
		case DMG_2_BIOS_CHECKSUM:
			gb->model = GB_MODEL_DMG;
			break;
		case MGB_BIOS_CHECKSUM:
			gb->model = GB_MODEL_MGB;
			break;
		case SGB_BIOS_CHECKSUM:
			gb->model = GB_MODEL_SGB;
			break;
		case SGB2_BIOS_CHECKSUM:
			gb->model = GB_MODEL_SGB2;
			break;
		case CGB_BIOS_CHECKSUM:
			gb->model = GB_MODEL_CGB;
			break;
		default:
			// Unknown image: discard it and fall back to header detection.
			gb->biosVf->close(gb->biosVf);
			gb->biosVf = NULL;
		}
	}
	if (gb->model == GB_MODEL_AUTODETECT && gb->memory.rom) {
		// No usable BIOS: pick the model from the cartridge header flags.
		const struct GBCartridge* cart = (const struct GBCartridge*) &gb->memory.rom[0x100];
		if (cart->cgb & 0x80) {
			gb->model = GB_MODEL_CGB;
		} else if (cart->sgb == 0x03 && cart->oldLicensee == 0x33) {
			gb->model = GB_MODEL_SGB;
		} else {
			gb->model = GB_MODEL_DMG;
		}
	}

	switch (gb->model) {
	case GB_MODEL_DMG:
	case GB_MODEL_SGB:
	case GB_MODEL_AUTODETECT: //Silence warnings
		gb->audio.style = GB_AUDIO_DMG;
		break;
	case GB_MODEL_MGB:
	case GB_MODEL_SGB2:
		gb->audio.style = GB_AUDIO_MGB;
		break;
	case GB_MODEL_AGB:
	case GB_MODEL_CGB:
		gb->audio.style = GB_AUDIO_CGB;
		break;
	}
}
673
674int GBValidModels(const uint8_t* bank0) {
675 const struct GBCartridge* cart = (const struct GBCartridge*) &bank0[0x100];
676 int models;
677 if (cart->cgb == 0x80) {
678 models = GB_MODEL_CGB | GB_MODEL_MGB;
679 } else if (cart->cgb == 0xC0) {
680 models = GB_MODEL_CGB;
681 } else {
682 models = GB_MODEL_MGB;
683 }
684 if (cart->sgb == 0x03 && cart->oldLicensee == 0x33) {
685 models |= GB_MODEL_SGB;
686 }
687 return models;
688}
689
void GBUpdateIRQs(struct GB* gb) {
	// Recompute the CPU-visible IRQ line from IE & IF (lower 5 bits only).
	int irqs = gb->memory.ie & gb->memory.io[GB_REG_IF] & 0x1F;
	if (!irqs) {
		gb->cpu->irqPending = false;
		return;
	}
	// A pending interrupt wakes HALT even when IME is clear.
	gb->cpu->halted = false;

	if (!gb->memory.ime) {
		gb->cpu->irqPending = false;
		return;
	}
	if (gb->cpu->irqPending) {
		// Already raised; don't re-raise.
		return;
	}
	SM83RaiseIRQ(gb->cpu);
}
707
void GBProcessEvents(struct SM83Core* cpu) {
	// Drain the timing wheel until the CPU is free to run again, updating
	// cpu->cycles/nextEvent as events fire.
	struct GB* gb = (struct GB*) cpu->master;
	do {
		int32_t cycles = cpu->cycles;
		int32_t nextEvent;

		cpu->cycles = 0;
		cpu->nextEvent = INT_MAX;

		nextEvent = cycles;
		do {
#ifdef USE_DEBUGGERS
			gb->timing.globalCycles += nextEvent;
#endif
			// Keep ticking while something (e.g. DMA) is blocking the CPU.
			nextEvent = mTimingTick(&gb->timing, nextEvent);
		} while (gb->cpuBlocked);
		// This loop cannot early exit until the SM83 run loop properly handles mid-M-cycle-exits
		cpu->nextEvent = nextEvent;

		if (cpu->halted) {
			// While halted, simply consume time up to the next event; stop
			// entirely if no interrupt could ever wake the CPU.
			cpu->cycles = cpu->nextEvent;
			if (!gb->memory.ie || !gb->memory.ime) {
				break;
			}
		}
		if (gb->earlyExit) {
			break;
		}
	} while (cpu->cycles >= cpu->nextEvent);
	gb->earlyExit = false;
	if (gb->cpuBlocked) {
		cpu->cycles = cpu->nextEvent;
	}
}
742
743void GBSetInterrupts(struct SM83Core* cpu, bool enable) {
744 struct GB* gb = (struct GB*) cpu->master;
745 mTimingDeschedule(&gb->timing, &gb->eiPending);
746 if (!enable) {
747 gb->memory.ime = false;
748 GBUpdateIRQs(gb);
749 } else {
750 mTimingSchedule(&gb->timing, &gb->eiPending, 4 * cpu->tMultiplier);
751 }
752}
753
754uint16_t GBIRQVector(struct SM83Core* cpu) {
755 struct GB* gb = (struct GB*) cpu->master;
756 int irqs = gb->memory.ie & gb->memory.io[GB_REG_IF];
757
758 if (irqs & (1 << GB_IRQ_VBLANK)) {
759 gb->memory.io[GB_REG_IF] &= ~(1 << GB_IRQ_VBLANK);
760 return GB_VECTOR_VBLANK;
761 }
762 if (irqs & (1 << GB_IRQ_LCDSTAT)) {
763 gb->memory.io[GB_REG_IF] &= ~(1 << GB_IRQ_LCDSTAT);
764 return GB_VECTOR_LCDSTAT;
765 }
766 if (irqs & (1 << GB_IRQ_TIMER)) {
767 gb->memory.io[GB_REG_IF] &= ~(1 << GB_IRQ_TIMER);
768 return GB_VECTOR_TIMER;
769 }
770 if (irqs & (1 << GB_IRQ_SIO)) {
771 gb->memory.io[GB_REG_IF] &= ~(1 << GB_IRQ_SIO);
772 return GB_VECTOR_SIO;
773 }
774 if (irqs & (1 << GB_IRQ_KEYPAD)) {
775 gb->memory.io[GB_REG_IF] &= ~(1 << GB_IRQ_KEYPAD);
776 return GB_VECTOR_KEYPAD;
777 }
778 return 0;
779}
780
781static void _enableInterrupts(struct mTiming* timing, void* user, uint32_t cyclesLate) {
782 UNUSED(timing);
783 UNUSED(cyclesLate);
784 struct GB* gb = user;
785 gb->memory.ime = true;
786 GBUpdateIRQs(gb);
787}
788
void GBHalt(struct SM83Core* cpu) {
	struct GB* gb = (struct GB*) cpu->master;
	if (!(gb->memory.ie & gb->memory.io[GB_REG_IF] & 0x1F)) {
		// Nothing pending: actually halt and fast-forward to the next event.
		cpu->cycles = cpu->nextEvent;
		cpu->halted = true;
	} else if (!gb->memory.ime) {
		// HALT with IME=0 and an interrupt already pending triggers the
		// hardware HALT bug; put the core in its dedicated bug state.
		mLOG(GB, GAME_ERROR, "HALT bug");
		cpu->executionState = SM83_CORE_HALT_BUG;
	}
}
799
void GBStop(struct SM83Core* cpu) {
	struct GB* gb = (struct GB*) cpu->master;
	// On CGB and later, STOP with KEY1 bit 0 set performs a speed switch
	// instead of stopping; bit 7 reports the active speed.
	if (gb->model >= GB_MODEL_CGB && gb->memory.io[GB_REG_KEY1] & 1) {
		gb->doubleSpeed ^= 1;
		gb->cpu->tMultiplier = 2 - gb->doubleSpeed;
		gb->memory.io[GB_REG_KEY1] = 0;
		gb->memory.io[GB_REG_KEY1] |= gb->doubleSpeed << 7;
	} else {
		// NOTE(review): ~(x & 0x30) is never zero, so `sleep` is always
		// truthy and the shutdown branch is reachable only when a callback
		// lacks a sleep handler — confirm whether (~x) & 0x30 was intended.
		int sleep = ~(gb->memory.io[GB_REG_JOYP] & 0x30);
		size_t c;
		for (c = 0; c < mCoreCallbacksListSize(&gb->coreCallbacks); ++c) {
			struct mCoreCallbacks* callbacks = mCoreCallbacksListGetPointer(&gb->coreCallbacks, c);
			if (sleep && callbacks->sleep) {
				callbacks->sleep(callbacks->context);
			} else if (callbacks->shutdown) {
				callbacks->shutdown(callbacks->context);
			}
		}
	}
}
820
void GBIllegal(struct SM83Core* cpu) {
	// Illegal opcode: log it, give an attached debugger a chance to trap,
	// then lock the CPU in place.
	struct GB* gb = (struct GB*) cpu->master;
	mLOG(GB, GAME_ERROR, "Hit illegal opcode at address %04X:%02X", cpu->pc, cpu->bus);
#ifdef USE_DEBUGGERS
	if (cpu->components && cpu->components[CPU_COMPONENT_DEBUGGER]) {
		struct mDebuggerEntryInfo info = {
			.address = cpu->pc,
			.type.bp.opcode = cpu->bus
		};
		mDebuggerEnter((struct mDebugger*) cpu->components[CPU_COMPONENT_DEBUGGER], DEBUGGER_ENTER_ILLEGAL_OP, &info);
	}
#endif
	// Hang forever
	gb->memory.ime = 0;
	--cpu->pc;
}
837
838bool GBIsROM(struct VFile* vf) {
839 if (!vf) {
840 return false;
841 }
842 vf->seek(vf, 0x104, SEEK_SET);
843 uint8_t header[4];
844
845 if (vf->read(vf, &header, sizeof(header)) < (ssize_t) sizeof(header)) {
846 return false;
847 }
848 if (memcmp(header, _knownHeader, sizeof(header))) {
849 return false;
850 }
851 return true;
852}
853
void GBGetGameTitle(const struct GB* gb, char* out) {
	// Copy the cartridge title into `out`. Old-licensee carts use the full
	// 16-byte title field; new-licensee carts (oldLicensee == 0x33) reserve
	// the tail of that field, leaving an 11-byte title.
	// NOTE(review): `out` is neither zeroed nor NUL-terminated here — the
	// caller must supply a pre-cleared buffer of at least 16 bytes; confirm
	// against call sites.
	const struct GBCartridge* cart = NULL;
	if (gb->memory.rom) {
		cart = (const struct GBCartridge*) &gb->memory.rom[0x100];
	}
	if (!cart) {
		return;
	}
	if (cart->oldLicensee != 0x33) {
		memcpy(out, cart->titleLong, 16);
	} else {
		memcpy(out, cart->titleShort, 11);
	}
}
868
869void GBGetGameCode(const struct GB* gb, char* out) {
870 memset(out, 0, 8);
871 const struct GBCartridge* cart = NULL;
872 if (gb->memory.rom) {
873 cart = (const struct GBCartridge*) &gb->memory.rom[0x100];
874 }
875 if (!cart) {
876 return;
877 }
878 if (cart->cgb == 0xC0) {
879 memcpy(out, "CGB-????", 8);
880 } else {
881 memcpy(out, "DMG-????", 8);
882 }
883 if (cart->oldLicensee == 0x33) {
884 memcpy(&out[4], cart->maker, 4);
885 }
886}
887
888void GBFrameStarted(struct GB* gb) {
889 GBTestKeypadIRQ(gb);
890
891 size_t c;
892 for (c = 0; c < mCoreCallbacksListSize(&gb->coreCallbacks); ++c) {
893 struct mCoreCallbacks* callbacks = mCoreCallbacksListGetPointer(&gb->coreCallbacks, c);
894 if (callbacks->videoFrameStarted) {
895 callbacks->videoFrameStarted(callbacks->context);
896 }
897 }
898}
899
void GBFrameEnded(struct GB* gb) {
	// Per-frame exit hook: debounce-flush the save, refresh active cheats,
	// push the frame to any attached stream, and fire callbacks.
	GBSramClean(gb, gb->video.frameCounter);

	if (gb->cpu->components && gb->cpu->components[CPU_COMPONENT_CHEAT_DEVICE]) {
		struct mCheatDevice* device = (struct mCheatDevice*) gb->cpu->components[CPU_COMPONENT_CHEAT_DEVICE];
		size_t i;
		for (i = 0; i < mCheatSetsSize(&device->cheats); ++i) {
			struct mCheatSet* cheats = *mCheatSetsGetPointer(&device->cheats, i);
			mCheatRefresh(device, cheats);
		}
	}

	// TODO: Move to common code
	if (gb->stream && gb->stream->postVideoFrame) {
		const color_t* pixels;
		size_t stride;
		gb->video.renderer->getPixels(gb->video.renderer, &stride, (const void**) &pixels);
		gb->stream->postVideoFrame(gb->stream, pixels, stride);
	}

	size_t c;
	for (c = 0; c < mCoreCallbacksListSize(&gb->coreCallbacks); ++c) {
		struct mCoreCallbacks* callbacks = mCoreCallbacksListGetPointer(&gb->coreCallbacks, c);
		if (callbacks->videoFrameEnded) {
			callbacks->videoFrameEnded(callbacks->context);
		}
	}
}
928
929enum GBModel GBNameToModel(const char* model) {
930 if (strcasecmp(model, "DMG") == 0) {
931 return GB_MODEL_DMG;
932 } else if (strcasecmp(model, "CGB") == 0) {
933 return GB_MODEL_CGB;
934 } else if (strcasecmp(model, "AGB") == 0) {
935 return GB_MODEL_AGB;
936 } else if (strcasecmp(model, "SGB") == 0) {
937 return GB_MODEL_SGB;
938 } else if (strcasecmp(model, "MGB") == 0) {
939 return GB_MODEL_MGB;
940 } else if (strcasecmp(model, "SGB2") == 0) {
941 return GB_MODEL_SGB2;
942 }
943 return GB_MODEL_AUTODETECT;
944}
945
946const char* GBModelToName(enum GBModel model) {
947 switch (model) {
948 case GB_MODEL_DMG:
949 return "DMG";
950 case GB_MODEL_SGB:
951 return "SGB";
952 case GB_MODEL_MGB:
953 return "MGB";
954 case GB_MODEL_SGB2:
955 return "SGB2";
956 case GB_MODEL_CGB:
957 return "CGB";
958 case GB_MODEL_AGB:
959 return "AGB";
960 default:
961 case GB_MODEL_AUTODETECT:
962 return NULL;
963 }
964}