src/gb/gb.c
/* Copyright (c) 2013-2016 Jeffrey Pfau
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include <mgba/internal/gb/gb.h>

#include <mgba/internal/gb/io.h>
#include <mgba/internal/gb/mbc.h>
#include <mgba/internal/sm83/sm83.h>

#include <mgba/core/core.h>
#include <mgba/core/cheats.h>
#include <mgba-util/crc32.h>
#include <mgba-util/memory.h>
#include <mgba-util/math.h>
#include <mgba-util/patch.h>
#include <mgba-util/vfs.h>

#define CLEANUP_THRESHOLD 15

const uint32_t CGB_SM83_FREQUENCY = 0x800000;
const uint32_t SGB_SM83_FREQUENCY = 0x418B1E;

const uint32_t GB_COMPONENT_MAGIC = 0x400000;

static const uint8_t _knownHeader[4] = { 0xCE, 0xED, 0x66, 0x66 };

#define DMG_BIOS_CHECKSUM 0xC2F5CC97
#define DMG_2_BIOS_CHECKSUM 0x59C8598E
#define MGB_BIOS_CHECKSUM 0xE6920754
#define SGB_BIOS_CHECKSUM 0xEC8A83B9
#define SGB2_BIOS_CHECKSUM 0x53D0DD63
#define CGB_BIOS_CHECKSUM 0x41884E46

mLOG_DEFINE_CATEGORY(GB, "GB", "gb");

static void GBInit(void* cpu, struct mCPUComponent* component);
static void GBDeinit(struct mCPUComponent* component);
static void GBInterruptHandlerInit(struct SM83InterruptHandler* irqh);
static void GBProcessEvents(struct SM83Core* cpu);
static void GBSetInterrupts(struct SM83Core* cpu, bool enable);
static uint16_t GBIRQVector(struct SM83Core* cpu);
static void GBIllegal(struct SM83Core* cpu);
static void GBStop(struct SM83Core* cpu);

static void _enableInterrupts(struct mTiming* timing, void* user, uint32_t cyclesLate);

void GBCreate(struct GB* gb) {
	gb->d.id = GB_COMPONENT_MAGIC;
	gb->d.init = GBInit;
	gb->d.deinit = GBDeinit;
}

static void GBInit(void* cpu, struct mCPUComponent* component) {
	struct GB* gb = (struct GB*) component;
	gb->cpu = cpu;
	gb->sync = NULL;

	GBInterruptHandlerInit(&gb->cpu->irqh);
	GBMemoryInit(gb);

	gb->video.p = gb;
	GBVideoInit(&gb->video);

	gb->audio.p = gb;
	GBAudioInit(&gb->audio, 2048, &gb->memory.io[GB_REG_NR52], GB_AUDIO_DMG); // TODO: Remove magic constant

	gb->sio.p = gb;
	GBSIOInit(&gb->sio);

	gb->timer.p = gb;

	gb->model = GB_MODEL_AUTODETECT;

	gb->biosVf = NULL;
	gb->romVf = NULL;
	gb->sramVf = NULL;
	gb->sramRealVf = NULL;

	gb->isPristine = false;
	gb->pristineRomSize = 0;
	gb->yankedRomSize = 0;

	mCoreCallbacksListInit(&gb->coreCallbacks, 0);
	gb->stream = NULL;

	mTimingInit(&gb->timing, &gb->cpu->cycles, &gb->cpu->nextEvent);
	gb->audio.timing = &gb->timing;

	gb->eiPending.name = "GB EI";
	gb->eiPending.callback = _enableInterrupts;
	gb->eiPending.context = gb;
	gb->eiPending.priority = 0;
}

static void GBDeinit(struct mCPUComponent* component) {
	struct GB* gb = (struct GB*) component;
	mTimingDeinit(&gb->timing);
}

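// Loads a ROM image from the given VFile: any previously loaded ROM is unloaded first, the
// new image is mapped read-only, its CRC32 is computed, and the MBC is initialized.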
bool GBLoadROM(struct GB* gb, struct VFile* vf) {
	if (!vf) {
		return false;
	}
	GBUnloadROM(gb);
	gb->romVf = vf;
	gb->pristineRomSize = vf->size(vf);
	vf->seek(vf, 0, SEEK_SET);
	gb->isPristine = true;
	gb->memory.rom = vf->map(vf, gb->pristineRomSize, MAP_READ);
	if (!gb->memory.rom) {
		return false;
	}
	gb->yankedRomSize = 0;
	gb->memory.romBase = gb->memory.rom;
	gb->memory.romSize = gb->pristineRomSize;
	gb->romCrc32 = doCrc32(gb->memory.rom, gb->memory.romSize);
	GBMBCInit(gb);

	if (gb->cpu) {
		struct SM83Core* cpu = gb->cpu;
		cpu->memory.setActiveRegion(cpu, cpu->pc);
	}

	// TODO: error check
	return true;
}

void GBYankROM(struct GB* gb) {
	gb->yankedRomSize = gb->memory.romSize;
	gb->yankedMbc = gb->memory.mbcType;
	gb->memory.romSize = 0;
	gb->memory.mbcType = GB_MBC_NONE;
	gb->memory.sramAccess = false;

	if (gb->cpu) {
		struct SM83Core* cpu = gb->cpu;
		cpu->memory.setActiveRegion(cpu, cpu->pc);
	}
}

static void GBSramDeinit(struct GB* gb) {
	if (gb->sramVf) {
		gb->sramVf->unmap(gb->sramVf, gb->memory.sram, gb->sramSize);
		if (gb->memory.mbcType == GB_MBC3_RTC && gb->sramVf == gb->sramRealVf) {
			GBMBCRTCWrite(gb);
		}
		gb->sramVf = NULL;
	} else if (gb->memory.sram) {
		mappedMemoryFree(gb->memory.sram, gb->sramSize);
	}
	gb->memory.sram = 0;
}

bool GBLoadSave(struct GB* gb, struct VFile* vf) {
	GBSramDeinit(gb);
	gb->sramVf = vf;
	gb->sramRealVf = vf;
	if (gb->sramSize) {
		GBResizeSram(gb, gb->sramSize);
		GBMBCSwitchSramBank(gb, gb->memory.sramCurrentBank);

		if (gb->memory.mbcType == GB_MBC3_RTC) {
			GBMBCRTCRead(gb);
		}
	}
	return vf;
}

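// Grows the SRAM buffer to at least `size` bytes; it never shrinks. When backed by the real
// save file, the trailing bytes that do not fit a 256-byte multiple (extdata such as MBC3 RTC
// state) are preserved across the resize, and newly exposed bytes are filled with 0xFF.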
void GBResizeSram(struct GB* gb, size_t size) {
	if (gb->memory.sram && size <= gb->sramSize) {
		return;
	}
	struct VFile* vf = gb->sramVf;
	if (vf) {
		if (vf == gb->sramRealVf) {
			ssize_t vfSize = vf->size(vf);
			if (vfSize >= 0 && (size_t) vfSize < size) {
				uint8_t extdataBuffer[0x100];
				if (vfSize & 0xFF) {
					vf->seek(vf, -(vfSize & 0xFF), SEEK_END);
					vf->read(vf, extdataBuffer, vfSize & 0xFF);
				}
				if (gb->memory.sram) {
					vf->unmap(vf, gb->memory.sram, gb->sramSize);
				}
				vf->truncate(vf, size + (vfSize & 0xFF));
				if (vfSize & 0xFF) {
					vf->seek(vf, size, SEEK_SET);
					vf->write(vf, extdataBuffer, vfSize & 0xFF);
				}
				gb->memory.sram = vf->map(vf, size, MAP_WRITE);
				memset(&gb->memory.sram[vfSize], 0xFF, size - vfSize);
			} else if (size > gb->sramSize || !gb->memory.sram) {
				if (gb->memory.sram) {
					vf->unmap(vf, gb->memory.sram, gb->sramSize);
				}
				gb->memory.sram = vf->map(vf, size, MAP_WRITE);
			}
		} else {
			if (gb->memory.sram) {
				vf->unmap(vf, gb->memory.sram, gb->sramSize);
			}
			gb->memory.sram = vf->map(vf, size, MAP_READ);
		}
		if (gb->memory.sram == (void*) -1) {
			gb->memory.sram = NULL;
		}
	} else if (size) {
		uint8_t* newSram = anonymousMemoryMap(size);
		if (gb->memory.sram) {
			if (size > gb->sramSize) {
				memcpy(newSram, gb->memory.sram, gb->sramSize);
				memset(&newSram[gb->sramSize], 0xFF, size - gb->sramSize);
			} else {
				memcpy(newSram, gb->memory.sram, size);
			}
			mappedMemoryFree(gb->memory.sram, gb->sramSize);
		} else {
			memset(newSram, 0xFF, size);
		}
		gb->memory.sram = newSram;
	}
	if (gb->sramSize < size) {
		gb->sramSize = size;
	}
}

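// Called once per frame: once SRAM has been written and then left untouched for
// CLEANUP_THRESHOLD frames, sync it (and MBC3 RTC state) back to the save file and notify
// the savedataUpdated callbacks.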
void GBSramClean(struct GB* gb, uint32_t frameCount) {
	// TODO: Share with GBASavedataClean
	if (!gb->sramVf) {
		return;
	}
	if (gb->sramDirty & GB_SRAM_DIRT_NEW) {
		gb->sramDirtAge = frameCount;
		gb->sramDirty &= ~GB_SRAM_DIRT_NEW;
		if (!(gb->sramDirty & GB_SRAM_DIRT_SEEN)) {
			gb->sramDirty |= GB_SRAM_DIRT_SEEN;
		}
	} else if ((gb->sramDirty & GB_SRAM_DIRT_SEEN) && frameCount - gb->sramDirtAge > CLEANUP_THRESHOLD) {
		if (gb->sramMaskWriteback) {
			GBSavedataUnmask(gb);
		}
		if (gb->memory.mbcType == GB_MBC3_RTC) {
			GBMBCRTCWrite(gb);
		}
		gb->sramDirty = 0;
		if (gb->memory.sram && gb->sramVf->sync(gb->sramVf, gb->memory.sram, gb->sramSize)) {
			mLOG(GB_MEM, INFO, "Savedata synced");
		} else {
			mLOG(GB_MEM, INFO, "Savedata failed to sync!");
		}

		size_t c;
		for (c = 0; c < mCoreCallbacksListSize(&gb->coreCallbacks); ++c) {
			struct mCoreCallbacks* callbacks = mCoreCallbacksListGetPointer(&gb->coreCallbacks, c);
			if (callbacks->savedataUpdated) {
				callbacks->savedataUpdated(callbacks->context);
			}
		}
	}
}

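// Temporarily swaps in a different VFile as the SRAM backing store; if writeback is set, its
// contents are copied into the real save file when GBSavedataUnmask restores it.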
void GBSavedataMask(struct GB* gb, struct VFile* vf, bool writeback) {
	struct VFile* oldVf = gb->sramVf;
	GBSramDeinit(gb);
	if (oldVf && oldVf != gb->sramRealVf) {
		oldVf->close(oldVf);
	}
	gb->sramVf = vf;
	gb->sramMaskWriteback = writeback;
	gb->memory.sram = vf->map(vf, gb->sramSize, MAP_READ);
	GBMBCSwitchSramBank(gb, gb->memory.sramCurrentBank);
}

void GBSavedataUnmask(struct GB* gb) {
	if (!gb->sramRealVf || gb->sramVf == gb->sramRealVf) {
		return;
	}
	struct VFile* vf = gb->sramVf;
	GBSramDeinit(gb);
	gb->sramVf = gb->sramRealVf;
	gb->memory.sram = gb->sramVf->map(gb->sramVf, gb->sramSize, MAP_WRITE);
	if (gb->sramMaskWriteback) {
		vf->seek(vf, 0, SEEK_SET);
		vf->read(vf, gb->memory.sram, gb->sramSize);
		gb->sramMaskWriteback = false;
	}
	GBMBCSwitchSramBank(gb, gb->memory.sramCurrentBank);
	vf->close(vf);
}

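// Releases the current ROM and save data: frees or unmaps the ROM buffer, closes the ROM and
// save VFiles, and resets the MBC type back to autodetect.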
void GBUnloadROM(struct GB* gb) {
	// TODO: Share with GBAUnloadROM
	if (gb->memory.rom && gb->memory.romBase != gb->memory.rom && !gb->isPristine) {
		free(gb->memory.romBase);
	}
	if (gb->memory.rom && !gb->isPristine) {
		if (gb->yankedRomSize) {
			gb->yankedRomSize = 0;
		}
		mappedMemoryFree(gb->memory.rom, GB_SIZE_CART_MAX);
	}

	if (gb->romVf) {
#ifndef FIXED_ROM_BUFFER
		gb->romVf->unmap(gb->romVf, gb->memory.rom, gb->pristineRomSize);
#endif
		gb->romVf->close(gb->romVf);
		gb->romVf = NULL;
	}
	gb->memory.rom = NULL;
	gb->memory.mbcType = GB_MBC_AUTODETECT;
	gb->isPristine = false;

	gb->sramMaskWriteback = false;
	GBSramDeinit(gb);
	if (gb->sramRealVf) {
		gb->sramRealVf->close(gb->sramRealVf);
	}
	gb->sramRealVf = NULL;
	gb->sramVf = NULL;
	if (gb->memory.cam && gb->memory.cam->stopRequestImage) {
		gb->memory.cam->stopRequestImage(gb->memory.cam);
	}
}

void GBSynthesizeROM(struct VFile* vf) {
	if (!vf) {
		return;
	}
	const struct GBCartridge cart = {
		.logo = { _knownHeader[0], _knownHeader[1], _knownHeader[2], _knownHeader[3] }
	};

	vf->seek(vf, 0x100, SEEK_SET);
	vf->write(vf, &cart, sizeof(cart));
}

void GBLoadBIOS(struct GB* gb, struct VFile* vf) {
	gb->biosVf = vf;
}

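// Applies a patch to the loaded ROM: the patched image is built in a fresh GB_SIZE_CART_MAX
// buffer that replaces the pristine mapping, and the ROM CRC is recomputed.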
void GBApplyPatch(struct GB* gb, struct Patch* patch) {
	size_t patchedSize = patch->outputSize(patch, gb->memory.romSize);
	if (!patchedSize) {
		return;
	}
	if (patchedSize > GB_SIZE_CART_MAX) {
		patchedSize = GB_SIZE_CART_MAX;
	}
	void* newRom = anonymousMemoryMap(GB_SIZE_CART_MAX);
	if (!patch->applyPatch(patch, gb->memory.rom, gb->pristineRomSize, newRom, patchedSize)) {
		mappedMemoryFree(newRom, GB_SIZE_CART_MAX);
		return;
	}
	if (gb->romVf) {
#ifndef FIXED_ROM_BUFFER
		gb->romVf->unmap(gb->romVf, gb->memory.rom, gb->pristineRomSize);
#endif
		gb->romVf->close(gb->romVf);
		gb->romVf = NULL;
	}
	gb->isPristine = false;
	if (gb->memory.romBase == gb->memory.rom) {
		gb->memory.romBase = newRom;
	}
	gb->memory.rom = newRom;
	gb->memory.romSize = patchedSize;
	gb->romCrc32 = doCrc32(gb->memory.rom, gb->memory.romSize);
	gb->cpu->memory.setActiveRegion(gb->cpu, gb->cpu->pc);
}

void GBDestroy(struct GB* gb) {
	GBUnloadROM(gb);

	if (gb->biosVf) {
		gb->biosVf->close(gb->biosVf);
		gb->biosVf = 0;
	}

	GBMemoryDeinit(gb);
	GBAudioDeinit(&gb->audio);
	GBVideoDeinit(&gb->video);
	GBSIODeinit(&gb->sio);
	mCoreCallbacksListDeinit(&gb->coreCallbacks);
}

void GBInterruptHandlerInit(struct SM83InterruptHandler* irqh) {
	irqh->reset = GBReset;
	irqh->processEvents = GBProcessEvents;
	irqh->setInterrupts = GBSetInterrupts;
	irqh->irqVector = GBIRQVector;
	irqh->hitIllegal = GBIllegal;
	irqh->stop = GBStop;
	irqh->halt = GBHalt;
}

static uint32_t _GBBiosCRC32(struct VFile* vf) {
	ssize_t size = vf->size(vf);
	if (size <= 0 || size > GB_SIZE_CART_BANK0) {
		return 0;
	}
	void* bios = vf->map(vf, size, MAP_READ);
	uint32_t biosCrc = doCrc32(bios, size);
	vf->unmap(vf, bios, size);
	return biosCrc;
}

bool GBIsBIOS(struct VFile* vf) {
	switch (_GBBiosCRC32(vf)) {
	case DMG_BIOS_CHECKSUM:
	case DMG_2_BIOS_CHECKSUM:
	case MGB_BIOS_CHECKSUM:
	case SGB_BIOS_CHECKSUM:
	case SGB2_BIOS_CHECKSUM:
	case CGB_BIOS_CHECKSUM:
		return true;
	default:
		return false;
	}
}

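// Full system reset: re-detects the model, clears CPU and SGB state, resets memory, video,
// timer, I/O, audio and SIO, and either maps the BIOS or skips straight to post-BIOS state.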
void GBReset(struct SM83Core* cpu) {
	struct GB* gb = (struct GB*) cpu->master;
	gb->memory.romBase = gb->memory.rom;
	GBDetectModel(gb);

	cpu->b = 0;
	cpu->d = 0;

	gb->timer.internalDiv = 0;

	gb->cpuBlocked = false;
	gb->earlyExit = false;
	gb->doubleSpeed = 0;

	if (gb->yankedRomSize) {
		gb->memory.romSize = gb->yankedRomSize;
		gb->memory.mbcType = gb->yankedMbc;
		gb->yankedRomSize = 0;
	}

	gb->sgbBit = -1;
	gb->sgbControllers = 0;
	gb->sgbCurrentController = 0;
	gb->currentSgbBits = 0;
	gb->sgbIncrement = false;
	memset(gb->sgbPacket, 0, sizeof(gb->sgbPacket));

	mTimingClear(&gb->timing);

	GBMemoryReset(gb);

	if (gb->biosVf) {
		if (!GBIsBIOS(gb->biosVf)) {
			gb->biosVf->close(gb->biosVf);
			gb->biosVf = NULL;
		} else {
			GBMapBIOS(gb);
			cpu->a = 0;
			cpu->f.packed = 0;
			cpu->c = 0;
			cpu->e = 0;
			cpu->h = 0;
			cpu->l = 0;
			cpu->sp = 0;
			cpu->pc = 0;
		}
	}

	GBVideoReset(&gb->video);
	GBTimerReset(&gb->timer);
	GBIOReset(gb);
	if (!gb->biosVf && gb->memory.rom) {
		GBSkipBIOS(gb);
	} else {
		mTimingSchedule(&gb->timing, &gb->timer.event, 0);
	}

	GBAudioReset(&gb->audio);
	GBSIOReset(&gb->sio);

	cpu->memory.setActiveRegion(cpu, cpu->pc);

	gb->sramMaskWriteback = false;
	GBSavedataUnmask(gb);
}

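// Sets the CPU registers, DIV counter, and LCDC to the values the boot ROM would leave for
// the selected model, then starts execution at 0x100 and unmaps any loaded BIOS.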
void GBSkipBIOS(struct GB* gb) {
	struct SM83Core* cpu = gb->cpu;
	const struct GBCartridge* cart = (const struct GBCartridge*) &gb->memory.rom[0x100];
	int nextDiv = 0;

	switch (gb->model) {
	case GB_MODEL_AUTODETECT: // Silence warnings
		gb->model = GB_MODEL_DMG;
		// Fall through
	case GB_MODEL_DMG:
		cpu->a = 1;
		cpu->f.packed = 0xB0;
		cpu->c = 0x13;
		cpu->e = 0xD8;
		cpu->h = 1;
		cpu->l = 0x4D;
		gb->timer.internalDiv = 0xABC;
		nextDiv = 4;
		break;
	case GB_MODEL_SGB:
		cpu->a = 1;
		cpu->f.packed = 0x00;
		cpu->c = 0x14;
		cpu->e = 0x00;
		cpu->h = 0xC0;
		cpu->l = 0x60;
		gb->timer.internalDiv = 0xD85;
		nextDiv = 8;
		break;
	case GB_MODEL_MGB:
		cpu->a = 0xFF;
		cpu->f.packed = 0xB0;
		cpu->c = 0x13;
		cpu->e = 0xD8;
		cpu->h = 1;
		cpu->l = 0x4D;
		gb->timer.internalDiv = 0xABC;
		nextDiv = 4;
		break;
	case GB_MODEL_SGB2:
		cpu->a = 0xFF;
		cpu->f.packed = 0x00;
		cpu->c = 0x14;
		cpu->e = 0x00;
		cpu->h = 0xC0;
		cpu->l = 0x60;
		gb->timer.internalDiv = 0xD84;
		nextDiv = 8;
		break;
	case GB_MODEL_AGB:
		cpu->b = 1;
		// Fall through
	case GB_MODEL_CGB:
		cpu->a = 0x11;
		if (gb->model == GB_MODEL_AGB) {
			cpu->f.packed = 0x00;
		} else {
			cpu->f.packed = 0x80;
		}
		cpu->c = 0;
		cpu->h = 0;
		if (cart->cgb & 0x80) {
			cpu->d = 0xFF;
			cpu->e = 0x56;
			cpu->l = 0x0D;
			gb->timer.internalDiv = 0x2F0;
		} else {
			cpu->e = 0x08;
			cpu->l = 0x7C;
			gb->timer.internalDiv = 0x260;
			gb->model = GB_MODEL_DMG;
			gb->memory.io[GB_REG_KEY1] = 0xFF;
			gb->memory.io[GB_REG_BCPS] = 0x88; // Faked writing 4 BG palette entries
			gb->memory.io[GB_REG_OCPS] = 0x90; // Faked writing 8 OBJ palette entries
			gb->memory.io[GB_REG_SVBK] = 0xFF;
			GBVideoDisableCGB(&gb->video);
		}
		nextDiv = 0xC;
		break;
	}

	cpu->sp = 0xFFFE;
	cpu->pc = 0x100;

	gb->timer.nextDiv = GB_DMG_DIV_PERIOD * (16 - nextDiv);

	mTimingDeschedule(&gb->timing, &gb->timer.event);
	mTimingSchedule(&gb->timing, &gb->timer.event, gb->timer.nextDiv);

	GBIOWrite(gb, GB_REG_LCDC, 0x91);
	GBVideoSkipBIOS(&gb->video);

	if (gb->biosVf) {
		GBUnmapBIOS(gb);
	}
}

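// Replaces ROM bank 0 with a malloc'd copy that begins with the BIOS image; the rest of the
// bank is copied from the original ROM, and the cartridge header at 0x100 is restored if the
// BIOS image covers it.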
void GBMapBIOS(struct GB* gb) {
	gb->biosVf->seek(gb->biosVf, 0, SEEK_SET);
	uint8_t* oldRomBase = gb->memory.romBase;
	gb->memory.romBase = malloc(GB_SIZE_CART_BANK0);
	ssize_t size = gb->biosVf->read(gb->biosVf, gb->memory.romBase, GB_SIZE_CART_BANK0);
	memcpy(&gb->memory.romBase[size], &oldRomBase[size], GB_SIZE_CART_BANK0 - size);
	if (size > 0x100) {
		memcpy(&gb->memory.romBase[0x100], &oldRomBase[0x100], sizeof(struct GBCartridge));
	}
}

void GBUnmapBIOS(struct GB* gb) {
	if (gb->memory.romBase < gb->memory.rom || gb->memory.romBase > &gb->memory.rom[gb->memory.romSize - 1]) {
		free(gb->memory.romBase);
		if (gb->memory.mbcType == GB_MMM01) {
			GBMBCSwitchBank0(gb, gb->memory.romSize / GB_SIZE_CART_BANK0 - 2);
		} else {
			GBMBCSwitchBank0(gb, 0);
		}
	}
	// XXX: Force AGB registers for AGB-mode
	if (gb->model == GB_MODEL_AGB && gb->cpu->pc == 0x100) {
		gb->cpu->b = 1;
	}
}

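// Chooses a hardware model when none has been set explicitly: from the BIOS checksum if a
// BIOS is loaded, otherwise from the cartridge header's CGB/SGB flags, then selects the
// matching audio style.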
void GBDetectModel(struct GB* gb) {
	if (gb->model != GB_MODEL_AUTODETECT) {
		return;
	}
	if (gb->biosVf) {
		switch (_GBBiosCRC32(gb->biosVf)) {
		case DMG_BIOS_CHECKSUM:
		case DMG_2_BIOS_CHECKSUM:
			gb->model = GB_MODEL_DMG;
			break;
		case MGB_BIOS_CHECKSUM:
			gb->model = GB_MODEL_MGB;
			break;
		case SGB_BIOS_CHECKSUM:
			gb->model = GB_MODEL_SGB;
			break;
		case SGB2_BIOS_CHECKSUM:
			gb->model = GB_MODEL_SGB2;
			break;
		case CGB_BIOS_CHECKSUM:
			gb->model = GB_MODEL_CGB;
			break;
		default:
			gb->biosVf->close(gb->biosVf);
			gb->biosVf = NULL;
		}
	}
	if (gb->model == GB_MODEL_AUTODETECT && gb->memory.rom) {
		const struct GBCartridge* cart = (const struct GBCartridge*) &gb->memory.rom[0x100];
		if (cart->cgb & 0x80) {
			gb->model = GB_MODEL_CGB;
		} else if (cart->sgb == 0x03 && cart->oldLicensee == 0x33) {
			gb->model = GB_MODEL_SGB;
		} else {
			gb->model = GB_MODEL_DMG;
		}
	}

	switch (gb->model) {
	case GB_MODEL_DMG:
	case GB_MODEL_SGB:
	case GB_MODEL_AUTODETECT: // Silence warnings
		gb->audio.style = GB_AUDIO_DMG;
		break;
	case GB_MODEL_MGB:
	case GB_MODEL_SGB2:
		gb->audio.style = GB_AUDIO_MGB;
		break;
	case GB_MODEL_AGB:
	case GB_MODEL_CGB:
		gb->audio.style = GB_AUDIO_CGB;
		break;
	}
}

int GBValidModels(const uint8_t* bank0) {
	const struct GBCartridge* cart = (const struct GBCartridge*) &bank0[0x100];
	int models;
	if (cart->cgb == 0x80) {
		models = GB_MODEL_CGB | GB_MODEL_MGB;
	} else if (cart->cgb == 0xC0) {
		models = GB_MODEL_CGB;
	} else {
		models = GB_MODEL_MGB;
	}
	if (cart->sgb == 0x03 && cart->oldLicensee == 0x33) {
		models |= GB_MODEL_SGB;
	}
	return models;
}

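// Recomputes pending-interrupt state from IE & IF: any set bit wakes the CPU from HALT, but
// the IRQ is only raised when IME is enabled.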
void GBUpdateIRQs(struct GB* gb) {
	int irqs = gb->memory.ie & gb->memory.io[GB_REG_IF] & 0x1F;
	if (!irqs) {
		gb->cpu->irqPending = false;
		return;
	}
	gb->cpu->halted = false;

	if (!gb->memory.ime) {
		gb->cpu->irqPending = false;
		return;
	}
	if (gb->cpu->irqPending) {
		return;
	}
	SM83RaiseIRQ(gb->cpu);
}

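// Drains the timing queue: runs due events, updates the CPU's next-event horizon, and
// fast-forwards the clock while the CPU is halted or blocked.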
void GBProcessEvents(struct SM83Core* cpu) {
	struct GB* gb = (struct GB*) cpu->master;
	do {
		int32_t cycles = cpu->cycles;
		int32_t nextEvent;

		cpu->cycles = 0;
		cpu->nextEvent = INT_MAX;

		nextEvent = cycles;
		do {
#ifdef USE_DEBUGGERS
			gb->timing.globalCycles += nextEvent;
#endif
			nextEvent = mTimingTick(&gb->timing, nextEvent);
		} while (gb->cpuBlocked);
		// This loop cannot early exit until the SM83 run loop properly handles mid-M-cycle-exits
		cpu->nextEvent = nextEvent;

		if (cpu->halted) {
			cpu->cycles = cpu->nextEvent;
			if (!gb->memory.ie || !gb->memory.ime) {
				break;
			}
		}
		if (gb->earlyExit) {
			break;
		}
	} while (cpu->cycles >= cpu->nextEvent);
	gb->earlyExit = false;
	if (gb->cpuBlocked) {
		cpu->cycles = cpu->nextEvent;
	}
}

void GBSetInterrupts(struct SM83Core* cpu, bool enable) {
	struct GB* gb = (struct GB*) cpu->master;
	mTimingDeschedule(&gb->timing, &gb->eiPending);
	if (!enable) {
		gb->memory.ime = false;
		GBUpdateIRQs(gb);
	} else {
		mTimingSchedule(&gb->timing, &gb->eiPending, 4);
	}
}

uint16_t GBIRQVector(struct SM83Core* cpu) {
	struct GB* gb = (struct GB*) cpu->master;
	int irqs = gb->memory.ie & gb->memory.io[GB_REG_IF];

	if (irqs & (1 << GB_IRQ_VBLANK)) {
		gb->memory.io[GB_REG_IF] &= ~(1 << GB_IRQ_VBLANK);
		return GB_VECTOR_VBLANK;
	}
	if (irqs & (1 << GB_IRQ_LCDSTAT)) {
		gb->memory.io[GB_REG_IF] &= ~(1 << GB_IRQ_LCDSTAT);
		return GB_VECTOR_LCDSTAT;
	}
	if (irqs & (1 << GB_IRQ_TIMER)) {
		gb->memory.io[GB_REG_IF] &= ~(1 << GB_IRQ_TIMER);
		return GB_VECTOR_TIMER;
	}
	if (irqs & (1 << GB_IRQ_SIO)) {
		gb->memory.io[GB_REG_IF] &= ~(1 << GB_IRQ_SIO);
		return GB_VECTOR_SIO;
	}
	if (irqs & (1 << GB_IRQ_KEYPAD)) {
		gb->memory.io[GB_REG_IF] &= ~(1 << GB_IRQ_KEYPAD);
		return GB_VECTOR_KEYPAD;
	}
	return 0;
}

static void _enableInterrupts(struct mTiming* timing, void* user, uint32_t cyclesLate) {
	UNUSED(timing);
	UNUSED(cyclesLate);
	struct GB* gb = user;
	gb->memory.ime = true;
	GBUpdateIRQs(gb);
}

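// HALT handling: with no interrupt pending the CPU sleeps until the next event; with one
// pending while IME is clear, the hardware HALT bug path is taken instead.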
void GBHalt(struct SM83Core* cpu) {
	struct GB* gb = (struct GB*) cpu->master;
	if (!(gb->memory.ie & gb->memory.io[GB_REG_IF] & 0x1F)) {
		cpu->cycles = cpu->nextEvent;
		cpu->halted = true;
	} else if (!gb->memory.ime) {
		mLOG(GB, GAME_ERROR, "HALT bug");
		cpu->executionState = SM83_CORE_HALT_BUG;
	}
}

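// STOP handling: on CGB/AGB models with KEY1 armed this performs the double-speed switch;
// otherwise the registered sleep or shutdown callbacks are notified.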
void GBStop(struct SM83Core* cpu) {
	struct GB* gb = (struct GB*) cpu->master;
	if (gb->model >= GB_MODEL_CGB && gb->memory.io[GB_REG_KEY1] & 1) {
		gb->doubleSpeed ^= 1;
		gb->audio.timingFactor = gb->doubleSpeed + 1;
		gb->memory.io[GB_REG_KEY1] = 0;
		gb->memory.io[GB_REG_KEY1] |= gb->doubleSpeed << 7;
	} else {
		int sleep = ~(gb->memory.io[GB_REG_JOYP] & 0x30);
		size_t c;
		for (c = 0; c < mCoreCallbacksListSize(&gb->coreCallbacks); ++c) {
			struct mCoreCallbacks* callbacks = mCoreCallbacksListGetPointer(&gb->coreCallbacks, c);
			if (sleep && callbacks->sleep) {
				callbacks->sleep(callbacks->context);
			} else if (callbacks->shutdown) {
				callbacks->shutdown(callbacks->context);
			}
		}
	}
}

void GBIllegal(struct SM83Core* cpu) {
	struct GB* gb = (struct GB*) cpu->master;
	mLOG(GB, GAME_ERROR, "Hit illegal opcode at address %04X:%02X", cpu->pc, cpu->bus);
#ifdef USE_DEBUGGERS
	if (cpu->components && cpu->components[CPU_COMPONENT_DEBUGGER]) {
		struct mDebuggerEntryInfo info = {
			.address = cpu->pc,
			.type.bp.opcode = cpu->bus
		};
		mDebuggerEnter((struct mDebugger*) cpu->components[CPU_COMPONENT_DEBUGGER], DEBUGGER_ENTER_ILLEGAL_OP, &info);
	}
#endif
	// Hang forever
	gb->memory.ime = 0;
	--cpu->pc;
}

bool GBIsROM(struct VFile* vf) {
	if (!vf) {
		return false;
	}
	vf->seek(vf, 0x104, SEEK_SET);
	uint8_t header[4];

	if (vf->read(vf, &header, sizeof(header)) < (ssize_t) sizeof(header)) {
		return false;
	}
	if (memcmp(header, _knownHeader, sizeof(header))) {
		return false;
	}
	return true;
}

void GBGetGameTitle(const struct GB* gb, char* out) {
	const struct GBCartridge* cart = NULL;
	if (gb->memory.rom) {
		cart = (const struct GBCartridge*) &gb->memory.rom[0x100];
	}
	if (!cart) {
		return;
	}
	if (cart->oldLicensee != 0x33) {
		memcpy(out, cart->titleLong, 16);
	} else {
		memcpy(out, cart->titleShort, 11);
	}
}

void GBGetGameCode(const struct GB* gb, char* out) {
	memset(out, 0, 8);
	const struct GBCartridge* cart = NULL;
	if (gb->memory.rom) {
		cart = (const struct GBCartridge*) &gb->memory.rom[0x100];
	}
	if (!cart) {
		return;
	}
	if (cart->cgb == 0xC0) {
		memcpy(out, "CGB-????", 8);
	} else {
		memcpy(out, "DMG-????", 8);
	}
	if (cart->oldLicensee == 0x33) {
		memcpy(&out[4], cart->maker, 4);
	}
}

void GBFrameStarted(struct GB* gb) {
	GBTestKeypadIRQ(gb);

	size_t c;
	for (c = 0; c < mCoreCallbacksListSize(&gb->coreCallbacks); ++c) {
		struct mCoreCallbacks* callbacks = mCoreCallbacksListGetPointer(&gb->coreCallbacks, c);
		if (callbacks->videoFrameStarted) {
			callbacks->videoFrameStarted(callbacks->context);
		}
	}
}

void GBFrameEnded(struct GB* gb) {
	GBSramClean(gb, gb->video.frameCounter);

	if (gb->cpu->components && gb->cpu->components[CPU_COMPONENT_CHEAT_DEVICE]) {
		struct mCheatDevice* device = (struct mCheatDevice*) gb->cpu->components[CPU_COMPONENT_CHEAT_DEVICE];
		size_t i;
		for (i = 0; i < mCheatSetsSize(&device->cheats); ++i) {
			struct mCheatSet* cheats = *mCheatSetsGetPointer(&device->cheats, i);
			mCheatRefresh(device, cheats);
		}
	}

	// TODO: Move to common code
	if (gb->stream && gb->stream->postVideoFrame) {
		const color_t* pixels;
		size_t stride;
		gb->video.renderer->getPixels(gb->video.renderer, &stride, (const void**) &pixels);
		gb->stream->postVideoFrame(gb->stream, pixels, stride);
	}

	size_t c;
	for (c = 0; c < mCoreCallbacksListSize(&gb->coreCallbacks); ++c) {
		struct mCoreCallbacks* callbacks = mCoreCallbacksListGetPointer(&gb->coreCallbacks, c);
		if (callbacks->videoFrameEnded) {
			callbacks->videoFrameEnded(callbacks->context);
		}
	}
}

enum GBModel GBNameToModel(const char* model) {
	if (strcasecmp(model, "DMG") == 0) {
		return GB_MODEL_DMG;
	} else if (strcasecmp(model, "CGB") == 0) {
		return GB_MODEL_CGB;
	} else if (strcasecmp(model, "AGB") == 0) {
		return GB_MODEL_AGB;
	} else if (strcasecmp(model, "SGB") == 0) {
		return GB_MODEL_SGB;
	} else if (strcasecmp(model, "MGB") == 0) {
		return GB_MODEL_MGB;
	} else if (strcasecmp(model, "SGB2") == 0) {
		return GB_MODEL_SGB2;
	}
	return GB_MODEL_AUTODETECT;
}

const char* GBModelToName(enum GBModel model) {
	switch (model) {
	case GB_MODEL_DMG:
		return "DMG";
	case GB_MODEL_SGB:
		return "SGB";
	case GB_MODEL_MGB:
		return "MGB";
	case GB_MODEL_SGB2:
		return "SGB2";
	case GB_MODEL_CGB:
		return "CGB";
	case GB_MODEL_AGB:
		return "AGB";
	default:
	case GB_MODEL_AUTODETECT:
		return NULL;
	}
}