src/gb/gb.c
/* Copyright (c) 2013-2016 Jeffrey Pfau
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include <mgba/internal/gb/gb.h>

#include <mgba/internal/gb/io.h>
#include <mgba/internal/gb/mbc.h>
#include <mgba/internal/sm83/sm83.h>

#include <mgba/core/core.h>
#include <mgba/core/cheats.h>
#include <mgba-util/crc32.h>
#include <mgba-util/memory.h>
#include <mgba-util/math.h>
#include <mgba-util/patch.h>
#include <mgba-util/vfs.h>

#define CLEANUP_THRESHOLD 15

const uint32_t CGB_SM83_FREQUENCY = 0x800000;
const uint32_t SGB_SM83_FREQUENCY = 0x418B1E;

const uint32_t GB_COMPONENT_MAGIC = 0x400000;

static const uint8_t _knownHeader[4] = { 0xCE, 0xED, 0x66, 0x66 };

#define DMG_BIOS_CHECKSUM 0xC2F5CC97
#define DMG_2_BIOS_CHECKSUM 0x59C8598E
#define MGB_BIOS_CHECKSUM 0xE6920754
#define SGB_BIOS_CHECKSUM 0xEC8A83B9
#define SGB2_BIOS_CHECKSUM 0x53D0DD63
#define CGB_BIOS_CHECKSUM 0x41884E46

mLOG_DEFINE_CATEGORY(GB, "GB", "gb");

static void GBInit(void* cpu, struct mCPUComponent* component);
static void GBDeinit(struct mCPUComponent* component);
static void GBInterruptHandlerInit(struct SM83InterruptHandler* irqh);
static void GBProcessEvents(struct SM83Core* cpu);
static void GBSetInterrupts(struct SM83Core* cpu, bool enable);
static uint16_t GBIRQVector(struct SM83Core* cpu);
static void GBIllegal(struct SM83Core* cpu);
static void GBStop(struct SM83Core* cpu);

static void _enableInterrupts(struct mTiming* timing, void* user, uint32_t cyclesLate);

void GBCreate(struct GB* gb) {
    gb->d.id = GB_COMPONENT_MAGIC;
    gb->d.init = GBInit;
    gb->d.deinit = GBDeinit;
}

static void GBInit(void* cpu, struct mCPUComponent* component) {
    struct GB* gb = (struct GB*) component;
    gb->cpu = cpu;
    gb->sync = NULL;

    GBInterruptHandlerInit(&gb->cpu->irqh);
    GBMemoryInit(gb);

    gb->video.p = gb;
    GBVideoInit(&gb->video);

    gb->audio.p = gb;
    GBAudioInit(&gb->audio, 2048, &gb->memory.io[GB_REG_NR52], GB_AUDIO_DMG); // TODO: Remove magic constant

    gb->sio.p = gb;
    GBSIOInit(&gb->sio);

    gb->timer.p = gb;

    gb->model = GB_MODEL_AUTODETECT;

    gb->biosVf = NULL;
    gb->romVf = NULL;
    gb->sramVf = NULL;
    gb->sramRealVf = NULL;

    gb->isPristine = false;
    gb->pristineRomSize = 0;
    gb->yankedRomSize = 0;

    mCoreCallbacksListInit(&gb->coreCallbacks, 0);
    gb->stream = NULL;

    mTimingInit(&gb->timing, &gb->cpu->cycles, &gb->cpu->nextEvent);
    gb->audio.timing = &gb->timing;

    gb->eiPending.name = "GB EI";
    gb->eiPending.callback = _enableInterrupts;
    gb->eiPending.context = gb;
    gb->eiPending.priority = 0;
}

static void GBDeinit(struct mCPUComponent* component) {
    struct GB* gb = (struct GB*) component;
    mTimingDeinit(&gb->timing);
}

bool GBLoadROM(struct GB* gb, struct VFile* vf) {
    if (!vf) {
        return false;
    }
    GBUnloadROM(gb);
    gb->romVf = vf;
    gb->pristineRomSize = vf->size(vf);
    vf->seek(vf, 0, SEEK_SET);
    gb->isPristine = true;
    gb->memory.rom = vf->map(vf, gb->pristineRomSize, MAP_READ);
    if (!gb->memory.rom) {
        return false;
    }
    gb->yankedRomSize = 0;
    gb->memory.romSize = gb->pristineRomSize;
    gb->romCrc32 = doCrc32(gb->memory.rom, gb->memory.romSize);
    memset(&gb->memory.mbcState, 0, sizeof(gb->memory.mbcState));
    GBMBCReset(gb);

    if (gb->cpu) {
        struct SM83Core* cpu = gb->cpu;
        cpu->memory.setActiveRegion(cpu, cpu->pc);
    }

    // TODO: error check
    return true;
}

void GBYankROM(struct GB* gb) {
    gb->yankedRomSize = gb->memory.romSize;
    gb->yankedMbc = gb->memory.mbcType;
    gb->memory.romSize = 0;
    gb->memory.mbcType = GB_MBC_NONE;
    GBMBCReset(gb);

    if (gb->cpu) {
        struct SM83Core* cpu = gb->cpu;
        cpu->memory.setActiveRegion(cpu, cpu->pc);
    }
}

static void GBSramDeinit(struct GB* gb) {
    if (gb->sramVf) {
        gb->sramVf->unmap(gb->sramVf, gb->memory.sram, gb->sramSize);
        if (gb->memory.mbcType == GB_MBC3_RTC && gb->sramVf == gb->sramRealVf) {
            GBMBCRTCWrite(gb);
        }
        gb->sramVf = NULL;
    } else if (gb->memory.sram) {
        mappedMemoryFree(gb->memory.sram, gb->sramSize);
    }
    gb->memory.sram = 0;
}

bool GBLoadSave(struct GB* gb, struct VFile* vf) {
    GBSramDeinit(gb);
    gb->sramVf = vf;
    gb->sramRealVf = vf;
    if (gb->sramSize) {
        GBResizeSram(gb, gb->sramSize);
        GBMBCSwitchSramBank(gb, gb->memory.sramCurrentBank);

        if (gb->memory.mbcType == GB_MBC3_RTC) {
            GBMBCRTCRead(gb);
        }
    }
    return vf;
}

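// Descriptive note: grows the SRAM buffer (and its backing save file, if any) to
// `size` bytes. If the file carries a small non-SRAM footer (such as MBC3 RTC
// state, at most 0xFF bytes), that footer is read out before truncating and
// rewritten after the enlarged SRAM area so it stays at the end of the file.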
void GBResizeSram(struct GB* gb, size_t size) {
    if (gb->memory.sram && size <= gb->sramSize) {
        return;
    }
    struct VFile* vf = gb->sramVf;
    if (vf) {
        if (vf == gb->sramRealVf) {
            ssize_t vfSize = vf->size(vf);
            if (vfSize >= 0 && (size_t) vfSize < size) {
                uint8_t extdataBuffer[0x100];
                if (vfSize & 0xFF) {
                    vf->seek(vf, -(vfSize & 0xFF), SEEK_END);
                    vf->read(vf, extdataBuffer, vfSize & 0xFF);
                }
                if (gb->memory.sram) {
                    vf->unmap(vf, gb->memory.sram, gb->sramSize);
                }
                vf->truncate(vf, size + (vfSize & 0xFF));
                if (vfSize & 0xFF) {
                    vf->seek(vf, size, SEEK_SET);
                    vf->write(vf, extdataBuffer, vfSize & 0xFF);
                }
                gb->memory.sram = vf->map(vf, size, MAP_WRITE);
                memset(&gb->memory.sram[vfSize], 0xFF, size - vfSize);
            } else if (size > gb->sramSize || !gb->memory.sram) {
                if (gb->memory.sram) {
                    vf->unmap(vf, gb->memory.sram, gb->sramSize);
                }
                gb->memory.sram = vf->map(vf, size, MAP_WRITE);
            }
        } else {
            if (gb->memory.sram) {
                vf->unmap(vf, gb->memory.sram, gb->sramSize);
            }
            gb->memory.sram = vf->map(vf, size, MAP_READ);
        }
        if (gb->memory.sram == (void*) -1) {
            gb->memory.sram = NULL;
        }
    } else if (size) {
        uint8_t* newSram = anonymousMemoryMap(size);
        if (gb->memory.sram) {
            if (size > gb->sramSize) {
                memcpy(newSram, gb->memory.sram, gb->sramSize);
                memset(&newSram[gb->sramSize], 0xFF, size - gb->sramSize);
            } else {
                memcpy(newSram, gb->memory.sram, size);
            }
            mappedMemoryFree(gb->memory.sram, gb->sramSize);
        } else {
            memset(newSram, 0xFF, size);
        }
        gb->memory.sram = newSram;
    }
    if (gb->sramSize < size) {
        gb->sramSize = size;
    }
}

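// Descriptive note: flushes dirty SRAM back to disk, but only after the data has
// been left untouched for CLEANUP_THRESHOLD frames, so a game that writes every
// frame does not trigger a sync every frame. Fires the savedataUpdated callbacks
// once a sync actually happens.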
void GBSramClean(struct GB* gb, uint32_t frameCount) {
    // TODO: Share with GBASavedataClean
    if (!gb->sramVf) {
        return;
    }
    if (gb->sramDirty & GB_SRAM_DIRT_NEW) {
        gb->sramDirtAge = frameCount;
        gb->sramDirty &= ~GB_SRAM_DIRT_NEW;
        if (!(gb->sramDirty & GB_SRAM_DIRT_SEEN)) {
            gb->sramDirty |= GB_SRAM_DIRT_SEEN;
        }
    } else if ((gb->sramDirty & GB_SRAM_DIRT_SEEN) && frameCount - gb->sramDirtAge > CLEANUP_THRESHOLD) {
        if (gb->sramMaskWriteback) {
            GBSavedataUnmask(gb);
        }
        if (gb->memory.mbcType == GB_MBC3_RTC) {
            GBMBCRTCWrite(gb);
        }
        gb->sramDirty = 0;
        if (gb->memory.sram && gb->sramVf->sync(gb->sramVf, gb->memory.sram, gb->sramSize)) {
            mLOG(GB_MEM, INFO, "Savedata synced");
        } else {
            mLOG(GB_MEM, INFO, "Savedata failed to sync!");
        }

        size_t c;
        for (c = 0; c < mCoreCallbacksListSize(&gb->coreCallbacks); ++c) {
            struct mCoreCallbacks* callbacks = mCoreCallbacksListGetPointer(&gb->coreCallbacks, c);
            if (callbacks->savedataUpdated) {
                callbacks->savedataUpdated(callbacks->context);
            }
        }
    }
}

void GBSavedataMask(struct GB* gb, struct VFile* vf, bool writeback) {
    struct VFile* oldVf = gb->sramVf;
    GBSramDeinit(gb);
    if (oldVf && oldVf != gb->sramRealVf) {
        oldVf->close(oldVf);
    }
    gb->sramVf = vf;
    gb->sramMaskWriteback = writeback;
    gb->memory.sram = vf->map(vf, gb->sramSize, MAP_READ);
    GBMBCSwitchSramBank(gb, gb->memory.sramCurrentBank);
}

void GBSavedataUnmask(struct GB* gb) {
    if (!gb->sramRealVf || gb->sramVf == gb->sramRealVf) {
        return;
    }
    struct VFile* vf = gb->sramVf;
    GBSramDeinit(gb);
    gb->sramVf = gb->sramRealVf;
    gb->memory.sram = gb->sramVf->map(gb->sramVf, gb->sramSize, MAP_WRITE);
    if (gb->sramMaskWriteback) {
        vf->seek(vf, 0, SEEK_SET);
        vf->read(vf, gb->memory.sram, gb->sramSize);
        gb->sramMaskWriteback = false;
    }
    GBMBCSwitchSramBank(gb, gb->memory.sramCurrentBank);
    vf->close(vf);
}

void GBUnloadROM(struct GB* gb) {
    // TODO: Share with GBAUnloadROM
    if (gb->memory.rom && !gb->isPristine) {
        if (gb->yankedRomSize) {
            gb->yankedRomSize = 0;
        }
        mappedMemoryFree(gb->memory.rom, GB_SIZE_CART_MAX);
    }

    if (gb->romVf) {
#ifndef FIXED_ROM_BUFFER
        gb->romVf->unmap(gb->romVf, gb->memory.rom, gb->pristineRomSize);
#endif
        gb->romVf->close(gb->romVf);
        gb->romVf = NULL;
    }
    gb->memory.rom = NULL;
    gb->memory.mbcType = GB_MBC_AUTODETECT;
    gb->isPristine = false;

    gb->sramMaskWriteback = false;
    GBSavedataUnmask(gb);
    GBSramDeinit(gb);
    if (gb->sramRealVf) {
        gb->sramRealVf->close(gb->sramRealVf);
    }
    gb->sramRealVf = NULL;
    gb->sramVf = NULL;
    if (gb->memory.cam && gb->memory.cam->stopRequestImage) {
        gb->memory.cam->stopRequestImage(gb->memory.cam);
    }
}

void GBSynthesizeROM(struct VFile* vf) {
    if (!vf) {
        return;
    }
    const struct GBCartridge cart = {
        .logo = { _knownHeader[0], _knownHeader[1], _knownHeader[2], _knownHeader[3] }
    };

    vf->seek(vf, 0x100, SEEK_SET);
    vf->write(vf, &cart, sizeof(cart));
}

void GBLoadBIOS(struct GB* gb, struct VFile* vf) {
    gb->biosVf = vf;
}

void GBApplyPatch(struct GB* gb, struct Patch* patch) {
    size_t patchedSize = patch->outputSize(patch, gb->memory.romSize);
    if (!patchedSize) {
        return;
    }
    if (patchedSize > GB_SIZE_CART_MAX) {
        patchedSize = GB_SIZE_CART_MAX;
    }
    void* newRom = anonymousMemoryMap(GB_SIZE_CART_MAX);
    if (!patch->applyPatch(patch, gb->memory.rom, gb->pristineRomSize, newRom, patchedSize)) {
        mappedMemoryFree(newRom, GB_SIZE_CART_MAX);
        return;
    }
    if (gb->romVf) {
#ifndef FIXED_ROM_BUFFER
        gb->romVf->unmap(gb->romVf, gb->memory.rom, gb->pristineRomSize);
#endif
        gb->romVf->close(gb->romVf);
        gb->romVf = NULL;
    }
    gb->isPristine = false;
    if (gb->memory.romBase == gb->memory.rom) {
        gb->memory.romBase = newRom;
    }
    gb->memory.rom = newRom;
    gb->memory.romSize = patchedSize;
    gb->romCrc32 = doCrc32(gb->memory.rom, gb->memory.romSize);
    gb->cpu->memory.setActiveRegion(gb->cpu, gb->cpu->pc);
}

void GBDestroy(struct GB* gb) {
    GBUnmapBIOS(gb);
    GBUnloadROM(gb);

    if (gb->biosVf) {
        gb->biosVf->close(gb->biosVf);
        gb->biosVf = 0;
    }

    GBMemoryDeinit(gb);
    GBAudioDeinit(&gb->audio);
    GBVideoDeinit(&gb->video);
    GBSIODeinit(&gb->sio);
    mCoreCallbacksListDeinit(&gb->coreCallbacks);
}

void GBInterruptHandlerInit(struct SM83InterruptHandler* irqh) {
    irqh->reset = GBReset;
    irqh->processEvents = GBProcessEvents;
    irqh->setInterrupts = GBSetInterrupts;
    irqh->irqVector = GBIRQVector;
    irqh->hitIllegal = GBIllegal;
    irqh->stop = GBStop;
    irqh->halt = GBHalt;
}

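// Descriptive note: a boot ROM image is identified purely by CRC32. The candidate
// file is mapped, checksummed, and compared against the known DMG/MGB/SGB/SGB2/CGB
// checksums defined at the top of this file.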
static uint32_t _GBBiosCRC32(struct VFile* vf) {
    ssize_t size = vf->size(vf);
    if (size <= 0 || size > GB_SIZE_CART_BANK0) {
        return 0;
    }
    void* bios = vf->map(vf, size, MAP_READ);
    uint32_t biosCrc = doCrc32(bios, size);
    vf->unmap(vf, bios, size);
    return biosCrc;
}

bool GBIsBIOS(struct VFile* vf) {
    switch (_GBBiosCRC32(vf)) {
    case DMG_BIOS_CHECKSUM:
    case DMG_2_BIOS_CHECKSUM:
    case MGB_BIOS_CHECKSUM:
    case SGB_BIOS_CHECKSUM:
    case SGB2_BIOS_CHECKSUM:
    case CGB_BIOS_CHECKSUM:
        return true;
    default:
        return false;
    }
}

void GBReset(struct SM83Core* cpu) {
    struct GB* gb = (struct GB*) cpu->master;
    gb->memory.romBase = gb->memory.rom;
    GBDetectModel(gb);

    cpu->b = 0;
    cpu->d = 0;

    gb->timer.internalDiv = 0;

    gb->cpuBlocked = false;
    gb->earlyExit = false;
    gb->doubleSpeed = 0;

    if (gb->yankedRomSize) {
        gb->memory.romSize = gb->yankedRomSize;
        gb->memory.mbcType = gb->yankedMbc;
        gb->yankedRomSize = 0;
    }

    gb->sgbBit = -1;
    gb->sgbControllers = 0;
    gb->sgbCurrentController = 0;
    gb->currentSgbBits = 0;
    gb->sgbIncrement = false;
    memset(gb->sgbPacket, 0, sizeof(gb->sgbPacket));

    mTimingClear(&gb->timing);

    GBMemoryReset(gb);

    if (gb->biosVf) {
        if (!GBIsBIOS(gb->biosVf)) {
            gb->biosVf->close(gb->biosVf);
            gb->biosVf = NULL;
        } else {
            GBMapBIOS(gb);
            cpu->a = 0;
            cpu->f.packed = 0;
            cpu->c = 0;
            cpu->e = 0;
            cpu->h = 0;
            cpu->l = 0;
            cpu->sp = 0;
            cpu->pc = 0;
        }
    }

    GBVideoReset(&gb->video);
    GBTimerReset(&gb->timer);
    GBIOReset(gb);
    if (!gb->biosVf && gb->memory.rom) {
        GBSkipBIOS(gb);
    } else {
        mTimingSchedule(&gb->timing, &gb->timer.event, 0);
    }

    GBAudioReset(&gb->audio);
    GBSIOReset(&gb->sio);

    cpu->memory.setActiveRegion(cpu, cpu->pc);

    gb->sramMaskWriteback = false;
    GBSavedataUnmask(gb);
}

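// Descriptive note: fast-boot path used when no boot ROM is available. It seeds
// the CPU registers, the internal DIV counter, and a handful of I/O registers with
// the values the selected model's boot ROM would leave behind, then starts
// execution at 0x100.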
void GBSkipBIOS(struct GB* gb) {
    struct SM83Core* cpu = gb->cpu;
    const struct GBCartridge* cart = (const struct GBCartridge*) &gb->memory.rom[0x100];
    int nextDiv = 0;

    switch (gb->model) {
    case GB_MODEL_AUTODETECT: // Silence warnings
        gb->model = GB_MODEL_DMG;
        // Fall through
    case GB_MODEL_DMG:
        cpu->a = 1;
        cpu->f.packed = 0xB0;
        cpu->c = 0x13;
        cpu->e = 0xD8;
        cpu->h = 1;
        cpu->l = 0x4D;
        gb->timer.internalDiv = 0xABC;
        nextDiv = 4;
        break;
    case GB_MODEL_SGB:
        cpu->a = 1;
        cpu->f.packed = 0x00;
        cpu->c = 0x14;
        cpu->e = 0x00;
        cpu->h = 0xC0;
        cpu->l = 0x60;
        gb->timer.internalDiv = 0xD85;
        nextDiv = 8;
        break;
    case GB_MODEL_MGB:
        cpu->a = 0xFF;
        cpu->f.packed = 0xB0;
        cpu->c = 0x13;
        cpu->e = 0xD8;
        cpu->h = 1;
        cpu->l = 0x4D;
        gb->timer.internalDiv = 0xABC;
        nextDiv = 4;
        break;
    case GB_MODEL_SGB2:
        cpu->a = 0xFF;
        cpu->f.packed = 0x00;
        cpu->c = 0x14;
        cpu->e = 0x00;
        cpu->h = 0xC0;
        cpu->l = 0x60;
        gb->timer.internalDiv = 0xD84;
        nextDiv = 8;
        break;
    case GB_MODEL_AGB:
        cpu->b = 1;
        // Fall through
    case GB_MODEL_CGB:
        cpu->a = 0x11;
        if (gb->model == GB_MODEL_AGB) {
            cpu->f.packed = 0x00;
        } else {
            cpu->f.packed = 0x80;
        }
        cpu->c = 0;
        cpu->h = 0;
        if (cart->cgb & 0x80) {
            cpu->d = 0xFF;
            cpu->e = 0x56;
            cpu->l = 0x0D;
            gb->timer.internalDiv = 0x2F0;
        } else {
            cpu->e = 0x08;
            cpu->l = 0x7C;
            gb->timer.internalDiv = 0x260;
            gb->model = GB_MODEL_DMG;
            gb->memory.io[GB_REG_KEY1] = 0xFF;
            gb->memory.io[GB_REG_BCPS] = 0x88; // Faked writing 4 BG palette entries
            gb->memory.io[GB_REG_OCPS] = 0x90; // Faked writing 8 OBJ palette entries
            gb->memory.io[GB_REG_SVBK] = 0xFF;
            GBVideoDisableCGB(&gb->video);
        }
        nextDiv = 0xC;
        break;
    }

    cpu->sp = 0xFFFE;
    cpu->pc = 0x100;

    gb->timer.nextDiv = GB_DMG_DIV_PERIOD * (16 - nextDiv);

    mTimingDeschedule(&gb->timing, &gb->timer.event);
    mTimingSchedule(&gb->timing, &gb->timer.event, gb->timer.nextDiv);

    GBIOWrite(gb, GB_REG_LCDC, 0x91);
    gb->memory.io[GB_REG_BANK] = 0x1;
    GBVideoSkipBIOS(&gb->video);

    if (gb->biosVf) {
        GBUnmapBIOS(gb);
    }
}

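// Descriptive note: the boot ROM image is read into a scratch buffer that
// temporarily stands in for cartridge bank 0 (romBase); the cartridge header at
// 0x100 is copied over it so header reads still see the loaded game. GBUnmapBIOS
// frees that buffer and restores the real bank 0 mapping.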
void GBMapBIOS(struct GB* gb) {
    gb->biosVf->seek(gb->biosVf, 0, SEEK_SET);
    gb->memory.romBase = malloc(GB_SIZE_CART_BANK0);
    ssize_t size = gb->biosVf->read(gb->biosVf, gb->memory.romBase, GB_SIZE_CART_BANK0);
    if (gb->memory.rom) {
        memcpy(&gb->memory.romBase[size], &gb->memory.rom[size], GB_SIZE_CART_BANK0 - size);
        if (size > 0x100) {
            memcpy(&gb->memory.romBase[0x100], &gb->memory.rom[0x100], sizeof(struct GBCartridge));
        }
    }
}

void GBUnmapBIOS(struct GB* gb) {
    if (gb->memory.io[GB_REG_BANK] == 0xFF && gb->memory.romBase != gb->memory.rom) {
        free(gb->memory.romBase);
        if (gb->memory.mbcType == GB_MMM01) {
            GBMBCSwitchBank0(gb, gb->memory.romSize / GB_SIZE_CART_BANK0 - 2);
        } else {
            GBMBCSwitchBank0(gb, 0);
        }
    }
    // XXX: Force AGB registers for AGB-mode
    if (gb->model == GB_MODEL_AGB && gb->cpu->pc == 0x100) {
        gb->cpu->b = 1;
    }
}

void GBDetectModel(struct GB* gb) {
    if (gb->model != GB_MODEL_AUTODETECT) {
        return;
    }
    if (gb->biosVf) {
        switch (_GBBiosCRC32(gb->biosVf)) {
        case DMG_BIOS_CHECKSUM:
        case DMG_2_BIOS_CHECKSUM:
            gb->model = GB_MODEL_DMG;
            break;
        case MGB_BIOS_CHECKSUM:
            gb->model = GB_MODEL_MGB;
            break;
        case SGB_BIOS_CHECKSUM:
            gb->model = GB_MODEL_SGB;
            break;
        case SGB2_BIOS_CHECKSUM:
            gb->model = GB_MODEL_SGB2;
            break;
        case CGB_BIOS_CHECKSUM:
            gb->model = GB_MODEL_CGB;
            break;
        default:
            gb->biosVf->close(gb->biosVf);
            gb->biosVf = NULL;
        }
    }
    if (gb->model == GB_MODEL_AUTODETECT && gb->memory.rom) {
        const struct GBCartridge* cart = (const struct GBCartridge*) &gb->memory.rom[0x100];
        if (cart->cgb & 0x80) {
            gb->model = GB_MODEL_CGB;
        } else if (cart->sgb == 0x03 && cart->oldLicensee == 0x33) {
            gb->model = GB_MODEL_SGB;
        } else {
            gb->model = GB_MODEL_DMG;
        }
    }

    switch (gb->model) {
    case GB_MODEL_DMG:
    case GB_MODEL_SGB:
    case GB_MODEL_AUTODETECT: // Silence warnings
        gb->audio.style = GB_AUDIO_DMG;
        break;
    case GB_MODEL_MGB:
    case GB_MODEL_SGB2:
        gb->audio.style = GB_AUDIO_MGB;
        break;
    case GB_MODEL_AGB:
    case GB_MODEL_CGB:
        gb->audio.style = GB_AUDIO_CGB;
        break;
    }
}

int GBValidModels(const uint8_t* bank0) {
    const struct GBCartridge* cart = (const struct GBCartridge*) &bank0[0x100];
    int models;
    if (cart->cgb == 0x80) {
        models = GB_MODEL_CGB | GB_MODEL_MGB;
    } else if (cart->cgb == 0xC0) {
        models = GB_MODEL_CGB;
    } else {
        models = GB_MODEL_MGB;
    }
    if (cart->sgb == 0x03 && cart->oldLicensee == 0x33) {
        models |= GB_MODEL_SGB;
    }
    return models;
}

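// Descriptive note: re-evaluates IE & IF. Any pending, enabled interrupt wakes the
// CPU from HALT regardless of IME, but the IRQ is only actually raised when IME is
// set.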
void GBUpdateIRQs(struct GB* gb) {
    int irqs = gb->memory.ie & gb->memory.io[GB_REG_IF] & 0x1F;
    if (!irqs) {
        gb->cpu->irqPending = false;
        return;
    }
    gb->cpu->halted = false;

    if (!gb->memory.ime) {
        gb->cpu->irqPending = false;
        return;
    }
    if (gb->cpu->irqPending) {
        return;
    }
    SM83RaiseIRQ(gb->cpu);
}

void GBProcessEvents(struct SM83Core* cpu) {
    struct GB* gb = (struct GB*) cpu->master;
    do {
        int32_t cycles = cpu->cycles;
        int32_t nextEvent;

        cpu->cycles = 0;
        cpu->nextEvent = INT_MAX;

        nextEvent = cycles;
        do {
#ifdef USE_DEBUGGERS
            gb->timing.globalCycles += nextEvent;
#endif
            nextEvent = mTimingTick(&gb->timing, nextEvent);
        } while (gb->cpuBlocked);
        // This loop cannot early exit until the SM83 run loop properly handles mid-M-cycle-exits
        cpu->nextEvent = nextEvent;

        if (cpu->halted) {
            cpu->cycles = cpu->nextEvent;
            if (!gb->memory.ie || !gb->memory.ime) {
                break;
            }
        }
        if (gb->earlyExit) {
            break;
        }
    } while (cpu->cycles >= cpu->nextEvent);
    gb->earlyExit = false;
    if (gb->cpuBlocked) {
        cpu->cycles = cpu->nextEvent;
    }
}

void GBSetInterrupts(struct SM83Core* cpu, bool enable) {
    struct GB* gb = (struct GB*) cpu->master;
    mTimingDeschedule(&gb->timing, &gb->eiPending);
    if (!enable) {
        gb->memory.ime = false;
        GBUpdateIRQs(gb);
    } else {
        mTimingSchedule(&gb->timing, &gb->eiPending, 4 * cpu->tMultiplier);
    }
}

uint16_t GBIRQVector(struct SM83Core* cpu) {
    struct GB* gb = (struct GB*) cpu->master;
    int irqs = gb->memory.ie & gb->memory.io[GB_REG_IF];

    if (irqs & (1 << GB_IRQ_VBLANK)) {
        gb->memory.io[GB_REG_IF] &= ~(1 << GB_IRQ_VBLANK);
        return GB_VECTOR_VBLANK;
    }
    if (irqs & (1 << GB_IRQ_LCDSTAT)) {
        gb->memory.io[GB_REG_IF] &= ~(1 << GB_IRQ_LCDSTAT);
        return GB_VECTOR_LCDSTAT;
    }
    if (irqs & (1 << GB_IRQ_TIMER)) {
        gb->memory.io[GB_REG_IF] &= ~(1 << GB_IRQ_TIMER);
        return GB_VECTOR_TIMER;
    }
    if (irqs & (1 << GB_IRQ_SIO)) {
        gb->memory.io[GB_REG_IF] &= ~(1 << GB_IRQ_SIO);
        return GB_VECTOR_SIO;
    }
    if (irqs & (1 << GB_IRQ_KEYPAD)) {
        gb->memory.io[GB_REG_IF] &= ~(1 << GB_IRQ_KEYPAD);
        return GB_VECTOR_KEYPAD;
    }
    return 0;
}

static void _enableInterrupts(struct mTiming* timing, void* user, uint32_t cyclesLate) {
    UNUSED(timing);
    UNUSED(cyclesLate);
    struct GB* gb = user;
    gb->memory.ime = true;
    GBUpdateIRQs(gb);
}

void GBHalt(struct SM83Core* cpu) {
    struct GB* gb = (struct GB*) cpu->master;
    if (!(gb->memory.ie & gb->memory.io[GB_REG_IF] & 0x1F)) {
        cpu->cycles = cpu->nextEvent;
        cpu->halted = true;
    } else if (!gb->memory.ime) {
        mLOG(GB, GAME_ERROR, "HALT bug");
        cpu->executionState = SM83_CORE_HALT_BUG;
    }
}

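// Descriptive note: STOP has two roles here. On CGB-capable models with the speed
// switch armed via KEY1 bit 0, it toggles double-speed mode; otherwise it is
// forwarded to the core callbacks, preferring the sleep callback and falling back
// to shutdown.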
void GBStop(struct SM83Core* cpu) {
    struct GB* gb = (struct GB*) cpu->master;
    if (gb->model >= GB_MODEL_CGB && gb->memory.io[GB_REG_KEY1] & 1) {
        gb->doubleSpeed ^= 1;
        gb->cpu->tMultiplier = 2 - gb->doubleSpeed;
        gb->memory.io[GB_REG_KEY1] = 0;
        gb->memory.io[GB_REG_KEY1] |= gb->doubleSpeed << 7;
    } else {
        int sleep = ~(gb->memory.io[GB_REG_JOYP] & 0x30);
        size_t c;
        for (c = 0; c < mCoreCallbacksListSize(&gb->coreCallbacks); ++c) {
            struct mCoreCallbacks* callbacks = mCoreCallbacksListGetPointer(&gb->coreCallbacks, c);
            if (sleep && callbacks->sleep) {
                callbacks->sleep(callbacks->context);
            } else if (callbacks->shutdown) {
                callbacks->shutdown(callbacks->context);
            }
        }
    }
}

void GBIllegal(struct SM83Core* cpu) {
    struct GB* gb = (struct GB*) cpu->master;
    mLOG(GB, GAME_ERROR, "Hit illegal opcode at address %04X:%02X", cpu->pc, cpu->bus);
#ifdef USE_DEBUGGERS
    if (cpu->components && cpu->components[CPU_COMPONENT_DEBUGGER]) {
        struct mDebuggerEntryInfo info = {
            .address = cpu->pc,
            .type.bp.opcode = cpu->bus
        };
        mDebuggerEnter((struct mDebugger*) cpu->components[CPU_COMPONENT_DEBUGGER], DEBUGGER_ENTER_ILLEGAL_OP, &info);
    }
#endif
    // Hang forever
    gb->memory.ime = 0;
    --cpu->pc;
}

bool GBIsROM(struct VFile* vf) {
    if (!vf) {
        return false;
    }
    vf->seek(vf, 0x104, SEEK_SET);
    uint8_t header[4];

    if (vf->read(vf, &header, sizeof(header)) < (ssize_t) sizeof(header)) {
        return false;
    }
    if (memcmp(header, _knownHeader, sizeof(header))) {
        return false;
    }
    return true;
}

void GBGetGameTitle(const struct GB* gb, char* out) {
    const struct GBCartridge* cart = NULL;
    if (gb->memory.rom) {
        cart = (const struct GBCartridge*) &gb->memory.rom[0x100];
    }
    if (!cart) {
        return;
    }
    if (cart->oldLicensee != 0x33) {
        memcpy(out, cart->titleLong, 16);
    } else {
        memcpy(out, cart->titleShort, 11);
    }
}

void GBGetGameCode(const struct GB* gb, char* out) {
    memset(out, 0, 8);
    const struct GBCartridge* cart = NULL;
    if (gb->memory.rom) {
        cart = (const struct GBCartridge*) &gb->memory.rom[0x100];
    }
    if (!cart) {
        return;
    }
    if (cart->cgb == 0xC0) {
        memcpy(out, "CGB-????", 8);
    } else {
        memcpy(out, "DMG-????", 8);
    }
    if (cart->oldLicensee == 0x33) {
        memcpy(&out[4], cart->maker, 4);
    }
}

void GBFrameStarted(struct GB* gb) {
    GBTestKeypadIRQ(gb);

    size_t c;
    for (c = 0; c < mCoreCallbacksListSize(&gb->coreCallbacks); ++c) {
        struct mCoreCallbacks* callbacks = mCoreCallbacksListGetPointer(&gb->coreCallbacks, c);
        if (callbacks->videoFrameStarted) {
            callbacks->videoFrameStarted(callbacks->context);
        }
    }
}

void GBFrameEnded(struct GB* gb) {
    GBSramClean(gb, gb->video.frameCounter);

    if (gb->cpu->components && gb->cpu->components[CPU_COMPONENT_CHEAT_DEVICE]) {
        struct mCheatDevice* device = (struct mCheatDevice*) gb->cpu->components[CPU_COMPONENT_CHEAT_DEVICE];
        size_t i;
        for (i = 0; i < mCheatSetsSize(&device->cheats); ++i) {
            struct mCheatSet* cheats = *mCheatSetsGetPointer(&device->cheats, i);
            mCheatRefresh(device, cheats);
        }
    }

    // TODO: Move to common code
    if (gb->stream && gb->stream->postVideoFrame) {
        const color_t* pixels;
        size_t stride;
        gb->video.renderer->getPixels(gb->video.renderer, &stride, (const void**) &pixels);
        gb->stream->postVideoFrame(gb->stream, pixels, stride);
    }

    size_t c;
    for (c = 0; c < mCoreCallbacksListSize(&gb->coreCallbacks); ++c) {
        struct mCoreCallbacks* callbacks = mCoreCallbacksListGetPointer(&gb->coreCallbacks, c);
        if (callbacks->videoFrameEnded) {
            callbacks->videoFrameEnded(callbacks->context);
        }
    }
}

enum GBModel GBNameToModel(const char* model) {
    if (strcasecmp(model, "DMG") == 0) {
        return GB_MODEL_DMG;
    } else if (strcasecmp(model, "CGB") == 0) {
        return GB_MODEL_CGB;
    } else if (strcasecmp(model, "AGB") == 0) {
        return GB_MODEL_AGB;
    } else if (strcasecmp(model, "SGB") == 0) {
        return GB_MODEL_SGB;
    } else if (strcasecmp(model, "MGB") == 0) {
        return GB_MODEL_MGB;
    } else if (strcasecmp(model, "SGB2") == 0) {
        return GB_MODEL_SGB2;
    }
    return GB_MODEL_AUTODETECT;
}

const char* GBModelToName(enum GBModel model) {
    switch (model) {
    case GB_MODEL_DMG:
        return "DMG";
    case GB_MODEL_SGB:
        return "SGB";
    case GB_MODEL_MGB:
        return "MGB";
    case GB_MODEL_SGB2:
        return "SGB2";
    case GB_MODEL_CGB:
        return "CGB";
    case GB_MODEL_AGB:
        return "AGB";
    default:
    case GB_MODEL_AUTODETECT:
        return NULL;
    }
}