all repos — mgba @ 3d996caf533cd55badf30f328f1a1f5fb72d450f

mGBA Game Boy Advance Emulator

Shaders: Hand-convert xBR shader to GLSL
Jeffrey Pfau jeffrey@endrift.com
Sat, 23 Jan 2016 15:45:39 -0800
commit

3d996caf533cd55badf30f328f1a1f5fb72d450f

parent

d07da0744cfb5aff43ae9aff6a7b97c2b9e874d9

2 files changed, 232 insertions(+), 501 deletions(-)

jump to
M res/shaders/xbr.shader/xbr.fs

@@ -24,13 +24,56 @@

Incorporates some of the ideas from SABR shader. Thanks to Joshua Street. */ + +#define XBR_Y_WEIGHT 48.0 +#define XBR_EQ_THRESHOLD 10.0 +#define XBR_EQ_THRESHOLD2 2.0 +#define XBR_LV2_COEFFICIENT 2.0 + +const mat3 yuv = mat3(0.299, 0.587, 0.114, -0.169, -0.331, 0.499, 0.499, -0.418, -0.0813); +const vec4 delta = vec4(0.4, 0.4, 0.4, 0.4); + +vec4 df(vec4 A, vec4 B) +{ + return vec4(abs(A-B)); +} + +float c_df(vec3 c1, vec3 c2) { + vec3 df = abs(c1 - c2); + return df.r + df.g + df.b; +} + +bvec4 eq(vec4 A, vec4 B) +{ + return lessThan(df(A, B), vec4(XBR_EQ_THRESHOLD)); +} + +bvec4 eq2(vec4 A, vec4 B) +{ + return lessThan(df(A, B), vec4(XBR_EQ_THRESHOLD2)); +} + +bvec4 and(bvec4 A, bvec4 B) +{ + return bvec4(A.x && B.x, A.y && B.y, A.z && B.z, A.w && B.w); +} + +bvec4 or(bvec4 A, bvec4 B) +{ + return bvec4(A.x || B.x, A.y || B.y, A.z || B.z, A.w || B.w); +} + +vec4 weighted_distance(vec4 a, vec4 b, vec4 c, vec4 d, vec4 e, vec4 f, vec4 g, vec4 h) +{ + return (df(a,b) + df(a,c) + df(d,e) + df(d,f) + 4.0*df(g,h)); +} + // GLSL shader autogenerated by cg2glsl.py. #if __VERSION__ >= 130 -#define COMPAT_VARYING in +#define varying in #define COMPAT_TEXTURE texture out vec4 FragColor; #else -#define COMPAT_VARYING varying #define FragColor gl_FragColor #define COMPAT_TEXTURE texture2D #endif

@@ -45,459 +88,164 @@ #define COMPAT_PRECISION mediump

#else #define COMPAT_PRECISION #endif -vec4 _ret_0; -float _TMP57; -float _TMP56; -float _TMP55; -vec3 _TMP54; -vec3 _TMP53; -vec3 _TMP52; -vec3 _TMP51; -vec3 _TMP50; -vec3 _TMP49; -vec3 _TMP48; -vec3 _TMP47; -vec4 _TMP46; -vec4 _TMP45; -vec4 _TMP44; -vec4 _TMP37; -vec4 _TMP36; -vec4 _TMP65; -bvec4 _TMP35; -bvec4 _TMP34; -bvec4 _TMP33; -bvec4 _TMP32; -bvec4 _TMP31; -bvec4 _TMP30; -bvec4 _TMP29; -bvec4 _TMP28; -bvec4 _TMP27; -bvec4 _TMP26; -bvec4 _TMP25; -bvec4 _TMP24; -bvec4 _TMP23; -bvec4 _TMP22; -bvec4 _TMP21; -vec4 _TMP20; -vec4 _TMP19; -vec4 _TMP18; -vec4 _TMP17; -vec4 _TMP16; -vec4 _TMP15; -vec4 _TMP14; -vec4 _TMP13; -vec4 _TMP12; -vec4 _TMP11; -vec4 _TMP10; -vec4 _TMP9; -vec4 _TMP8; -vec4 _TMP7; -vec4 _TMP6; -vec4 _TMP5; -vec4 _TMP4; -vec4 _TMP3; -vec4 _TMP2; -vec4 _TMP1; -vec4 _TMP0; uniform sampler2D tex; -vec2 _x0078; -vec4 _r0122; -vec4 _r0132; -vec4 _r0142; -vec4 _r0152; -vec4 _r0162; -vec4 _r0172; -vec4 _TMP183; -vec4 _a0186; -vec4 _TMP189; -vec4 _a0192; -vec4 _TMP195; -vec4 _a0198; -vec4 _TMP201; -vec4 _a0204; -vec4 _TMP207; -vec4 _a0210; -vec4 _TMP213; -vec4 _a0216; -vec4 _TMP219; -vec4 _a0222; -vec4 _TMP225; -vec4 _a0228; -vec4 _TMP231; -vec4 _a0234; -vec4 _TMP237; -vec4 _a0240; -vec4 _TMP243; -vec4 _a0246; -vec4 _TMP249; -vec4 _a0252; -vec4 _TMP255; -vec4 _a0258; -vec4 _TMP261; -vec4 _a0264; -vec4 _TMP267; -vec4 _a0270; -vec4 _x0274; -vec4 _TMP275; -vec4 _x0284; -vec4 _TMP285; -vec4 _x0294; -vec4 _TMP295; -vec4 _x0304; -vec4 _TMP305; -vec4 _x0314; -vec4 _TMP315; -vec4 _TMP323; -vec4 _a0326; -vec4 _TMP327; -vec4 _a0330; -vec4 _TMP331; -vec4 _a0334; -vec4 _TMP335; -vec4 _a0338; -vec4 _TMP339; -vec4 _a0342; -vec4 _TMP345; -vec4 _a0348; -vec4 _TMP349; -vec4 _a0352; -vec4 _TMP353; -vec4 _a0356; -vec4 _TMP357; -vec4 _a0360; -vec4 _TMP361; -vec4 _a0364; -vec4 _TMP365; -vec4 _a0368; -vec4 _TMP369; -vec4 _a0372; -vec4 _TMP373; -vec4 _a0376; -vec4 _TMP377; -vec4 _a0380; -vec4 _TMP381; -vec4 _a0384; -vec4 _TMP385; -vec4 _a0388; -vec3 _df0402; -vec3 _a0404; 
-vec3 _df0406; -vec3 _a0408; -COMPAT_VARYING vec4 TEX0; -COMPAT_VARYING vec4 TEX1; -COMPAT_VARYING vec4 TEX2; -COMPAT_VARYING vec4 TEX3; -COMPAT_VARYING vec4 TEX4; -COMPAT_VARYING vec4 TEX5; -COMPAT_VARYING vec4 TEX6; -COMPAT_VARYING vec4 TEX7; +varying vec2 texCoord; +varying vec4 TEX1; +varying vec4 TEX2; +varying vec4 TEX3; +varying vec4 TEX4; +varying vec4 TEX5; +varying vec4 TEX6; +varying vec4 TEX7; + +const vec2 TextureSize = vec2(240.0, 160.0); -const COMPAT_PRECISION vec2 TextureSize = vec2(240.0, 160.0); void main() { - bvec4 _edr; - bvec4 _edr_left; - bvec4 _edr_up; - bvec4 _px; - bvec4 _interp_restriction_lv1; - bvec4 _interp_restriction_lv2_left; - bvec4 _interp_restriction_lv2_up; - bvec4 _interp_restriction_lv3_left; - bvec4 _interp_restriction_lv3_up; - bvec4 _nc; - bvec4 _nc30; - bvec4 _nc60; - bvec4 _nc45; - bvec4 _nc15; - bvec4 _nc75; - vec4 _fx; - vec4 _fx_left; - vec4 _fx_up; - vec4 _fx3_left; - vec4 _fx3_up; - vec3 _res1; - vec3 _res2; - vec3 _pix1; - vec3 _pix2; - float _blend1; - float _blend2; - vec2 _fp; - vec4 _fx45; - vec4 _fx30; - vec4 _fx60; - vec4 _fx15; - vec4 _fx75; - vec4 _final45; - vec4 _final30; - vec4 _final60; - vec4 _final15; - vec4 _final75; - vec4 _maximo; - vec3 _res; - _x0078 = TEX0.xy*TextureSize; - _fp = fract(_x0078); - _TMP0 = COMPAT_TEXTURE(tex, TEX1.xw); - _TMP1 = COMPAT_TEXTURE(tex, TEX1.yw); - _TMP2 = COMPAT_TEXTURE(tex, TEX1.zw); - _TMP3 = COMPAT_TEXTURE(tex, TEX2.xw); - _TMP4 = COMPAT_TEXTURE(tex, TEX2.yw); - _TMP5 = COMPAT_TEXTURE(tex, TEX2.zw); - _TMP6 = COMPAT_TEXTURE(tex, TEX3.xw); - _TMP7 = COMPAT_TEXTURE(tex, TEX3.yw); - _TMP8 = COMPAT_TEXTURE(tex, TEX3.zw); - _TMP9 = COMPAT_TEXTURE(tex, TEX4.xw); - _TMP10 = COMPAT_TEXTURE(tex, TEX4.yw); - _TMP11 = COMPAT_TEXTURE(tex, TEX4.zw); - _TMP12 = COMPAT_TEXTURE(tex, TEX5.xw); - _TMP13 = COMPAT_TEXTURE(tex, TEX5.yw); - _TMP14 = COMPAT_TEXTURE(tex, TEX5.zw); - _TMP15 = COMPAT_TEXTURE(tex, TEX6.xy); - _TMP16 = COMPAT_TEXTURE(tex, TEX6.xz); - _TMP17 = 
COMPAT_TEXTURE(tex, TEX6.xw); - _TMP18 = COMPAT_TEXTURE(tex, TEX7.xy); - _TMP19 = COMPAT_TEXTURE(tex, TEX7.xz); - _TMP20 = COMPAT_TEXTURE(tex, TEX7.xw); - _r0122.x = dot(_TMP4.xyz, vec3( 1.43519993E+01, 2.81760006E+01, 5.47200012E+00)); - _r0122.y = dot(_TMP6.xyz, vec3( 1.43519993E+01, 2.81760006E+01, 5.47200012E+00)); - _r0122.z = dot(_TMP10.xyz, vec3( 1.43519993E+01, 2.81760006E+01, 5.47200012E+00)); - _r0122.w = dot(_TMP8.xyz, vec3( 1.43519993E+01, 2.81760006E+01, 5.47200012E+00)); - _r0132.x = dot(_TMP5.xyz, vec3( 1.43519993E+01, 2.81760006E+01, 5.47200012E+00)); - _r0132.y = dot(_TMP3.xyz, vec3( 1.43519993E+01, 2.81760006E+01, 5.47200012E+00)); - _r0132.z = dot(_TMP9.xyz, vec3( 1.43519993E+01, 2.81760006E+01, 5.47200012E+00)); - _r0132.w = dot(_TMP11.xyz, vec3( 1.43519993E+01, 2.81760006E+01, 5.47200012E+00)); - _r0142.x = dot(_TMP7.xyz, vec3( 1.43519993E+01, 2.81760006E+01, 5.47200012E+00)); - _r0142.y = dot(_TMP7.xyz, vec3( 1.43519993E+01, 2.81760006E+01, 5.47200012E+00)); - _r0142.z = dot(_TMP7.xyz, vec3( 1.43519993E+01, 2.81760006E+01, 5.47200012E+00)); - _r0142.w = dot(_TMP7.xyz, vec3( 1.43519993E+01, 2.81760006E+01, 5.47200012E+00)); - _r0152.x = dot(_TMP20.xyz, vec3( 1.43519993E+01, 2.81760006E+01, 5.47200012E+00)); - _r0152.y = dot(_TMP2.xyz, vec3( 1.43519993E+01, 2.81760006E+01, 5.47200012E+00)); - _r0152.z = dot(_TMP15.xyz, vec3( 1.43519993E+01, 2.81760006E+01, 5.47200012E+00)); - _r0152.w = dot(_TMP12.xyz, vec3( 1.43519993E+01, 2.81760006E+01, 5.47200012E+00)); - _r0162.x = dot(_TMP14.xyz, vec3( 1.43519993E+01, 2.81760006E+01, 5.47200012E+00)); - _r0162.y = dot(_TMP18.xyz, vec3( 1.43519993E+01, 2.81760006E+01, 5.47200012E+00)); - _r0162.z = dot(_TMP0.xyz, vec3( 1.43519993E+01, 2.81760006E+01, 5.47200012E+00)); - _r0162.w = dot(_TMP17.xyz, vec3( 1.43519993E+01, 2.81760006E+01, 5.47200012E+00)); - _r0172.x = dot(_TMP13.xyz, vec3( 1.43519993E+01, 2.81760006E+01, 5.47200012E+00)); - _r0172.y = dot(_TMP19.xyz, vec3( 1.43519993E+01, 2.81760006E+01, 
5.47200012E+00)); - _r0172.z = dot(_TMP1.xyz, vec3( 1.43519993E+01, 2.81760006E+01, 5.47200012E+00)); - _r0172.w = dot(_TMP16.xyz, vec3( 1.43519993E+01, 2.81760006E+01, 5.47200012E+00)); - _fx = vec4( 1.00000000E+00, -1.00000000E+00, -1.00000000E+00, 1.00000000E+00)*_fp.y + vec4( 1.00000000E+00, 1.00000000E+00, -1.00000000E+00, -1.00000000E+00)*_fp.x; - _fx_left = vec4( 1.00000000E+00, -1.00000000E+00, -1.00000000E+00, 1.00000000E+00)*_fp.y + vec4( 5.00000000E-01, 2.00000000E+00, -5.00000000E-01, -2.00000000E+00)*_fp.x; - _fx_up = vec4( 1.00000000E+00, -1.00000000E+00, -1.00000000E+00, 1.00000000E+00)*_fp.y + vec4( 2.00000000E+00, 5.00000000E-01, -2.00000000E+00, -5.00000000E-01)*_fp.x; - _fx3_left = vec4( 6.00000000E+00, -2.00000000E+00, -6.00000000E+00, 2.00000000E+00)*_fp.y + vec4( 2.00000000E+00, 6.00000000E+00, -2.00000000E+00, -6.00000000E+00)*_fp.x; - _fx3_up = vec4( 2.00000000E+00, -6.00000000E+00, -2.00000000E+00, 6.00000000E+00)*_fp.y + vec4( 6.00000000E+00, 2.00000000E+00, -6.00000000E+00, -2.00000000E+00)*_fp.x; - _a0186 = _r0122.wxyz - _r0122; - _TMP183 = abs(_a0186); - _TMP21 = bvec4(_TMP183.x < 1.00000000E+01, _TMP183.y < 1.00000000E+01, _TMP183.z < 1.00000000E+01, _TMP183.w < 1.00000000E+01); - _a0192 = _r0122.wxyz - _r0132; - _TMP189 = abs(_a0192); - _TMP22 = bvec4(_TMP189.x < 1.00000000E+01, _TMP189.y < 1.00000000E+01, _TMP189.z < 1.00000000E+01, _TMP189.w < 1.00000000E+01); - _a0198 = _r0122.zwxy - _r0122.yzwx; - _TMP195 = abs(_a0198); - _TMP23 = bvec4(_TMP195.x < 1.00000000E+01, _TMP195.y < 1.00000000E+01, _TMP195.z < 1.00000000E+01, _TMP195.w < 1.00000000E+01); - _a0204 = _r0122.zwxy - _r0132.zwxy; - _TMP201 = abs(_a0204); - _TMP24 = bvec4(_TMP201.x < 1.00000000E+01, _TMP201.y < 1.00000000E+01, _TMP201.z < 1.00000000E+01, _TMP201.w < 1.00000000E+01); - _a0210 = _r0142 - _r0132.wxyz; - _TMP207 = abs(_a0210); - _TMP25 = bvec4(_TMP207.x < 1.00000000E+01, _TMP207.y < 1.00000000E+01, _TMP207.z < 1.00000000E+01, _TMP207.w < 1.00000000E+01); - _a0216 
= _r0122.wxyz - _r0172.yzwx; - _TMP213 = abs(_a0216); - _TMP26 = bvec4(_TMP213.x < 1.00000000E+01, _TMP213.y < 1.00000000E+01, _TMP213.z < 1.00000000E+01, _TMP213.w < 1.00000000E+01); - _a0222 = _r0122.wxyz - _r0152; - _TMP219 = abs(_a0222); - _TMP27 = bvec4(_TMP219.x < 1.00000000E+01, _TMP219.y < 1.00000000E+01, _TMP219.z < 1.00000000E+01, _TMP219.w < 1.00000000E+01); - _a0228 = _r0122.zwxy - _r0172; - _TMP225 = abs(_a0228); - _TMP28 = bvec4(_TMP225.x < 1.00000000E+01, _TMP225.y < 1.00000000E+01, _TMP225.z < 1.00000000E+01, _TMP225.w < 1.00000000E+01); - _a0234 = _r0122.zwxy - _r0162; - _TMP231 = abs(_a0234); - _TMP29 = bvec4(_TMP231.x < 1.00000000E+01, _TMP231.y < 1.00000000E+01, _TMP231.z < 1.00000000E+01, _TMP231.w < 1.00000000E+01); - _a0240 = _r0142 - _r0132.zwxy; - _TMP237 = abs(_a0240); - _TMP30 = bvec4(_TMP237.x < 1.00000000E+01, _TMP237.y < 1.00000000E+01, _TMP237.z < 1.00000000E+01, _TMP237.w < 1.00000000E+01); - _a0246 = _r0142 - _r0132; - _TMP243 = abs(_a0246); - _TMP31 = bvec4(_TMP243.x < 1.00000000E+01, _TMP243.y < 1.00000000E+01, _TMP243.z < 1.00000000E+01, _TMP243.w < 1.00000000E+01); - _interp_restriction_lv1 = bvec4(_r0142.x != _r0122.w && _r0142.x != _r0122.z && (!_TMP21.x && !_TMP22.x || !_TMP23.x && !_TMP24.x || _TMP25.x && (!_TMP26.x && !_TMP27.x || !_TMP28.x && !_TMP29.x) || _TMP30.x || _TMP31.x), _r0142.y != _r0122.x && _r0142.y != _r0122.w && (!_TMP21.y && !_TMP22.y || !_TMP23.y && !_TMP24.y || _TMP25.y && (!_TMP26.y && !_TMP27.y || !_TMP28.y && !_TMP29.y) || _TMP30.y || _TMP31.y), _r0142.z != _r0122.y && _r0142.z != _r0122.x && (!_TMP21.z && !_TMP22.z || !_TMP23.z && !_TMP24.z || _TMP25.z && (!_TMP26.z && !_TMP27.z || !_TMP28.z && !_TMP29.z) || _TMP30.z || _TMP31.z), _r0142.w != _r0122.z && _r0142.w != _r0122.y && (!_TMP21.w && !_TMP22.w || !_TMP23.w && !_TMP24.w || _TMP25.w && (!_TMP26.w && !_TMP27.w || !_TMP28.w && !_TMP29.w) || _TMP30.w || _TMP31.w)); - _interp_restriction_lv2_left = bvec4(_r0142.x != _r0132.z && _r0122.y != _r0132.z, 
_r0142.y != _r0132.w && _r0122.z != _r0132.w, _r0142.z != _r0132.x && _r0122.w != _r0132.x, _r0142.w != _r0132.y && _r0122.x != _r0132.y); - _interp_restriction_lv2_up = bvec4(_r0142.x != _r0132.x && _r0122.x != _r0132.x, _r0142.y != _r0132.y && _r0122.y != _r0132.y, _r0142.z != _r0132.z && _r0122.z != _r0132.z, _r0142.w != _r0132.w && _r0122.w != _r0132.w); - _a0252 = _r0132.zwxy - _r0162.wxyz; - _TMP249 = abs(_a0252); - _TMP32 = bvec4(_TMP249.x < 2.00000000E+00, _TMP249.y < 2.00000000E+00, _TMP249.z < 2.00000000E+00, _TMP249.w < 2.00000000E+00); - _a0258 = _r0172.wxyz - _r0162.wxyz; - _TMP255 = abs(_a0258); - _TMP33 = bvec4(_TMP255.x < 2.00000000E+00, _TMP255.y < 2.00000000E+00, _TMP255.z < 2.00000000E+00, _TMP255.w < 2.00000000E+00); - _interp_restriction_lv3_left = bvec4(_TMP32.x && !_TMP33.x, _TMP32.y && !_TMP33.y, _TMP32.z && !_TMP33.z, _TMP32.w && !_TMP33.w); - _a0264 = _r0132 - _r0152.yzwx; - _TMP261 = abs(_a0264); - _TMP34 = bvec4(_TMP261.x < 2.00000000E+00, _TMP261.y < 2.00000000E+00, _TMP261.z < 2.00000000E+00, _TMP261.w < 2.00000000E+00); - _a0270 = _r0172.zwxy - _r0152.yzwx; - _TMP267 = abs(_a0270); - _TMP35 = bvec4(_TMP267.x < 2.00000000E+00, _TMP267.y < 2.00000000E+00, _TMP267.z < 2.00000000E+00, _TMP267.w < 2.00000000E+00); - _interp_restriction_lv3_up = bvec4(_TMP34.x && !_TMP35.x, _TMP34.y && !_TMP35.y, _TMP34.z && !_TMP35.z, _TMP34.w && !_TMP35.w); - _x0274 = (_fx - vec4( 1.10000002E+00, 9.99999940E-02, -8.99999976E-01, 9.99999940E-02))/vec4( 7.99999952E-01, 7.99999952E-01, 7.99999952E-01, 7.99999952E-01); - _TMP65 = min(vec4( 1.00000000E+00, 1.00000000E+00, 1.00000000E+00, 1.00000000E+00), _x0274); - _TMP275 = max(vec4( 0.00000000E+00, 0.00000000E+00, 0.00000000E+00, 0.00000000E+00), _TMP65); - _fx45 = _TMP275*_TMP275*(3.00000000E+00 - 2.00000000E+00*_TMP275); - _x0284 = (_fx_left - vec4( 6.00000024E-01, 6.00000024E-01, -8.99999976E-01, -4.00000006E-01))/vec4( 7.99999952E-01, 7.99999952E-01, 7.99999952E-01, 8.00000012E-01); - _TMP65 = min(vec4( 
1.00000000E+00, 1.00000000E+00, 1.00000000E+00, 1.00000000E+00), _x0284); - _TMP285 = max(vec4( 0.00000000E+00, 0.00000000E+00, 0.00000000E+00, 0.00000000E+00), _TMP65); - _fx30 = _TMP285*_TMP285*(3.00000000E+00 - 2.00000000E+00*_TMP285); - _x0294 = (_fx_up - vec4( 1.60000002E+00, -4.00000006E-01, -1.39999998E+00, 9.99999940E-02))/vec4( 8.00000072E-01, 8.00000012E-01, 7.99999952E-01, 7.99999952E-01); - _TMP65 = min(vec4( 1.00000000E+00, 1.00000000E+00, 1.00000000E+00, 1.00000000E+00), _x0294); - _TMP295 = max(vec4( 0.00000000E+00, 0.00000000E+00, 0.00000000E+00, 0.00000000E+00), _TMP65); - _fx60 = _TMP295*_TMP295*(3.00000000E+00 - 2.00000000E+00*_TMP295); - _x0304 = (_fx3_left - vec4( 4.59999990E+00, 2.59999990E+00, -3.40000010E+00, -1.39999998E+00))/vec4( 8.00000191E-01, 8.00000191E-01, 8.00000191E-01, 7.99999952E-01); - _TMP65 = min(vec4( 1.00000000E+00, 1.00000000E+00, 1.00000000E+00, 1.00000000E+00), _x0304); - _TMP305 = max(vec4( 0.00000000E+00, 0.00000000E+00, 0.00000000E+00, 0.00000000E+00), _TMP65); - _fx15 = _TMP305*_TMP305*(3.00000000E+00 - 2.00000000E+00*_TMP305); - _x0314 = (_fx3_up - vec4( 4.59999990E+00, -1.39999998E+00, -3.40000010E+00, 2.59999990E+00))/vec4( 8.00000191E-01, 7.99999952E-01, 8.00000191E-01, 8.00000191E-01); - _TMP65 = min(vec4( 1.00000000E+00, 1.00000000E+00, 1.00000000E+00, 1.00000000E+00), _x0314); - _TMP315 = max(vec4( 0.00000000E+00, 0.00000000E+00, 0.00000000E+00, 0.00000000E+00), _TMP65); - _fx75 = _TMP315*_TMP315*(3.00000000E+00 - 2.00000000E+00*_TMP315); - _a0326 = _r0142 - _r0132; - _TMP323 = abs(_a0326); - _a0330 = _r0142 - _r0132.zwxy; - _TMP327 = abs(_a0330); - _a0334 = _r0132.wxyz - _r0172; - _TMP331 = abs(_a0334); - _a0338 = _r0132.wxyz - _r0172.yzwx; - _TMP335 = abs(_a0338); - _a0342 = _r0122.zwxy - _r0122.wxyz; - _TMP339 = abs(_a0342); - _TMP36 = _TMP323 + _TMP327 + _TMP331 + _TMP335 + 4.00000000E+00*_TMP339; - _a0348 = _r0122.zwxy - _r0122.yzwx; - _TMP345 = abs(_a0348); - _a0352 = _r0122.zwxy - _r0162; - _TMP349 = 
abs(_a0352); - _a0356 = _r0122.wxyz - _r0152; - _TMP353 = abs(_a0356); - _a0360 = _r0122.wxyz - _r0122; - _TMP357 = abs(_a0360); - _a0364 = _r0142 - _r0132.wxyz; - _TMP361 = abs(_a0364); - _TMP37 = _TMP345 + _TMP349 + _TMP353 + _TMP357 + 4.00000000E+00*_TMP361; - _edr = bvec4(_TMP36.x < _TMP37.x && _interp_restriction_lv1.x, _TMP36.y < _TMP37.y && _interp_restriction_lv1.y, _TMP36.z < _TMP37.z && _interp_restriction_lv1.z, _TMP36.w < _TMP37.w && _interp_restriction_lv1.w); - _a0368 = _r0122.wxyz - _r0132.zwxy; - _TMP365 = abs(_a0368); - _a0372 = _r0122.zwxy - _r0132; - _TMP369 = abs(_a0372); - _edr_left = bvec4((2.00000000E+00*_TMP365).x <= _TMP369.x && _interp_restriction_lv2_left.x, (2.00000000E+00*_TMP365).y <= _TMP369.y && _interp_restriction_lv2_left.y, (2.00000000E+00*_TMP365).z <= _TMP369.z && _interp_restriction_lv2_left.z, (2.00000000E+00*_TMP365).w <= _TMP369.w && _interp_restriction_lv2_left.w); - _a0376 = _r0122.wxyz - _r0132.zwxy; - _TMP373 = abs(_a0376); - _a0380 = _r0122.zwxy - _r0132; - _TMP377 = abs(_a0380); - _edr_up = bvec4(_TMP373.x >= (2.00000000E+00*_TMP377).x && _interp_restriction_lv2_up.x, _TMP373.y >= (2.00000000E+00*_TMP377).y && _interp_restriction_lv2_up.y, _TMP373.z >= (2.00000000E+00*_TMP377).z && _interp_restriction_lv2_up.z, _TMP373.w >= (2.00000000E+00*_TMP377).w && _interp_restriction_lv2_up.w); - _nc45 = bvec4(_edr.x && bool(_fx45.x), _edr.y && bool(_fx45.y), _edr.z && bool(_fx45.z), _edr.w && bool(_fx45.w)); - _nc30 = bvec4(_edr.x && _edr_left.x && bool(_fx30.x), _edr.y && _edr_left.y && bool(_fx30.y), _edr.z && _edr_left.z && bool(_fx30.z), _edr.w && _edr_left.w && bool(_fx30.w)); - _nc60 = bvec4(_edr.x && _edr_up.x && bool(_fx60.x), _edr.y && _edr_up.y && bool(_fx60.y), _edr.z && _edr_up.z && bool(_fx60.z), _edr.w && _edr_up.w && bool(_fx60.w)); - _nc15 = bvec4(_edr.x && _edr_left.x && _interp_restriction_lv3_left.x && bool(_fx15.x), _edr.y && _edr_left.y && _interp_restriction_lv3_left.y && bool(_fx15.y), _edr.z && 
_edr_left.z && _interp_restriction_lv3_left.z && bool(_fx15.z), _edr.w && _edr_left.w && _interp_restriction_lv3_left.w && bool(_fx15.w)); - _nc75 = bvec4(_edr.x && _edr_up.x && _interp_restriction_lv3_up.x && bool(_fx75.x), _edr.y && _edr_up.y && _interp_restriction_lv3_up.y && bool(_fx75.y), _edr.z && _edr_up.z && _interp_restriction_lv3_up.z && bool(_fx75.z), _edr.w && _edr_up.w && _interp_restriction_lv3_up.w && bool(_fx75.w)); - _a0384 = _r0142 - _r0122.wxyz; - _TMP381 = abs(_a0384); - _a0388 = _r0142 - _r0122.zwxy; - _TMP385 = abs(_a0388); - _px = bvec4(_TMP381.x <= _TMP385.x, _TMP381.y <= _TMP385.y, _TMP381.z <= _TMP385.z, _TMP381.w <= _TMP385.w); - _nc = bvec4(_nc75.x || _nc15.x || _nc30.x || _nc60.x || _nc45.x, _nc75.y || _nc15.y || _nc30.y || _nc60.y || _nc45.y, _nc75.z || _nc15.z || _nc30.z || _nc60.z || _nc45.z, _nc75.w || _nc15.w || _nc30.w || _nc60.w || _nc45.w); - _final45 = vec4(float(_nc45.x), float(_nc45.y), float(_nc45.z), float(_nc45.w))*_fx45; - _final30 = vec4(float(_nc30.x), float(_nc30.y), float(_nc30.z), float(_nc30.w))*_fx30; - _final60 = vec4(float(_nc60.x), float(_nc60.y), float(_nc60.z), float(_nc60.w))*_fx60; - _final15 = vec4(float(_nc15.x), float(_nc15.y), float(_nc15.z), float(_nc15.w))*_fx15; - _final75 = vec4(float(_nc75.x), float(_nc75.y), float(_nc75.z), float(_nc75.w))*_fx75; - _TMP44 = max(_final15, _final75); - _TMP45 = max(_final30, _final60); - _TMP46 = max(_TMP44, _TMP45); - _maximo = max(_TMP46, _final45); - if (_nc.x) { - if (_px.x) { - _TMP47 = _TMP8.xyz; - } else { - _TMP47 = _TMP10.xyz; - } - _pix1 = _TMP47; - _blend1 = _maximo.x; - } else { - if (_nc.y) { - if (_px.y) { - _TMP48 = _TMP4.xyz; - } else { - _TMP48 = _TMP8.xyz; - } - _pix1 = _TMP48; - _blend1 = _maximo.y; - } else { - if (_nc.z) { - if (_px.z) { - _TMP49 = _TMP6.xyz; - } else { - _TMP49 = _TMP4.xyz; - } - _pix1 = _TMP49; - _blend1 = _maximo.z; - } else { - if (_nc.w) { - if (_px.w) { - _TMP50 = _TMP10.xyz; - } else { - _TMP50 = _TMP6.xyz; - } - _pix1 = 
_TMP50; - _blend1 = _maximo.w; - } - } - } - } - if (_nc.w) { - if (_px.w) { - _TMP51 = _TMP10.xyz; - } else { - _TMP51 = _TMP6.xyz; - } - _pix2 = _TMP51; - _blend2 = _maximo.w; - } else { - if (_nc.z) { - if (_px.z) { - _TMP52 = _TMP6.xyz; - } else { - _TMP52 = _TMP4.xyz; - } - _pix2 = _TMP52; - _blend2 = _maximo.z; - } else { - if (_nc.y) { - if (_px.y) { - _TMP53 = _TMP4.xyz; - } else { - _TMP53 = _TMP8.xyz; - } - _pix2 = _TMP53; - _blend2 = _maximo.y; - } else { - if (_nc.x) { - if (_px.x) { - _TMP54 = _TMP8.xyz; - } else { - _TMP54 = _TMP10.xyz; - } - _pix2 = _TMP54; - _blend2 = _maximo.x; - } - } - } - } - _res1 = _TMP7.xyz + _blend1*(_pix1 - _TMP7.xyz); - _res2 = _TMP7.xyz + _blend2*(_pix2 - _TMP7.xyz); - _a0404 = _TMP7.xyz - _res1; - _df0402 = abs(_a0404); - _TMP55 = _df0402.x + _df0402.y + _df0402.z; - _a0408 = _TMP7.xyz - _res2; - _df0406 = abs(_a0408); - _TMP56 = _df0406.x + _df0406.y + _df0406.z; - _TMP57 = float((_TMP56 >= _TMP55)); - _res = _res1 + _TMP57*(_res2 - _res1); - _ret_0 = vec4(_res.x, _res.y, _res.z, 1.00000000E+00); - FragColor = _ret_0; - return; + bvec4 edr, edr_left, edr_up, edr3_left, edr3_up, px; // px = pixel, edr = edge detection rule + bvec4 interp_restriction_lv1, interp_restriction_lv2_left, interp_restriction_lv2_up; + bvec4 interp_restriction_lv3_left, interp_restriction_lv3_up; + bvec4 nc, nc30, nc60, nc45, nc15, nc75; // new_color + vec4 fx, fx_left, fx_up, finalfx, fx3_left, fx3_up; // inequations of straight lines. 
+ vec3 res1, res2, pix1, pix2; + float blend1, blend2; + + vec2 fp = fract(texCoord * TextureSize); + + vec3 A1 = COMPAT_TEXTURE(tex, TEX1.xw).rgb; + vec3 B1 = COMPAT_TEXTURE(tex, TEX1.yw).rgb; + vec3 C1 = COMPAT_TEXTURE(tex, TEX1.zw).rgb; + + vec3 A = COMPAT_TEXTURE(tex, TEX2.xw).rgb; + vec3 B = COMPAT_TEXTURE(tex, TEX2.yw).rgb; + vec3 C = COMPAT_TEXTURE(tex, TEX2.zw).rgb; + + vec3 D = COMPAT_TEXTURE(tex, TEX3.xw).rgb; + vec3 E = COMPAT_TEXTURE(tex, TEX3.yw).rgb; + vec3 F = COMPAT_TEXTURE(tex, TEX3.zw).rgb; + + vec3 G = COMPAT_TEXTURE(tex, TEX4.xw).rgb; + vec3 H = COMPAT_TEXTURE(tex, TEX4.yw).rgb; + vec3 I = COMPAT_TEXTURE(tex, TEX4.zw).rgb; + + vec3 G5 = COMPAT_TEXTURE(tex, TEX5.xw).rgb; + vec3 H5 = COMPAT_TEXTURE(tex, TEX5.yw).rgb; + vec3 I5 = COMPAT_TEXTURE(tex, TEX5.zw).rgb; + + vec3 A0 = COMPAT_TEXTURE(tex, TEX6.xy).rgb; + vec3 D0 = COMPAT_TEXTURE(tex, TEX6.xz).rgb; + vec3 G0 = COMPAT_TEXTURE(tex, TEX6.xw).rgb; + + vec3 C4 = COMPAT_TEXTURE(tex, TEX7.xy).rgb; + vec3 F4 = COMPAT_TEXTURE(tex, TEX7.xz).rgb; + vec3 I4 = COMPAT_TEXTURE(tex, TEX7.xw).rgb; + + vec4 b = transpose(mat4x3(B, D, H, F)) * (XBR_Y_WEIGHT * yuv[0]); + vec4 c = transpose(mat4x3(C, A, G, I)) * (XBR_Y_WEIGHT * yuv[0]); + vec4 e = transpose(mat4x3(E, E, E, E)) * (XBR_Y_WEIGHT * yuv[0]); + vec4 d = b.yzwx; + vec4 f = b.wxyz; + vec4 g = c.zwxy; + vec4 h = b.zwxy; + vec4 i = c.wxyz; + + vec4 i4 = transpose(mat4x3(I4, C1, A0, G5)) * (XBR_Y_WEIGHT*yuv[0]); + vec4 i5 = transpose(mat4x3(I5, C4, A1, G0)) * (XBR_Y_WEIGHT*yuv[0]); + vec4 h5 = transpose(mat4x3(H5, F4, B1, D0)) * (XBR_Y_WEIGHT*yuv[0]); + vec4 f4 = h5.yzwx; + + vec4 c1 = i4.yzwx; + vec4 g0 = i5.wxyz; + vec4 b1 = h5.zwxy; + vec4 d0 = h5.wxyz; + + vec4 Ao = vec4( 1.0, -1.0, -1.0, 1.0 ); + vec4 Bo = vec4( 1.0, 1.0, -1.0,-1.0 ); + vec4 Co = vec4( 1.5, 0.5, -0.5, 0.5 ); + vec4 Ax = vec4( 1.0, -1.0, -1.0, 1.0 ); + vec4 Bx = vec4( 0.5, 2.0, -0.5,-2.0 ); + vec4 Cx = vec4( 1.0, 1.0, -0.5, 0.0 ); + vec4 Ay = vec4( 1.0, -1.0, -1.0, 1.0 ); + vec4 By = 
vec4( 2.0, 0.5, -2.0,-0.5 ); + vec4 Cy = vec4( 2.0, 0.0, -1.0, 0.5 ); + + vec4 Az = vec4( 6.0, -2.0, -6.0, 2.0 ); + vec4 Bz = vec4( 2.0, 6.0, -2.0, -6.0 ); + vec4 Cz = vec4( 5.0, 3.0, -3.0, -1.0 ); + vec4 Aw = vec4( 2.0, -6.0, -2.0, 6.0 ); + vec4 Bw = vec4( 6.0, 2.0, -6.0,-2.0 ); + vec4 Cw = vec4( 5.0, -1.0, -3.0, 3.0 ); + + fx = (Ao*fp.y+Bo*fp.x); + fx_left = (Ax*fp.y+Bx*fp.x); + fx_up = (Ay*fp.y+By*fp.x); + fx3_left= (Az*fp.y+Bz*fp.x); + fx3_up = (Aw*fp.y+Bw*fp.x); + + // It uses CORNER_C if none of the others are defined. +#ifdef CORNER_A + interp_restriction_lv1 = and(notEqual(e, f), notEqual(e, h)); +#elif defined(CORNER_B) + interp_restriction_lv1 = ((e!=f) && (e!=h) && ( !eq(f,b) && !eq(h,d) || eq(e,i) && !eq(f,i4) && !eq(h,i5) || eq(e,g) || eq(e,c) ) ); +#elif defined(CORNER_D) + interp_restriction_lv1 = ((e!=f) && (e!=h) && ( !eq(f,b) && !eq(h,d) || eq(e,i) && !eq(f,i4) && !eq(h,i5) || eq(e,g) || eq(e,c) ) && (f!=f4 && f!=i || h!=h5 && h!=i || h!=g || f!=c || eq(b,c1) && eq(d,g0))); +#else + interp_restriction_lv1 = and(and(notEqual(e, f), notEqual(e, h)), + or(or(and(not(eq(f,b)), not(eq(f,c))), + and(not(eq(h,d)), not(eq(h,g)))), + or(and(eq(e,i), or(and(not(eq(f,f4)), not(eq(f,i4))), + and(not(eq(h,h5)), not(eq(h,i5))))), + or(eq(e,g), eq(e,c))))); +#endif + + interp_restriction_lv2_left = and(notEqual(e, g), notEqual(d, g)); + interp_restriction_lv2_up = and(notEqual(e, c), notEqual(b, c)); + interp_restriction_lv3_left = and(eq2(g,g0), not(eq2(d0,g0))); + interp_restriction_lv3_up = and(eq2(c,c1), not(eq2(b1,c1))); + + vec4 fx45 = smoothstep(Co - delta, Co + delta, fx); + vec4 fx30 = smoothstep(Cx - delta, Cx + delta, fx_left); + vec4 fx60 = smoothstep(Cy - delta, Cy + delta, fx_up); + vec4 fx15 = smoothstep(Cz - delta, Cz + delta, fx3_left); + vec4 fx75 = smoothstep(Cw - delta, Cw + delta, fx3_up); + + edr = and(lessThan(weighted_distance( e, c, g, i, h5, f4, h, f), weighted_distance( h, d, i5, f, i4, b, e, i)), interp_restriction_lv1); + edr_left = 
and(lessThanEqual((XBR_LV2_COEFFICIENT*df(f,g)), df(h,c)), interp_restriction_lv2_left); + edr_up = and(greaterThanEqual(df(f,g), (XBR_LV2_COEFFICIENT*df(h,c))), interp_restriction_lv2_up); + edr3_left = interp_restriction_lv3_left; + edr3_up = interp_restriction_lv3_up; + + nc45 = and(edr, bvec4(fx45)); + nc30 = and(edr, and(edr_left, bvec4(fx30))); + nc60 = and(edr, and(edr_up, bvec4(fx60))); + nc15 = and(and(edr, edr_left), and(edr3_left, bvec4(fx15))); + nc75 = and(and(edr, edr_up), and(edr3_up, bvec4(fx75))); + + px = lessThanEqual(df(e, f), df(e, h)); + + nc = bvec4(nc75.x || nc15.x || nc30.x || nc60.x || nc45.x, nc75.y || nc15.y || nc30.y || nc60.y || nc45.y, nc75.z || nc15.z || nc30.z || nc60.z || nc45.z, nc75.w || nc15.w || nc30.w || nc60.w || nc45.w); + + vec4 final45 = vec4(nc45) * fx45; + vec4 final30 = vec4(nc30) * fx30; + vec4 final60 = vec4(nc60) * fx60; + vec4 final15 = vec4(nc15) * fx15; + vec4 final75 = vec4(nc75) * fx75; + + vec4 maximo = max(max(max(final15, final75),max(final30, final60)), final45); + + if (nc.x) {pix1 = px.x ? F : H; blend1 = maximo.x;} + else if (nc.y) {pix1 = px.y ? B : F; blend1 = maximo.y;} + else if (nc.z) {pix1 = px.z ? D : B; blend1 = maximo.z;} + else if (nc.w) {pix1 = px.w ? H : D; blend1 = maximo.w;} + + if (nc.w) {pix2 = px.w ? H : D; blend2 = maximo.w;} + else if (nc.z) {pix2 = px.z ? D : B; blend2 = maximo.z;} + else if (nc.y) {pix2 = px.y ? B : F; blend2 = maximo.y;} + else if (nc.x) {pix2 = px.x ? F : H; blend2 = maximo.x;} + + res1 = mix(E, pix1, blend1); + res2 = mix(E, pix2, blend2); + vec3 res = mix(res1, res2, step(c_df(E, res1), c_df(E, res2))); + + FragColor = vec4(res, 1.0); }
M res/shaders/xbr.shader/xbr.vs

@@ -21,57 +21,40 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,

OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - Incorporates some of the ideas from SABR shader. Thanks to Joshua Street. */ -// GLSL shader autogenerated by cg2glsl.py. -#if __VERSION__ >= 130 -#define COMPAT_VARYING out -#define COMPAT_ATTRIBUTE in -#define COMPAT_TEXTURE texture -#else -#define COMPAT_VARYING varying -#define COMPAT_ATTRIBUTE attribute -#define COMPAT_TEXTURE texture2D -#endif -#ifdef GL_ES -#define COMPAT_PRECISION mediump -#else -#define COMPAT_PRECISION -#endif -COMPAT_VARYING vec4 _t7; -COMPAT_VARYING vec4 _t6; -COMPAT_VARYING vec4 _t5; -COMPAT_VARYING vec4 _t4; -COMPAT_VARYING vec4 _t3; -COMPAT_VARYING vec4 _t2; -COMPAT_VARYING vec4 _t1; -vec4 _r0007; -COMPAT_ATTRIBUTE vec4 position; -COMPAT_VARYING vec4 TEX0; -COMPAT_VARYING vec4 TEX1; -COMPAT_VARYING vec4 TEX2; -COMPAT_VARYING vec4 TEX3; -COMPAT_VARYING vec4 TEX4; -COMPAT_VARYING vec4 TEX5; -COMPAT_VARYING vec4 TEX6; -COMPAT_VARYING vec4 TEX7; -const COMPAT_PRECISION vec2 TextureSize = vec2(240.0, 160.0); +varying vec2 texCoord; +varying vec4 TEX1; +varying vec4 TEX2; +varying vec4 TEX3; +varying vec4 TEX4; +varying vec4 TEX5; +varying vec4 TEX6; +varying vec4 TEX7; +attribute vec4 position; +/* VERTEX_SHADER */ void main() { - vec2 _ps; - vec2 _texCoord; - _ps = vec2(1.00000000E+00/TextureSize.x, 1.00000000E+00/TextureSize.y); - _texCoord = (position.st + vec2(1.0, 1.0)) * vec2(0.5, 0.5) + vec2( 1.00000001E-07, 1.00000001E-07); - gl_Position = position; - TEX0.xy = _texCoord; - TEX1 = _texCoord.xxxy + vec4(-_ps.x, 0.00000000E+00, _ps.x, -2.00000000E+00*_ps.y); - TEX2 = _texCoord.xxxy + vec4(-_ps.x, 0.00000000E+00, _ps.x, -_ps.y); - TEX3 = _texCoord.xxxy + vec4(-_ps.x, 0.00000000E+00, _ps.x, 0.00000000E+00); - TEX4 = _texCoord.xxxy + vec4(-_ps.x, 0.00000000E+00, _ps.x, _ps.y); - TEX5 = _texCoord.xxxy + vec4(-_ps.x, 0.00000000E+00, _ps.x, 2.00000000E+00*_ps.y); - TEX6 = _texCoord.xyyy + vec4(-2.00000000E+00*_ps.x, -_ps.y, 0.00000000E+00, _ps.y); - 
TEX7 = _texCoord.xyyy + vec4(2.00000000E+00*_ps.x, -_ps.y, 0.00000000E+00, _ps.y); -} + gl_Position = position; + + vec2 ps = vec2(1.0/240.0, 1.0/160.0); + float dx = ps.x; + float dy = ps.y; + + // A1 B1 C1 + // A0 A B C C4 + // D0 D E F F4 + // G0 G H I I4 + // G5 H5 I5 + + texCoord = (position.st + vec2(1.0, 1.0)) * vec2(0.5, 0.5); + TEX1 = texCoord.xxxy + vec4( -dx, 0, dx,-2.0*dy); // A1 B1 C1 + TEX2 = texCoord.xxxy + vec4( -dx, 0, dx, -dy); // A B C + TEX3 = texCoord.xxxy + vec4( -dx, 0, dx, 0); // D E F + TEX4 = texCoord.xxxy + vec4( -dx, 0, dx, dy); // G H I + TEX5 = texCoord.xxxy + vec4( -dx, 0, dx, 2.0*dy); // G5 H5 I5 + TEX6 = texCoord.xyyy + vec4(-2.0*dx,-dy, 0, dy); // A0 D0 G0 + TEX7 = texCoord.xyyy + vec4( 2.0*dx,-dy, 0, dy); // C4 F4 I4 +}