Tags: opengl, unity-game-engine, cg

CG shader works on DirectX but not OpenGL


I am trying to write a simple blur fragment shader that handles float NaN values, but it gives me unexpected results on OpenGL. I use floating-point textures and set the green channel to 1 if the pixel is NaN; otherwise I set the red channel to the float value.
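
For context, the pass that produces the input texture encodes values roughly like this (a minimal sketch, assuming a hypothetical frag_encode function and _DepthTex sampler; the actual upstream shader is not shown here):

sampler2D _DepthTex;   // hypothetical source texture holding the raw float values

float4 frag_encode (v2f i) : SV_Target
{
    float value = tex2D(_DepthTex, i.uv).r;
    if (isnan(value))
        return float4(0, 1, 0, 1);   // green = 1 flags a NaN pixel
    return float4(value, 0, 0, 1);   // red carries the valid float value
}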

On DirectX it works fine, but on OpenGL it returns black instead of the green I expect for NaN values. I tested on Ubuntu with a GTX 1060 and on Windows 10 with a GTX 750.

DirectX output (expected result): [image]

OpenGL output (incorrect result): [image]

CG code:

Shader "FX/DepthBlur_R"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "white" {}
    }
    SubShader
    {
        // No culling or depth
        Cull Off ZWrite Off ZTest Always

        Pass
        {
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag

            #include "UnityCG.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float2 uv : TEXCOORD0;
                float4 vertex : SV_POSITION;
            };

            v2f vert (appdata v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);
                o.uv = v.uv;
                return o;
            }

            sampler2D _MainTex;
            float4 _MainTex_TexelSize;

            float4 frag (v2f i) : SV_Target
            {
                float2 col = tex2D(_MainTex, i.uv).rg;
                float2 up = tex2D(_MainTex, i.uv + fixed2(0, _MainTex_TexelSize.y)).rg;
                float2 down = tex2D(_MainTex, i.uv - fixed2(0, _MainTex_TexelSize.y)).rg;
                float2 left = tex2D(_MainTex, i.uv - fixed2(_MainTex_TexelSize.x, 0)).rg;
                float2 right = tex2D(_MainTex, i.uv + fixed2(_MainTex_TexelSize.x, 0)).rg;

                int count = 0;
                float sides = 0;
                if (up.g < 0.1) { sides += up.r; count++; }
                if (down.g < 0.1) { sides += down.r; count++; }
                if (left.g < 0.1) { sides += left.r; count++; }
                if (right.g < 0.1) { sides += right.r; count++; }
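                // If no neighbour passed the check, count is still 0 and the division below is 0 / 0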
                sides = sides / count;

                float4 ret;
                if (count == 0 && col.g > 0.1) { // Nothing valid
                    ret = fixed4(0, 1, 0, 1);
                }
                else if (count == 0) { // Only col is valid
                    ret = float4(col.r, 0, 0, 1);
                }
                else if (col.g > 0.1) { // Only sides are valid
                    ret = float4(sides, 0, 0, 1);
                }
                else {
                    ret = float4(((col.r + sides) / 2), 0, 0, 1);
                }
                return ret;
            }
            ENDCG
        }
    }
}

The result is also strange when I switch to debug colors; see the comments in the else section:

if (count == 0 && col.g > 0.1) { // Nothing valid
    return fixed4(1, 0, 0, 1);
}
else if (count == 0) { // Only col is valid
    return fixed4(0, 1, 0, 1);
}
else if (col.g > 0.1) { // Only sides are valid
    return fixed4(0, 0, 1, 1);
}
else {
    //Using this return gives the expected result
    return fixed4(0, 0, 0, 1);
    //Using this return instead gives blue for the "Nothing valid" section
    //return float4(0, 0, 0, ((col.r + sides) / 2));
}

Solution

  • Somehow, switching count from an int to a float fixed the issue (a sketch of the change is below). Does anyone know what might be causing this?
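
For reference, the change amounts to this (a sketch of just the affected lines; the explicit guard on the last line is an extra illustration of how to avoid the 0 / 0 entirely, not part of the original fix):

float count = 0;   // was: int count = 0;
float sides = 0;
if (up.g < 0.1) { sides += up.r; count++; }
if (down.g < 0.1) { sides += down.r; count++; }
if (left.g < 0.1) { sides += left.r; count++; }
if (right.g < 0.1) { sides += right.r; count++; }
// Optional guard (not part of the original fix): skip the division when count is 0,
// so sides never becomes 0 / 0 in the first place.
sides = (count > 0) ? sides / count : 0;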