Tags: javascript, canvas, three.js, shader, webgl

WebGL mixes colors at the pixel level


I want to write a small program that calculates, for example, prime numbers. My idea was to use WebGL/three.js to color every pixel that represents a prime number (based on its coordinates) white and the rest black. My problem is that WebGL somehow mixes the colors at the pixel level: if you have a single white pixel in the middle of a black canvas, it blends with its surroundings and the pixels around that one white pixel become gray. Because of this I can't reliably tell white and black pixels apart.

My question: How can I tell WebGL or the canvas to use the exact color for each pixel, without mixing anything?

Here is a demo where I colored every 2nd pixel white:

<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>ProjectLaGPU</title>
</head>
<body>
    <script id="vertex-shader" type="x-shader/x-vertex">
        uniform float u_start;

        varying float v_start;

        void main() {
            v_start = u_start;
            gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
        }
    </script>
    <script id="fragment-shader" type="x-shader/x-fragment">
        varying float v_start;

        void main () {
            if (int(gl_FragCoord.x)%2==0 && int(gl_FragCoord.y)%2==0) {
                gl_FragColor = vec4(1, 1, 1, 1);
            } else {
                gl_FragColor = vec4(0, 0, 0, 1);
            }
        }
    </script>
    <script src="https://threejs.org/build/three.js"></script>
    <script src="main.js"></script>
</body>
</html>

/* main.js */
async function init() {
    const scene = new THREE.Scene();
    const camera = new THREE.OrthographicCamera(-.5, .5, .5, -.5, -1, 1);
    const renderer = new THREE.WebGLRenderer({
        preserveDrawingBuffer: true,
        powerPreference: "high-performance",
        depth: false,
        stencil: false
    });

    const geometry = new THREE.PlaneGeometry();
    const material = new THREE.ShaderMaterial({
        uniforms: {
            u_start: { value: 0.5 },
        },
        vertexShader: document.getElementById("vertex-shader").innerText,
        fragmentShader: document.getElementById("fragment-shader").innerText,
      });
    const mesh = new THREE.Mesh(geometry, material);
    renderer.setSize(1000, 1000);
    document.body.appendChild(renderer.domElement);
    scene.add(mesh)
    renderer.render(scene, camera);
}

init();
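
The renderer above is created with preserveDrawingBuffer: true so the pixels can be read back after rendering. A minimal sketch of how such a readback could look (the helper below is illustrative, not part of the original code):

/* readback sketch (hypothetical helper) */
function readBackPixels(renderer) {
    const gl = renderer.getContext();
    const { width, height } = renderer.domElement;
    const pixels = new Uint8Array(width * height * 4); // RGBA, one byte per channel
    // preserveDrawingBuffer: true keeps the buffer readable after render()
    gl.readPixels(0, 0, width, height, gl.RGBA, gl.UNSIGNED_BYTE, pixels);
    return pixels; // in this demo each channel is 255 for white, 0 for black
}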

Solution

  • The previously accepted answer is not correct and only worked by luck... In other words, if it worked for you, you were lucky that your machine/browser/OS/current settings happened to appear to work. Change a setting, or switch to another browser/OS/machine, and it will not work.

    There are many issues being conflated here

    1. Getting a non-blurry canvas

    There are 2 things you have to do to get a non-blurry canvas:

    Turn off the browser's filtering

    In Chrome/Edge/Safari you can do this with canvas { image-rendering: pixelated; }. In Firefox, use canvas { image-rendering: crisp-edges; }.

    Note that pixelated is more correct for this situation than crisp-edges. From the spec, given this original image:

    [original image]

    and displaying it at 3x the size, the default algorithm is up to the browser, but a typical example might look something like this:

    [image: typical default (smooth) scaling]

    pixelated would look something like this:

    [image: pixelated-style scaling]

    IMPORTANT: See the update at the bottom. crisp-edges would look something like this:

    [image: crisp-edges-style scaling]

    But note that the actual algorithm used to draw the image is not defined by the spec 🤬, so at the moment Firefox happens to use nearest-neighbor filtering for crisp-edges and does not support pixelated. Therefore you should specify pixelated last so it overrides crisp-edges where pixelated is supported.
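
    As a side note, the same fallback can be applied from JavaScript if you prefer; a minimal sketch (assuming renderer is the THREE.WebGLRenderer from the examples below):

    const canvas = renderer.domElement;
    // crisp-edges first, as a fallback for browsers that don't support pixelated
    canvas.style.imageRendering = 'crisp-edges';
    if (window.CSS && CSS.supports('image-rendering', 'pixelated')) {
      canvas.style.imageRendering = 'pixelated'; // preferred where supported
    }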

    A canvas has 2 sizes: its drawing buffer size (the number of pixels in the canvas) and its display size (defined by CSS). But CSS sizes are computed in CSS pixels, which are then scaled by the current devicePixelRatio. This means that even if you're not zoomed in or out, if you're on a device whose devicePixelRatio is not 1.0 and your drawing buffer size matches your display size, the canvas will still be scaled to whatever size is needed to match the devicePixelRatio, and the image-rendering CSS style decides how that scaling happens.
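
    To make the two sizes concrete, here is a small sketch of how you could inspect them (the 1000 x 1000 numbers are just the sizes from the question, not required):

    const canvas = renderer.domElement;
    console.log(canvas.width, canvas.height);             // drawing buffer size, e.g. 1000 x 1000
    console.log(canvas.clientWidth, canvas.clientHeight); // display size in CSS pixels
    // The browser paints roughly clientWidth * devicePixelRatio device pixels,
    // so unless that matches canvas.width the canvas gets rescaled.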

    Example

    const scene = new THREE.Scene();
    const camera = new THREE.OrthographicCamera(-.5, .5, .5, -.5, -1, 1);
    const renderer = new THREE.WebGLRenderer();
    
    const geometry = new THREE.PlaneBufferGeometry();
    const material = new THREE.ShaderMaterial({
      vertexShader: document.getElementById("vertex-shader").innerText,
      fragmentShader: document.getElementById("fragment-shader").innerText,
    });
    const mesh = new THREE.Mesh(geometry, material);
    document.body.appendChild(renderer.domElement);
    scene.add(mesh)
    renderer.render(scene, camera);
    /* CSS */
    canvas { 
      image-rendering: crisp-edges;
      image-rendering: pixelated;  /* pixelated is more correct so put that last */
    }
    <script src="https://cdn.jsdelivr.net/npm/three@0.123/build/three.js"></script>
    
    <script id="vertex-shader" type="x-shader/x-vertex">
            void main() {
                gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
            }
    </script>
    <script id="fragment-shader" type="x-shader/x-fragment">
            void main () {
                if (int(gl_FragCoord.x)%2==0 && int(gl_FragCoord.y)%2==0) {
                    gl_FragColor = vec4(1, 1, 1, 1);
                } else {
                    gl_FragColor = vec4(0, 0, 0, 1);
                }
            }
    </script>

    Turn off antialiasing

    Three.js does this for you
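
    If you want to be explicit about it anyway, you can pass the option yourself when creating the renderer; a minimal sketch:

    const renderer = new THREE.WebGLRenderer({
      antialias: false, // three.js' default; multisampling would blend edge pixels
    });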

    2. Getting 1 pixel in the canvas = 1 pixel on the screen

    This is what calling setPixelRatio was trying to do. That it appeared to work for you was just luck; when you run your code on a different machine or a different browser it may not work.

    The issue is that in order to set the canvas's drawing buffer size so that it matches, 1 to 1, the number of pixels used to display the canvas, you have to ask the browser "how many pixels did you use?".

    For example, let's say you have a device with a devicePixelRatio (dpr) of 3 and the window is 100 device pixels wide. That's 33.3333 CSS pixels wide. If you look at window.innerWidth (which you should never do anyway) it will report 33 CSS pixels. If you then multiply that by your dpr of 3 you get 99, but in reality the browser may/will draw 100.
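
    The same arithmetic as a tiny sketch (the numbers are the hypothetical ones from the paragraph above):

    const dpr = 3;                // devicePixelRatio
    const cssWidth = 33;          // what window.innerWidth reports for 100 / 3 device pixels
    const guess = cssWidth * dpr; // 99 device pixels
    // The browser actually paints 100 device pixels, so a 99-pixel-wide
    // drawing buffer still gets rescaled and single-pixel patterns blur.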

    To ask what size was actually used, you have to use a ResizeObserver and have it report the exact size via devicePixelContentBoxSize:

    const scene = new THREE.Scene();
    const camera = new THREE.OrthographicCamera(-.5, .5, .5, -.5, -1, 1);
    const renderer = new THREE.WebGLRenderer();
    
    const geometry = new THREE.PlaneBufferGeometry();
    const material = new THREE.ShaderMaterial({
      vertexShader: document.getElementById("vertex-shader").innerText,
      fragmentShader: document.getElementById("fragment-shader").innerText,
    });
    
    const mesh = new THREE.Mesh(geometry, material);
    
    function onResize(entries) {
      for (const entry of entries) {
        let width;
        let height;
        let dpr = window.devicePixelRatio;
        if (entry.devicePixelContentBoxSize) {
          // NOTE: Only this path gives the correct answer
          // The other 2 paths are an imperfect fallback
          // for browsers that don't provide any way to do this
          width = entry.devicePixelContentBoxSize[0].inlineSize;
          height = entry.devicePixelContentBoxSize[0].blockSize;
          dpr = 1; // it's already in width and height
        } else if (entry.contentBoxSize) {
          if (entry.contentBoxSize[0]) {
            width = entry.contentBoxSize[0].inlineSize;
            height = entry.contentBoxSize[0].blockSize;
          } else {
            width = entry.contentBoxSize.inlineSize;
            height = entry.contentBoxSize.blockSize;
          }
        } else {
          width = entry.contentRect.width;
          height = entry.contentRect.height;
        }
        // IMPORTANT! You must pass false here otherwise three.js
        // will mess up the CSS!
        renderer.setSize(Math.round(width * dpr), Math.round(height * dpr), false);
        renderer.render(scene, camera);
      }
    }
    
    const canvas = renderer.domElement;
    const resizeObserver = new ResizeObserver(onResize);
    try {
      // because some browsers don't support this yet
      resizeObserver.observe(canvas, {box: 'device-pixel-content-box'});
    } catch (e) {
      resizeObserver.observe(canvas, {box: 'content-box'});
    }
    document.body.appendChild(renderer.domElement);
    scene.add(mesh)
    renderer.render(scene, camera);
    /* CSS */
    html, body, canvas {
      margin: 0;
      width: 100%;
      height: 100%;
      display: block;
    }
    <script src="https://cdn.jsdelivr.net/npm/three@0.123/build/three.js"></script>
    
    <script id="vertex-shader" type="x-shader/x-vertex">
            void main() {
                gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
            }
    </script>
    <script id="fragment-shader" type="x-shader/x-fragment">
            void main () {
                if (int(gl_FragCoord.x)%2==0 && int(gl_FragCoord.y)%2==0) {
                    gl_FragColor = vec4(1, 1, 1, 1);
                } else {
                    gl_FragColor = vec4(0, 0, 0, 1);
                }
            }
    </script>

    Any other method will only work sometimes.

    To prove it, here is the method using setPixelRatio and window.innerWidth vs using a ResizeObserver:

    function test(parent, fn) {
      const scene = new THREE.Scene();
      const camera = new THREE.OrthographicCamera(-.5, .5, .5, -.5, -1, 1);
      const renderer = new THREE.WebGLRenderer();
    
      const geometry = new THREE.PlaneBufferGeometry();
      const material = new THREE.ShaderMaterial({
        vertexShader: document.getElementById("vertex-shader").innerText,
        fragmentShader: document.getElementById("fragment-shader").innerText,
      });
    
      const mesh = new THREE.Mesh(geometry, material);
      fn(renderer, scene, camera);
      parent.appendChild(renderer.domElement);
      scene.add(mesh)
      renderer.render(scene, camera);
    }
    
    function resizeObserverInit(renderer, scene, camera) {
      function onResize(entries) {
        for (const entry of entries) {
          let width;
          let height;
          let dpr = window.devicePixelRatio;
          if (entry.devicePixelContentBoxSize) {
          // NOTE: Only this path gives the correct answer
          // The other 2 paths are an imperfect fallback
          // for browsers that don't provide any way to do this
            width = entry.devicePixelContentBoxSize[0].inlineSize;
            height = entry.devicePixelContentBoxSize[0].blockSize;
            dpr = 1; // it's already in width and height
          } else if (entry.contentBoxSize) {
            if (entry.contentBoxSize[0]) {
              width = entry.contentBoxSize[0].inlineSize;
              height = entry.contentBoxSize[0].blockSize;
            } else {
              width = entry.contentBoxSize.inlineSize;
              height = entry.contentBoxSize.blockSize;
            }
          } else {
            width = entry.contentRect.width;
            height = entry.contentRect.height;
          }
          // IMPORTANT! You must pass false here otherwise three.js
          // will mess up the CSS!
          renderer.setSize(Math.round(width * dpr), Math.round(height * dpr), false);
          renderer.render(scene, camera);
        }
      }
    
      const canvas = renderer.domElement;
      const resizeObserver = new ResizeObserver(onResize);
      try {
        // because some browsers don't support this yet
        resizeObserver.observe(canvas, {box: 'device-pixel-content-box'});
      } catch (e) {
        resizeObserver.observe(canvas, {box: 'content-box'});
      }
    }
    
    function innerWidthDPRInit(renderer, scene, camera) {
      renderer.setPixelRatio(window.devicePixelRatio);
      renderer.setSize(window.innerWidth, 70);
      window.addEventListener('resize', () => {
        renderer.setPixelRatio(window.devicePixelRatio);
        renderer.setSize(window.innerWidth, 70);
        renderer.render(scene, camera);
      });
    }
    
    test(document.querySelector('#innerWidth-dpr'), innerWidthDPRInit);
    test(document.querySelector('#resizeObserver'), resizeObserverInit);
    /* CSS */
    body {
      margin: 0;
    }
    canvas {
      height: 70px;
      display: block;
    }
    #resizeObserver,
    #resizeObserver>canvas {
      width: 100%;
    }
    <!-- HTML -->
    <script src="https://cdn.jsdelivr.net/npm/three@0.123/build/three.js"></script>
    
    <script id="vertex-shader" type="x-shader/x-vertex">
            void main() {
                gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
            }
    </script>
    <script id="fragment-shader" type="x-shader/x-fragment">
            void main () {
                if (int(gl_FragCoord.x)%2==0 && int(gl_FragCoord.y)%2==0) {
                    gl_FragColor = vec4(1, 1, 1, 1);
                } else {
                    gl_FragColor = vec4(0, 0, 0, 1);
                }
            }
    </script>
    <div>via window.innerWidth and setPixelRatio</div>
    <div id="innerWidth-dpr"></div>
    <div>via ResizeObserver</div>
    <div id="resizeObserver"></div>

    Try zooming in the browser or try different devices.

    Update

    The spec changed in Feb 2021. crisp-edges now means "use nearest neighbor", and pixelated means "keep it looking pixelated", which can be translated as "if you want to, do something better than nearest neighbor, as long as the image still looks pixelated". See this answer.