
Why, when two pipelines are created in WebGPU and both are multisampled, can only one of them be displayed?


I'm using WebGPU to develop some GIS layers. When I draw lines with MSAA (multisampling) enabled, I ran into a strange problem. Here are the two pipeline init functions:

//init func for layer ODLine
export async function initPipelineODLine(data, device, format, size, viewport, mvpbuffer) {

    let N = 60;

    const vertexArray = new Float32Array(data.area.length * 3)

    const colorArr = new Float32Array(data.color)

    data.area.forEach((point, i) => {
        vertexArray[i * 3] = lngLatToWorld(point)[0];
        vertexArray[i * 3 + 1] = lngLatToWorld(point)[1];
        vertexArray[i * 3 + 2] = 0;

    });


    const pipeline = await device.createRenderPipelineAsync({
        layout: 'auto',
        vertex: {
            module: device.createShaderModule({
                code: vertodline
            }),
            entryPoint: 'main',
            buffers: [{
                arrayStride: 3 * 4,
                attributes: [{
                    shaderLocation: 0,
                    offset: 0,
                    format: 'float32x3'
                }]
            },
            {
                arrayStride: 4 * 4,
                attributes: [{
                    shaderLocation: 1,
                    offset: 0,
                    format: 'float32x4'
                }]
            },
            ],
        },

        fragment: {
            module: device.createShaderModule({
                code: fragodline
            }),
            entryPoint: 'main',
            targets: [{ format }]
        },
        primitive: {
            topology: 'triangle-list',
            //cullMode: 'back'
        },
        depthStencil: {
            depthWriteEnabled: true,
            depthCompare: 'less',
            format: 'depth24plus',
        },
        multisample: {
            count: 4,
            alphaToCoverageEnabled: true
        }
    })
    // create multisample color texture and depth texture for the render pass
    const multiTexture = device.createTexture({
        size,
        sampleCount: 4,
        format,
        usage: window.GPUTextureUsage.RENDER_ATTACHMENT,
    });

    const depthTexture = device.createTexture({
        size,
        sampleCount: 4,
        format: 'depth24plus',
        usage: window.GPUTextureUsage.RENDER_ATTACHMENT,
    });
    const depthView = depthTexture.createView()
    // create vertex buffer
    const vertexBuffer = device.createBuffer({
        size: vertexArray.byteLength, // vertex.length * 4
        usage: window.GPUBufferUsage.VERTEX | window.GPUBufferUsage.COPY_DST
    })
    const colorBuffer = device.createBuffer({
        size: colorArr.byteLength,
        usage: window.GPUBufferUsage.VERTEX | window.GPUBufferUsage.COPY_DST
    })

    device.queue.writeBuffer(vertexBuffer, 0, vertexArray)
    device.queue.writeBuffer(colorBuffer, 0, colorArr)

    // console.log(vertexBuffer, colorBuffer);

    // create a mvp matrix buffer


    // const mvpBuffer = device.createBuffer({
    //     size: mvp0.length * 4,
    //     usage: window.GPUBufferUsage.UNIFORM | window.GPUBufferUsage.COPY_DST
    // })
    const mvpBuffer = mvpbuffer

    // console.log(mvpMatrix);
    // console.log(mvp0);

    // device.queue.writeBuffer(mvpBuffer, 0, mvp0)
    // create a uniform group for Matrix
    const uniformGroup = device.createBindGroup({
        layout: pipeline.getBindGroupLayout(0),
        entries: [
            {
                binding: 0,
                resource: { buffer: mvpBuffer }
            },
        ]
    })
    // return all vars
    return { pipeline, vertexBuffer, colorBuffer, mvpBuffer, uniformGroup, depthTexture, depthView,multiTexture }
}

and the other:

export async function initPipelineLine(data, device, format, size, viewport, mvpbuffer) {

    let N = 60;

    const vertexArray = new Float32Array(data.area.length * 3)

    const colorArr = new Float32Array(data.color)

    data.area.forEach((point, i) => {
        vertexArray[i * 3] = lngLatToWorld(point)[0];
        vertexArray[i * 3 + 1] = lngLatToWorld(point)[1];
        vertexArray[i * 3 + 2] = 0;

    });


    const pipeline = await device.createRenderPipelineAsync({
        layout: 'auto',
        vertex: {
            module: device.createShaderModule({
                code: vertpoint
            }),
            entryPoint: 'main',
            buffers: [{
                arrayStride: 3 * 4,
                attributes: [{
                    shaderLocation: 0,
                    offset: 0,
                    format: 'float32x3'
                }]
            },
            {
                arrayStride: 3 * 4,
                attributes: [{
                    shaderLocation: 1,
                    offset: 0,
                    format: 'float32x3'
                }]
            },
            ],
        },

        fragment: {
            module: device.createShaderModule({
                code: fragpoint
            }),
            entryPoint: 'main',
            targets: [{ format }]
        },
        primitive: {
            topology: 'triangle-list',
            //cullMode: 'back'
        },
        depthStencil: {
            depthWriteEnabled: true,
            depthCompare: 'less',
            format: 'depth24plus',
        },
        multisample: {
            count: 4,
            alphaToCoverageEnabled: true
        }
    })
    // create multisample color texture and depth texture for the render pass
    const multiTexture = device.createTexture({
        size,
        sampleCount: 4,
        format,
        usage: window.GPUTextureUsage.RENDER_ATTACHMENT,
    });
    const depthTexture = device.createTexture({
        size,
        sampleCount: 4,
        format: 'depth24plus',
        usage: window.GPUTextureUsage.RENDER_ATTACHMENT,
    });
    const depthView = depthTexture.createView()
    // create vertex buffer
    const vertexBuffer = device.createBuffer({
        size: vertexArray.byteLength, // vertex.length * 4
        usage: window.GPUBufferUsage.VERTEX | window.GPUBufferUsage.COPY_DST
    })
    const colorBuffer = device.createBuffer({
        size: colorArr.byteLength,
        usage: window.GPUBufferUsage.VERTEX | window.GPUBufferUsage.COPY_DST
    })



    device.queue.writeBuffer(vertexBuffer, 0, vertexArray)
    device.queue.writeBuffer(colorBuffer, 0, colorArr)

    // console.log(vertexBuffer, colorBuffer);

    // create a mvp matrix buffer


    // const mvpBuffer = device.createBuffer({
    //     size: mvp0.length * 4,
    //     usage: window.GPUBufferUsage.UNIFORM | window.GPUBufferUsage.COPY_DST
    // })
    const mvpBuffer = mvpbuffer

    // console.log(mvpMatrix);
    // console.log(mvp0);

    // device.queue.writeBuffer(mvpBuffer, 0, mvp0)
    // create a uniform group for Matrix
    const uniformGroup = device.createBindGroup({
        layout: pipeline.getBindGroupLayout(0),
        entries: [
            {
                binding: 0,
                resource: { buffer: mvpBuffer }
            },
        ]
    })
    // return all vars
    return { pipeline, vertexBuffer, colorBuffer, mvpBuffer, uniformGroup, depthTexture, depthView, multiTexture }
}

Then I draw these layers:

else if (data.type === 'bus') {
        pipelineObj[k] = await initPipelineLine(data, device, format, size, viewport, mvpbuffer)



        // device.queue.writeBuffer(pipelineObj[k].mvpBuffer, 0, mvp01)
        // readBuffer.unmap()
        // then draw

        async function drawall() {
          commandEncoderArc = await device.createCommandEncoder()
          passEncoderBus = commandEncoderArc.beginRenderPass({
            colorAttachments: [{
              // view:context.getCurrentTexture().createView(),
              view: pipelineObj[k].multiTexture.createView(),
              resolveTarget: context.getCurrentTexture().createView(),
              clearValue: [0, 0, 0, 0],
              loadOp: 'clear',
              storeOp: 'store'
            }],
            depthStencilAttachment: {
              view: pipelineObj[k].depthTexture.createView(),
              depthClearValue: 1.0,
              depthLoadOp: 'clear',
              depthStoreOp: 'store',
            },
          })
          passEncoderBus.setPipeline(pipelineObj[k].pipeline)
          // device.queue.writeBuffer(pipelineObj[k].tBuffer, 0, t1)
          setsome(data, device, context, pipelineObj[k], passEncoderBus, commandEncoderArc)
          draw(data, device, context, pipelineObj[k], passEncoderBus, commandEncoderArc)
          requestAnimationFrame(drawall)
        }
        drawall()
      }
      else if (data.type === 'odline') {
        pipelineObj[k] = await initPipelineODLine(data, device, format, size, viewport, mvpbuffer)
        // device.queue.writeBuffer(pipelineObj[k].mvpBuffer, 0, mvp01)
        // readBuffer.unmap()
        // then draw
        async function drawall() {
          commandEncoderArc = await device.createCommandEncoder()
          passEncoderODLine = commandEncoderArc.beginRenderPass({
            colorAttachments: [{
              // view:context.getCurrentTexture().createView(),
              view: pipelineObj[k].multiTexture.createView(),
              resolveTarget: context.getCurrentTexture().createView(),
              clearValue: [0, 0, 0, 0],
              loadOp: 'clear',
              storeOp: 'store'
            }],
            depthStencilAttachment: {
              view: pipelineObj[k].depthTexture.createView(),
              depthClearValue: 1.0,
              depthLoadOp: 'clear',
              depthStoreOp: 'store',
            },
          })
          passEncoderODLine.setPipeline(pipelineObj[k].pipeline)
          // device.queue.writeBuffer(pipelineObj[k].tBuffer, 0, t1)
          setsome(data, device, context, pipelineObj[k], passEncoderODLine, commandEncoderArc)
          draw(data, device, context, pipelineObj[k], passEncoderODLine, commandEncoderArc)
          requestAnimationFrame(drawall)
        }
        drawall()
      }

The problem is that when only one of these pipelines enables MSAA, the layers are displayed correctly; here the multisample count of layerODLine is 4 and the multisample count of layerLine is 1.

But when both of them enable MSAA, only the last layer is displayed.

With MSAA enabled on both, only layerLine is displayed and layerODLine has disappeared.

I tried changing the depthTexture format, but it had no effect. All layers were displayed correctly before MSAA was enabled on both of them.

Also, thanks for the comment from @gman, but it does not work. I tried changing the loadOp to 'load'; not only does that not fix it, it seems like a disaster…

What a disaster… more and more lines accumulate on the screen, and the odline layer has also disappeared.


Solution

  • You have loadOp: 'clear' in both render passes. You only want to clear in the first render pass; set loadOp to 'load' in every render pass after the first.

    Also, you only want to resolve in the last render pass; otherwise you're resolving extra times for no reason. A minimal sketch of the two-pass structure follows, then a full working demo.
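
    For example, applied to the two layer passes from the question, the structure could look like the following. This is a minimal sketch, not a drop-in fix: it assumes both layers render into one shared multisample color texture (multiTexture) and one shared depth texture (depthTexture), rather than the per-layer textures created in the two init functions, and it elides the setPipeline/setVertexBuffer/draw calls.

    const encoder = device.createCommandEncoder();

    // First pass: clear, draw the first layer, store the samples,
    // but do NOT resolve yet.
    const pass1 = encoder.beginRenderPass({
      colorAttachments: [{
        view: multiTexture.createView(),  // shared multisample target
        clearValue: [0, 0, 0, 0],
        loadOp: 'clear',
        storeOp: 'store',
      }],
      depthStencilAttachment: {
        view: depthTexture.createView(),  // shared depth texture
        depthClearValue: 1.0,
        depthLoadOp: 'clear',
        depthStoreOp: 'store',
      },
    });
    // ...setPipeline / setVertexBuffer / draw for the first layer...
    pass1.end();

    // Second (last) pass: load what the first pass stored, draw the
    // second layer on top, and resolve once into the canvas.
    const pass2 = encoder.beginRenderPass({
      colorAttachments: [{
        view: multiTexture.createView(),
        resolveTarget: context.getCurrentTexture().createView(),
        loadOp: 'load',       // keep the first pass's colors
        storeOp: 'store',
      }],
      depthStencilAttachment: {
        view: depthTexture.createView(),
        depthLoadOp: 'load',  // keep the first pass's depth
        depthStoreOp: 'store',
      },
    });
    // ...setPipeline / setVertexBuffer / draw for the second layer...
    pass2.end();

    device.queue.submit([encoder.finish()]);

    The full demo below puts the same idea into practice: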

    html, body { margin: 0; height: 100% }
    canvas { width: 100%; height: 100%; display: block; }
    
    #fail {
      position: fixed;
      left: 0;
      top: 0;
      width: 100%;
      height: 100%;
      display: flex;
      justify-content: center;
      align-items: center;
      background: red;
      color: white;
      font-weight: bold;
      font-family: monospace;
      font-size: 16pt;
      text-align: center;
    }
    <canvas></canvas>
    <div id="fail" style="display: none">
      <div class="content"></div>
    </div>
      
    <script type="module">
    // WebGPU Cube
    // from https://webgpufundamentals.org/webgpu/webgpu-cube.html
    
    
    /* global GPUBufferUsage */
    /* global GPUTextureUsage */
    
    import {vec3, mat4} from 'https://webgpufundamentals.org/3rdparty/wgpu-matrix.module.js';
    
    async function main() {
      const adapter = await navigator.gpu?.requestAdapter();
      const device = await adapter?.requestDevice();
      if (!device) {
        fail('need webgpu');
        return;
      }
    
      const canvas = document.querySelector('canvas');
      const context = canvas.getContext('webgpu');
    
      const presentationFormat = navigator.gpu.getPreferredCanvasFormat();
      const presentationSize = [300, 150];  // default canvas size
    
      context.configure({
        alphaMode: "opaque",
        format: presentationFormat,
        device,
      });
    
      const canvasInfo = {
        canvas,
        context,
        presentationSize,
        presentationFormat,
        // these are filled out in resizeToDisplaySize
        renderTarget: undefined,
        renderTargetView: undefined,
        depthTexture: undefined,
        depthTextureView: undefined,
        sampleCount: 4,  // can be 1 or 4
      };
    
      const shaderSrc = `
      struct VSUniforms {
        worldViewProjection: mat4x4<f32>,
        worldInverseTranspose: mat4x4<f32>,
      };
      @group(0) @binding(0) var<uniform> vsUniforms: VSUniforms;
    
      struct MyVSInput {
          @location(0) position: vec4<f32>,
          @location(1) normal: vec3<f32>,
          @location(2) texcoord: vec2<f32>,
      };
    
      struct MyVSOutput {
        @builtin(position) position: vec4<f32>,
        @location(0) normal: vec3<f32>,
        @location(1) texcoord: vec2<f32>,
      };
    
      @vertex
      fn myVSMain(v: MyVSInput) -> MyVSOutput {
        var vsOut: MyVSOutput;
        vsOut.position = vsUniforms.worldViewProjection * v.position;
        vsOut.normal = (vsUniforms.worldInverseTranspose * vec4<f32>(v.normal, 0.0)).xyz;
        vsOut.texcoord = v.texcoord;
        return vsOut;
      }
    
      struct FSUniforms {
        lightDirection: vec3<f32>,
      };
    
      @group(0) @binding(1) var<uniform> fsUniforms: FSUniforms;
      @group(0) @binding(2) var diffuseSampler: sampler;
      @group(0) @binding(3) var diffuseTexture: texture_2d<f32>;
    
      @fragment
      fn myFSMain(v: MyVSOutput) -> @location(0) vec4<f32> {
        var diffuseColor = textureSample(diffuseTexture, diffuseSampler, v.texcoord);
        var a_normal = normalize(v.normal);
        var l = dot(a_normal, fsUniforms.lightDirection) * 0.5 + 0.5;
        return vec4<f32>(diffuseColor.rgb * l, diffuseColor.a);
      }
      `;
    
      const shaderModule = device.createShaderModule({code: shaderSrc});
    
      function createBuffer(device, data, usage) {
        const buffer = device.createBuffer({
          size: data.byteLength,
          usage,
          mappedAtCreation: true,
        });
        const dst = new data.constructor(buffer.getMappedRange());
        dst.set(data);
        buffer.unmap();
        return buffer;
      }
    
      const positions = new Float32Array([1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, -1]);
      const normals   = new Float32Array([1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1]);
      const texcoords = new Float32Array([1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1]);
      const indices   = new Uint16Array([0, 1, 2, 0, 2, 3, 4, 5, 6, 4, 6, 7, 8, 9, 10, 8, 10, 11, 12, 13, 14, 12, 14, 15, 16, 17, 18, 16, 18, 19, 20, 21, 22, 20, 22, 23]);
    
      const positionBuffer = createBuffer(device, positions, GPUBufferUsage.VERTEX);
      const normalBuffer = createBuffer(device, normals, GPUBufferUsage.VERTEX);
      const texcoordBuffer = createBuffer(device, texcoords, GPUBufferUsage.VERTEX);
      const indicesBuffer = createBuffer(device, indices, GPUBufferUsage.INDEX);
    
      const tex = device.createTexture({
        size: [2, 2, 1],
        format: 'rgba8unorm',
        usage:
          GPUTextureUsage.TEXTURE_BINDING |
          GPUTextureUsage.COPY_DST,
      });
      device.queue.writeTexture(
          { texture: tex },
          new Uint8Array([
            255, 255, 128, 255,
            128, 255, 255, 255,
            255, 128, 255, 255,
            255, 128, 128, 255,
          ]),
          { bytesPerRow: 8, rowsPerImage: 2 },
          { width: 2, height: 2 },
      );
    
      const sampler = device.createSampler({
        magFilter: 'nearest',
        minFilter: 'nearest',
      });
    
      function createPipelineAndStuff(device) {
        const pipeline = device.createRenderPipeline({
          layout: 'auto',
          vertex: {
            module: shaderModule,
            entryPoint: 'myVSMain',
            buffers: [
              // position
              {
                arrayStride: 3 * 4, // 3 floats, 4 bytes each
                attributes: [
                  {shaderLocation: 0, offset: 0, format: 'float32x3'},
                ],
              },
              // normals
              {
                arrayStride: 3 * 4, // 3 floats, 4 bytes each
                attributes: [
                  {shaderLocation: 1, offset: 0, format: 'float32x3'},
                ],
              },
              // texcoords
              {
                arrayStride: 2 * 4, // 2 floats, 4 bytes each
                attributes: [
                  {shaderLocation: 2, offset: 0, format: 'float32x2',},
                ],
              },
            ],
          },
          fragment: {
            module: shaderModule,
            entryPoint: 'myFSMain',
            targets: [
              {format: presentationFormat},
            ],
          },
          primitive: {
            topology: 'triangle-list',
            cullMode: 'back',
          },
          depthStencil: {
            depthWriteEnabled: true,
            depthCompare: 'less',
            format: 'depth24plus',
          },
          multisample: {
            count: canvasInfo.sampleCount,
          },
        });
    
    
        const vUniformBufferSize = 2 * 16 * 4; // 2 mat4s * 16 floats per mat * 4 bytes per float
        const fUniformBufferSize = 3 * 4 + 4;  // 1 vec3 * 3 floats per vec3 * 4 bytes per float + pad
    
        const vsUniformBuffer = device.createBuffer({
          size: vUniformBufferSize,
          usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
        });
        const fsUniformBuffer = device.createBuffer({
          size: fUniformBufferSize,
          usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
        });
        const vsUniformValues = new Float32Array(vUniformBufferSize / 4); // 2 mat4s
        const worldViewProjection = vsUniformValues.subarray(0, 16);
        const worldInverseTranspose = vsUniformValues.subarray(16, 32);
        const fsUniformValues = new Float32Array(fUniformBufferSize / 4);  // 1 vec3
        const lightDirection = fsUniformValues.subarray(0, 3);
    
        const bindGroup = device.createBindGroup({
          layout: pipeline.getBindGroupLayout(0),
          entries: [
            { binding: 0, resource: { buffer: vsUniformBuffer } },
            { binding: 1, resource: { buffer: fsUniformBuffer } },
            { binding: 2, resource: sampler },
            { binding: 3, resource: tex.createView() },
          ],
        });
    
        const renderPassDescriptor = {
          colorAttachments: [
            {
              // view: undefined, // Assigned later
              // resolveTarget: undefined, // Assigned Later
              clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },
              loadOp: 'clear',
              storeOp: 'store',
            },
          ],
          depthStencilAttachment: {
            // view: undefined,  // Assigned later
            depthClearValue: 1.0,
            depthLoadOp: 'clear',
            depthStoreOp: 'store',
          },
        };
    
        return {
          pipeline,
          bindGroup,
          renderPassDescriptor,
          vsUniformBuffer,
          fsUniformBuffer,
          vsUniformValues,
          worldViewProjection,
          worldInverseTranspose,
          fsUniformValues,
          lightDirection,
        };
      }
    
    
    
      function resizeToDisplaySize(device, canvasInfo) {
        const {
          canvas,
          context,
          renderTarget,
          presentationSize,
          presentationFormat,
          depthTexture,
          sampleCount,
        } = canvasInfo;
        const width = Math.min(device.limits.maxTextureDimension2D, canvas.clientWidth);
        const height = Math.min(device.limits.maxTextureDimension2D, canvas.clientHeight);
    
        const needResize = !canvasInfo.renderTarget ||
                           width !== presentationSize[0] ||
                           height !== presentationSize[1];
        if (needResize) {
          if (renderTarget) {
            renderTarget.destroy();
          }
          if (depthTexture) {
            depthTexture.destroy();
          }
    
          canvas.width = width;
          canvas.height = height;
          presentationSize[0] = width;
          presentationSize[1] = height;
    
          const newRenderTarget = device.createTexture({
            size: presentationSize,
            format: presentationFormat,
            sampleCount,
            usage: GPUTextureUsage.RENDER_ATTACHMENT,
          });
          canvasInfo.renderTarget = newRenderTarget;
          canvasInfo.renderTargetView = newRenderTarget.createView();
    
          const newDepthTexture = device.createTexture({
            size: presentationSize,
            format: 'depth24plus',
            sampleCount,
            usage: GPUTextureUsage.RENDER_ATTACHMENT,
          });
          canvasInfo.depthTexture = newDepthTexture;
          canvasInfo.depthTextureView = newDepthTexture.createView();
        }
        return needResize;
      }
    
      const p1 = createPipelineAndStuff(device);
      const p2 = createPipelineAndStuff(device);
    
      function drawPipeline(device, commandEncoder, {
          pipeline,
          bindGroup,
          renderPassDescriptor,
          vsUniformBuffer,
          fsUniformBuffer,
          vsUniformValues,
          worldViewProjection,
          worldInverseTranspose,
          fsUniformValues,
          lightDirection,
        }, { time, x, resolve, clear }) {
    
        const projection = mat4.perspective(30 * Math.PI / 180, canvas.clientWidth / canvas.clientHeight, 0.5, 20);
        const eye = [0, 4, 10];
        const target = [0, 0, 0];
        const up = [0, 1, 0];
    
        const view = mat4.lookAt(eye, target, up);
        const viewProjection = mat4.multiply(projection, view);
        const world = mat4.translation([x, 0, 0]);
        mat4.rotateY(world, time, world);
        mat4.rotateX(world, time * 0.1 + x, world);
        mat4.transpose(mat4.inverse(world), worldInverseTranspose);
        mat4.multiply(viewProjection, world, worldViewProjection);
    
        vec3.normalize([1, 8, 10], lightDirection);
    
        device.queue.writeBuffer(vsUniformBuffer, 0, vsUniformValues);
        device.queue.writeBuffer(fsUniformBuffer, 0, fsUniformValues);
    
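        // Only the first pass clears; later passes 'load' the samples the
        // previous pass stored. Only the pass with resolve=true gets a
        // resolveTarget, so the result is resolved to the canvas exactly once.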
        renderPassDescriptor.colorAttachments[0].loadOp = clear ? 'clear' : 'load';
        renderPassDescriptor.colorAttachments[0].view = canvasInfo.renderTargetView;
        renderPassDescriptor.colorAttachments[0].resolveTarget = resolve
            ? context.getCurrentTexture().createView()
            : undefined;
        renderPassDescriptor.depthStencilAttachment.view = canvasInfo.depthTextureView;
        renderPassDescriptor.depthStencilAttachment.depthLoadOp = clear ? 'clear' : 'load';
    
        const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
        passEncoder.setPipeline(pipeline);
        passEncoder.setBindGroup(0, bindGroup);
        passEncoder.setVertexBuffer(0, positionBuffer);
        passEncoder.setVertexBuffer(1, normalBuffer);
        passEncoder.setVertexBuffer(2, texcoordBuffer);
        passEncoder.setIndexBuffer(indicesBuffer, 'uint16');
        passEncoder.drawIndexed(indices.length);
        passEncoder.end();
      }
    
      function render(time) {
        time *= 0.001;
        resizeToDisplaySize(device, canvasInfo);
    
        const commandEncoder = device.createCommandEncoder();
    
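        // p1 clears the shared multisample target and draws without resolving;
        // p2 loads that result, draws on top, and resolves to the canvas.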
        drawPipeline(device, commandEncoder, p1, { time, x: -1., resolve: false, clear: true  });
        drawPipeline(device, commandEncoder, p2, { time, x:  1., resolve: true , clear: false });
    
        device.queue.submit([commandEncoder.finish()]);
    
        requestAnimationFrame(render);
      }
      requestAnimationFrame(render);
    }
    
    function fail(msg) {
      const elem = document.querySelector('#fail');
      const contentElem = elem.querySelector('.content');
      elem.style.display = '';
      contentElem.textContent = msg;
    }
    
    main();
      
    </script>
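
    Note that the demo shares one multisample render target and one depth texture (canvasInfo.renderTargetView / canvasInfo.depthTextureView) between the two passes. In the code from the question, each layer creates its own multiTexture and depthTexture; with separate targets, each pass's resolve overwrites the whole canvas, so the last resolve wins no matter what loadOp is. Sharing the textures between the passes is part of the fix.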