I am trying to implement this effect. As explained in the video, I have to create 2 extra render targets, blend the current frame with render target #1 into render target #2, but I am having difficulty implementing it in three.js. You can check my code here:
let w = window.innerWidth
let h = window.innerHeight
const scene = new THREE.Scene()
const camera = new THREE.PerspectiveCamera(60, w / h, 0.1, 1000)
const renderer = new THREE.WebGLRenderer()
const clock = new THREE.Clock()
let frontBuffer = createRenderTarget()
let backBuffer = frontBuffer.clone()
let readBuffer = frontBuffer
let writeBuffer = backBuffer
const renderScene = new THREE.Scene()
const renderCamera = new THREE.OrthographicCamera(-w / 2, w / 2, -h / 2, h / 2, -1000, 1000)
const renderMaterial = new THREE.ShaderMaterial({
uniforms: {
tDiffuse: { value: writeBuffer.texture }
},
vertexShader: `
varying vec2 vUv;
void main () {
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
vUv = uv;
}
`,
fragmentShader: `
uniform sampler2D tDiffuse;
varying vec2 vUv;
void main () {
gl_FragColor = texture2D(tDiffuse, vUv);
}
`
})
const renderMesh = new THREE.Mesh(
new THREE.PlaneBufferGeometry(w, h),
renderMaterial
)
renderMesh.rotation.x += Math.PI
renderScene.add(renderMesh)
let timeElapsed = 0
let shape
setMainScene()
renderFrame()
function createRenderTarget () {
let type = THREE.FloatType
if( renderer.extensions.get( 'OES_texture_float_linear' ) === null ) type = THREE.HalfFloatType
let renderTarget = new THREE.WebGLRenderTarget( 1, 1, {
type,
wrapS: THREE.ClampToEdgeWrapping,
wrapT: THREE.ClampToEdgeWrapping,
format: THREE.RGBAFormat,
minFilter: THREE.NearestFilter,
magFilter: THREE.NearestFilter,
stencilBuffer: false,
depthBuffer: true
})
renderTarget.texture.generateMipmaps = false
renderTarget.setSize(w, h)
return renderTarget
}
function swapBuffers () {
if (readBuffer === frontBuffer) {
readBuffer = backBuffer
writeBuffer = frontBuffer
} else {
readBuffer = frontBuffer
writeBuffer = backBuffer
}
}
function setMainScene () {
renderer.setSize(w, h)
renderer.setClearColor(0x111111)
renderer.setPixelRatio(window.devicePixelRatio || 1)
document.body.appendChild(renderer.domElement)
camera.position.set(0, 20, 100)
camera.lookAt(new THREE.Vector3())
shape = new THREE.Mesh(
new THREE.SphereBufferGeometry(10, 20, 20),
new THREE.MeshBasicMaterial({ color: 0xFF0000 })
)
scene.add(shape)
}
function renderFrame () {
requestAnimationFrame(renderFrame)
renderer.render(scene, camera, writeBuffer)
renderer.render(renderScene, renderCamera)
swapBuffers()
timeElapsed += clock.getDelta()
shape.position.x = Math.sin(timeElapsed) * 20.0
shape.position.y = Math.cos(timeElapsed * Math.PI) * 20.0
}
* { margin: 0; padding: 0; }
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/88/three.min.js"></script>
First, I create my two extra framebuffers:
let frontBuffer = createRenderTarget()
let backBuffer = frontBuffer.clone()
let readBuffer = frontBuffer
let writeBuffer = backBuffer
function createRenderTarget () {
let type = THREE.FloatType
if( renderer.extensions.get( 'OES_texture_float_linear' ) === null ) type = THREE.HalfFloatType
let renderTarget = new THREE.WebGLRenderTarget( 1, 1, {
type,
wrapS: THREE.ClampToEdgeWrapping,
wrapT: THREE.ClampToEdgeWrapping,
format: THREE.RGBAFormat,
minFilter: THREE.NearestFilter,
magFilter: THREE.NearestFilter,
stencilBuffer: false,
depthBuffer: true
})
renderTarget.texture.generateMipmaps = false
renderTarget.setSize(w, h)
return renderTarget
}
Then I create an extra scene, a plane covering the screen (to which I will render my main scene), and an orthographic camera. I pass the resulting image of the main scene render as a uniform to my post-processing plane:
const renderScene = new THREE.Scene()
const renderCamera = new THREE.OrthographicCamera(-w / 2, w / 2, -h / 2, h / 2, -1000, 1000)
const renderMaterial = new THREE.ShaderMaterial({
uniforms: {
tDiffuse: { value: writeBuffer.texture }
},
vertexShader: `
varying vec2 vUv;
void main () {
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
vUv = uv;
}
`,
fragmentShader: `
uniform sampler2D tDiffuse;
varying vec2 vUv;
void main () {
gl_FragColor = texture2D(tDiffuse, vUv);
}
`
})
Finally, in my animation loop, I first render the main scene to the current FBO, then render my post-processing plane, and then swap my buffers:
function swapBuffers () {
if (readBuffer === frontBuffer) {
readBuffer = backBuffer
writeBuffer = frontBuffer
} else {
readBuffer = frontBuffer
writeBuffer = backBuffer
}
}
function renderFrame () {
requestAnimationFrame(renderFrame)
renderer.render(scene, camera, writeBuffer)
renderer.render(renderScene, renderCamera)
swapBuffers()
timeElapsed += clock.getDelta()
shape.position.x = Math.sin(timeElapsed) * 20.0
shape.position.y = Math.cos(timeElapsed * Math.PI) * 20.0
}
This is all fine and good, and I can see my main scene render shown on the post-processing plane, but I can't understand how to blend it with the previous framebuffer. I guess my current implementation is very wrong, but information is scarce and I simply can't wrap my head around how to achieve this blending.
I tried passing both of my buffers as textures and then blending between them in GLSL, like this:
// js
uniforms: {
tDiffuse1: { value: writeBuffer.texture },
tDiffuse2: { value: readBuffer.texture }
}
// glsl
gl_FragColor = mix(texture2D(tDiffuse1, vUv), texture2D(tDiffuse2, vUv), 0.5);
But visually I don't see any blending going on.
You need 3 render targets. Let's call them sceneTarget, previousTarget, and resultTarget.

Step 1: Render your scene to sceneTarget. You now have your scene in sceneTarget.texture.

Step 2: Blend sceneTarget.texture with previousTarget.texture into resultTarget. For this you need 2 textures as input, like you mentioned at the bottom of your question, and you need to update the material uniforms to use the correct textures every frame:
renderMaterial.uniforms.tDiffuse1.value = previousTarget.texture;
renderMaterial.uniforms.tDiffuse2.value = sceneTarget.texture;
Now you have a blended result in resultTarget.texture.

Step 3: Render resultTarget.texture to the canvas. Now you can actually see the result.

Step 4: Swap resultTarget and previousTarget.
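In code, the per-frame order ends up looking like this (a condensed view of the full snippet below, which uses the three.js r88 API where render() accepts a render target as its third argument):

// 1. render the scene into sceneTarget
renderer.render(scene, camera, sceneTarget);
// 2. blend the previous result with the new scene render into resultTarget
blendMaterial.uniforms.tDiffuse1.value = previousTarget.texture;
blendMaterial.uniforms.tDiffuse2.value = sceneTarget.texture;
renderer.render(blendScene, blendCamera, resultTarget);
// 3. draw resultTarget to the canvas so it is visible
resultMaterial.map = resultTarget.texture;
renderer.render(resultScene, resultCamera);
// 4. swap, so this frame's result becomes next frame's "previous"
swapBuffers();

The full snippet: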
let w = window.innerWidth
let h = window.innerHeight
const scene = new THREE.Scene()
const camera = new THREE.PerspectiveCamera(60, w / h, 0.1, 1000)
const renderer = new THREE.WebGLRenderer()
const clock = new THREE.Clock()
let sceneTarget = createRenderTarget()
let previousTarget = sceneTarget.clone();
let resultTarget = sceneTarget.clone();
const blendScene = new THREE.Scene();
const blendCamera = new THREE.OrthographicCamera(-w/2, w/2, -h/2, h/2, -1000, 1000);
const blendMaterial = new THREE.ShaderMaterial({
uniforms: {
tDiffuse1: { value: previousTarget.texture },
tDiffuse2: { value: sceneTarget.texture },
},
vertexShader: `
varying vec2 vUv;
void main () {
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
vUv = uv;
}
`,
fragmentShader: `
uniform sampler2D tDiffuse1;
uniform sampler2D tDiffuse2;
varying vec2 vUv;
void main () {
gl_FragColor = mix(texture2D(tDiffuse1, vUv), texture2D(tDiffuse2, vUv), 0.25);
}
`,
});
const blendMesh = new THREE.Mesh(
new THREE.PlaneBufferGeometry(w, h),
blendMaterial
);
blendMesh.rotation.x = Math.PI;
blendScene.add(blendMesh);
const resultScene = new THREE.Scene();
const resultCamera = new THREE.OrthographicCamera(-w/2, w/2, -h/2, h/2, -1000, 1000);
const resultMaterial = new THREE.MeshBasicMaterial({
map: resultTarget.texture,
});
const resultMesh = new THREE.Mesh(
new THREE.PlaneBufferGeometry(w, h),
resultMaterial
);
resultMesh.rotation.x = Math.PI;
resultScene.add(resultMesh);
let shape
setMainScene()
renderFrame(0)
function createRenderTarget () {
let type = THREE.FloatType
if( renderer.extensions.get( 'OES_texture_float_linear' ) === null ) type = THREE.HalfFloatType
let renderTarget = new THREE.WebGLRenderTarget( 1, 1, {
type,
wrapS: THREE.ClampToEdgeWrapping,
wrapT: THREE.ClampToEdgeWrapping,
format: THREE.RGBAFormat,
minFilter: THREE.NearestFilter,
magFilter: THREE.NearestFilter,
stencilBuffer: false,
depthBuffer: true
})
renderTarget.texture.generateMipmaps = false
renderTarget.setSize(w, h)
return renderTarget
}
function swapBuffers () {
const temp = previousTarget;
previousTarget = resultTarget;
resultTarget = temp;
}
function setMainScene () {
renderer.setSize(w, h)
renderer.setClearColor(0x111111)
renderer.setPixelRatio(window.devicePixelRatio || 1)
document.body.appendChild(renderer.domElement)
camera.position.set(0, 20, 100);
camera.lookAt(new THREE.Vector3());
shape = new THREE.Mesh(
new THREE.SphereBufferGeometry(10, 20, 20),
new THREE.MeshBasicMaterial({ color: 0xFF0000 })
);
scene.add(shape);
}
function renderFrame (timeElapsed) {
timeElapsed *= 0.001;
renderer.render(scene, camera, sceneTarget);
blendMaterial.uniforms.tDiffuse1.value = previousTarget.texture;
blendMaterial.uniforms.tDiffuse2.value = sceneTarget.texture;
renderer.render(blendScene, blendCamera, resultTarget);
resultMaterial.map = resultTarget.texture;
renderer.render(resultScene, resultCamera);
swapBuffers();
shape.position.x = Math.sin(timeElapsed) * 20.0;
shape.position.y = Math.cos(timeElapsed * Math.PI) * 20.0;
requestAnimationFrame(renderFrame);
}
* { margin: 0; padding: 0; }
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/88/three.min.js"></script>
Let me also add that this isn't really a good persistence effect. I'm not sure what the best one is, but the problem with the one above is that the higher you set the persistence, the less you see of the current frame.
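To put rough numbers on that (my own back-of-the-envelope, not part of the original answer): with mix(previous, current, t), the frame rendered k frames ago ends up weighted t * (1 - t)^k, so the current frame never contributes more than t of the final color, no matter what.

// weight of each past frame in the blended output, for t = 0.25
const t = 0.25;
for (let k = 0; k < 5; k++) {
  console.log('frame -' + k + ': ' + (t * Math.pow(1 - t, k)).toFixed(3));
}
// frame -0: 0.250, frame -1: 0.188, frame -2: 0.141, frame -3: 0.105, frame -4: 0.079

Raise the persistence (lower t) and the newest frame gets even dimmer.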
A better one, though it requires choosing a fade-out color, would be something like this. Only 2 targets are needed, previousTarget and currentTarget.

Step 1: Render previousTarget.texture to currentTarget with a shader that fades toward a certain color, mix(tex, color, 0.05) or something like that.

Step 2: Render the scene to currentTarget as well.

Step 3: Render currentTarget.texture to the canvas.

Step 4: Swap currentTarget and previousTarget.
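One detail that's easy to miss in the snippet below: step 2 renders the scene into the same target the fade pass just wrote to, so the renderer must not clear the color buffer between those two renders; that's what renderer.autoClearColor = false in setMainScene() is for. Condensed, the per-frame order is:

// fade last frame's image toward the fade color, into currentTarget
fadeMaterial.uniforms.tDiffuse.value = previousTarget.texture;
renderer.render(fadeScene, fadeCamera, currentTarget);
// draw the scene on top (autoClearColor = false keeps the faded image)
renderer.render(scene, camera, currentTarget);
// show currentTarget on the canvas, then swap the two targets
resultMaterial.map = currentTarget.texture;
renderer.render(resultScene, resultCamera);
swapBuffers();

The full snippet: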
let w = window.innerWidth
let h = window.innerHeight
const scene = new THREE.Scene()
const camera = new THREE.PerspectiveCamera(60, w / h, 0.1, 1000)
const renderer = new THREE.WebGLRenderer()
const clock = new THREE.Clock()
let currentTarget = createRenderTarget()
let previousTarget = currentTarget.clone();
const fadeScene = new THREE.Scene();
const fadeCamera = new THREE.OrthographicCamera(-w/2, w/2, -h/2, h/2, -1000, 1000);
const fadeMaterial = new THREE.ShaderMaterial({
uniforms: {
tDiffuse: { value: previousTarget.texture },
},
vertexShader: `
varying vec2 vUv;
void main () {
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
vUv = uv;
}
`,
fragmentShader: `
uniform sampler2D tDiffuse;
varying vec2 vUv;
void main () {
vec4 fadeColor = vec4(0,0,0,1);
gl_FragColor = mix(texture2D(tDiffuse, vUv), fadeColor, 0.05);
}
`,
});
const fadeMesh = new THREE.Mesh(
new THREE.PlaneBufferGeometry(w, h),
fadeMaterial
);
fadeMesh.rotation.x = Math.PI;
fadeScene.add(fadeMesh);
const resultScene = new THREE.Scene();
const resultCamera = new THREE.OrthographicCamera(-w/2, w/2, -h/2, h/2, -1000, 1000);
const resultMaterial = new THREE.MeshBasicMaterial({
map: currentTarget.texture,
});
const resultMesh = new THREE.Mesh(
new THREE.PlaneBufferGeometry(w, h),
resultMaterial
);
resultMesh.rotation.x = Math.PI;
resultScene.add(resultMesh);
let shape
setMainScene()
renderFrame(0)
function createRenderTarget () {
let type = THREE.FloatType
if( renderer.extensions.get( 'OES_texture_float_linear' ) === null ) type = THREE.HalfFloatType
let renderTarget = new THREE.WebGLRenderTarget( 1, 1, {
type,
wrapS: THREE.ClampToEdgeWrapping,
wrapT: THREE.ClampToEdgeWrapping,
format: THREE.RGBAFormat,
minFilter: THREE.NearestFilter,
magFilter: THREE.NearestFilter,
stencilBuffer: false,
depthBuffer: true
})
renderTarget.texture.generateMipmaps = false
renderTarget.setSize(w, h)
return renderTarget
}
function swapBuffers () {
const temp = previousTarget;
previousTarget = currentTarget;
currentTarget = temp;
}
function setMainScene () {
renderer.setSize(w, h)
renderer.setClearColor(0x111111)
renderer.setPixelRatio(window.devicePixelRatio || 1)
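// don't auto-clear the color buffer, so the scene render draws on top of the fade pass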
renderer.autoClearColor = false;
document.body.appendChild(renderer.domElement)
camera.position.set(0, 20, 100);
camera.lookAt(new THREE.Vector3());
shape = new THREE.Mesh(
new THREE.SphereBufferGeometry(10, 20, 20),
new THREE.MeshBasicMaterial({ color: 0xFF0000 })
);
scene.add(shape);
}
function renderFrame (timeElapsed) {
timeElapsed *= 0.001;
fadeMaterial.uniforms.tDiffuse.value = previousTarget.texture;
renderer.render(fadeScene, fadeCamera, currentTarget);
renderer.render(scene, camera, currentTarget);
resultMaterial.map = currentTarget.texture;
renderer.render(resultScene, resultCamera);
swapBuffers();
shape.position.x = Math.sin(timeElapsed) * 20.0;
shape.position.y = Math.cos(timeElapsed * Math.PI) * 20.0;
requestAnimationFrame(renderFrame);
}
* { margin: 0; padding: 0; }
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/88/three.min.js"></script>