Commit c0719894 authored by Alexander Gauggel

[#106] Implemented proper motion blur reconstruction filter based on sample depth and velocity

parent 66b59781
Merge request !89: Resolve "Indirect Dispatch"
Pipeline #26780 failed
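Background for the diff below: the per-sample weight in the new shader is the reconstruction filter from "A Reconstruction Filter for Plausible Motion Blur" (McGuire et al.). For a main pixel X and a sample Y with linearized depths z_X, z_Y and velocity magnitudes v_X, v_Y, and writing softDepthCompare as d, the shader accumulates

\[
w(X,Y) = d(z_X, z_Y)\,\mathrm{cone}(X, Y, v_Y) + d(z_Y, z_X)\,\mathrm{cone}(X, Y, v_X) + 2\,\mathrm{cylinder}(X, Y, v_X)\,\mathrm{cylinder}(X, Y, v_Y)
\]

The three terms cover, in order: a blurry sample in front of the main pixel, a blurry main pixel revealing the background sample behind it, and two blurry regions that overlap. The summed sample colors are normalized by the summed weights.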
motionBlur.comp
@@ -2,58 +2,125 @@
 #extension GL_GOOGLE_include_directive : enable
 
 layout(set=0, binding=0) uniform texture2D inColor;
-layout(set=0, binding=1) uniform texture2D inMotion;
-layout(set=0, binding=2) uniform sampler textureSampler;
-layout(set=0, binding=3, r11f_g11f_b10f) uniform image2D outImage;
+layout(set=0, binding=1) uniform texture2D inDepth;
+layout(set=0, binding=2) uniform texture2D inMotion;
+layout(set=0, binding=3) uniform sampler nearestSampler;
+layout(set=0, binding=4, r11f_g11f_b10f) uniform image2D outImage;
 
 layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
 
 layout( push_constant ) uniform constants{
-    // computed from delta time and shutter speed
-    float motionFactor;
+    float motionFactor; // computed from delta time and shutter speed
     float minVelocity;
+    // camera planes are needed to linearize depth
+    float cameraNearPlane;
+    float cameraFarPlane;
 };
 
+float linearizeDepth(float depth, float near, float far){
+    return near * far / (far + depth * (near - far));
+}
+
+struct SampleData{
+    float depthLinear;
+    vec2  uv;
+    vec2  motion;
+    float velocity;
+};
+
+// estimates if a point lies within the influence of another point
+// uv1 and uv2 can be interchanged, the velocity belongs to the point whose influence is estimated
+float cone(vec2 uv1, vec2 uv2, float velocity){
+    return clamp(1 - distance(uv1, uv2) / velocity, 0, 1);
+}
+
+// similar to cone, but with a different shape
+// see paper for usage details
+float cylinder(vec2 uv1, vec2 uv2, float velocity){
+    return 1 - smoothstep(0.95 * velocity, 1.05 * velocity, distance(uv1, uv2));
+}
+
+// checks if depth2 is closer than depth1, result within range [0, 1]
+float softDepthCompare(float depth1, float depth2){
+    float softDepthExtent = 0.1;
+    return clamp(1 - (depth1 - depth2) / softDepthExtent, 0, 1);
+}
+
+// reconstruction filter and helper functions from "A Reconstruction Filter for Plausible Motion Blur", McGuire
+float computeSampleWeigth(SampleData mainPixel, SampleData samplePixel){
+    float foreground = softDepthCompare(mainPixel.depthLinear, samplePixel.depthLinear);
+    float background = softDepthCompare(samplePixel.depthLinear, mainPixel.depthLinear);
+
+    // blurry sample in front of main pixel
+    float weight = foreground * cone(mainPixel.uv, samplePixel.uv, samplePixel.velocity);
+
+    // any sample behind blurry main pixel: estimate background by using sample
+    weight += background * cone(mainPixel.uv, samplePixel.uv, mainPixel.velocity);
+
+    // both main pixel and sample are blurry and overlap
+    weight += 2 * cylinder(mainPixel.uv, samplePixel.uv, mainPixel.velocity) * cylinder(mainPixel.uv, samplePixel.uv, samplePixel.velocity);
+
+    return weight;
+}
+
+SampleData loadSampleData(vec2 uv){
+    SampleData data;
+    data.uv          = uv;
+    data.motion      = texture(sampler2D(inMotion, nearestSampler), uv).rg * motionFactor;
+    data.velocity    = length(data.motion);
+    data.depthLinear = texture(sampler2D(inDepth, nearestSampler), uv).r;
+    data.depthLinear = linearizeDepth(data.depthLinear, cameraNearPlane, cameraFarPlane);
+    return data;
+}
+
 void main(){
     if(any(greaterThanEqual(gl_GlobalInvocationID.xy, imageSize(outImage))))
         return;
 
-    ivec2 textureRes = textureSize(sampler2D(inColor, textureSampler), 0);
+    ivec2 textureRes = textureSize(sampler2D(inColor, nearestSampler), 0);
     ivec2 coord = ivec2(gl_GlobalInvocationID.xy);
-    vec2 uv = vec2(coord) / textureRes;
+    vec2 uv = vec2(coord + 0.5) / textureRes; // + 0.5 to shift uv into pixel center
 
-    vec2 motion = texture(sampler2D(inMotion, textureSampler), uv).rg;
-    motion *= motionFactor;
-    float velocity = length(motion);
+    SampleData mainPixel = loadSampleData(uv);
 
     // early out on little movement
-    if(velocity < minVelocity){
-        vec3 color = texture(sampler2D(inColor, textureSampler), uv).rgb;
+    if(mainPixel.velocity <= minVelocity){
+        vec3 color = texture(sampler2D(inColor, nearestSampler), uv).rgb;
         imageStore(outImage, coord, vec4(color, 0.f));
         return;
     }
 
-    // TODO: should be configurable by user or computed by velocity tile sizes
-    const float maxBlurDistance = 0.075;
-    if(velocity > maxBlurDistance){
-        motion *= maxBlurDistance / velocity;
-    }
+    // TODO: check if a max velocity is necessary
+    // // TODO: should be configurable by user or computed by velocity tile sizes
+    // const float maxBlurDistance = 0.075;
+    // if(mainPixel.velocity > maxBlurDistance)
+    //     motion *= maxBlurDistance / velocity;
 
     vec3 color = vec3(0);
+    float weightSum = 0;
     const int sampleCount = 16;
 
     // clamping start and end points avoids artifacts at image borders
     // the sampler clamps the sample uvs anyways, but without clamping here, many samples can be stuck at the border
-    vec2 uvStart = clamp(uv - motion, 0, 1);
-    vec2 uvEnd = clamp(uv + motion, 0, 1);
+    vec2 uvStart = clamp(uv - mainPixel.motion, 0, 1);
+    vec2 uvEnd   = clamp(uv + mainPixel.motion, 0, 1);
 
     for(int i = 0; i < sampleCount; i++){
         vec2 sampleUV = mix(uvStart, uvEnd, i / float(sampleCount - 1));
-        color += texture(sampler2D(inColor, textureSampler), sampleUV).rgb;
+        vec3 sampleColor = texture(sampler2D(inColor, nearestSampler), sampleUV).rgb;
+        SampleData samplePixel = loadSampleData(sampleUV);
+
+        float weightSample = computeSampleWeigth(mainPixel, samplePixel);
+        weightSum += weightSample;
+        color     += sampleColor * weightSample;
     }
-    color /= sampleCount;
+    color /= weightSum;
 
     imageStore(outImage, coord, vec4(color, 0.f));
 }
\ No newline at end of file
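A note on the new linearizeDepth helper: assuming a conventional (non-reversed) perspective projection that writes depth d in [0, 1] for a view-space distance z between near plane n and far plane f,

\[
d = \frac{f\,(z - n)}{z\,(f - n)},
\]

solving for z gives exactly the expression used in the shader:

\[
z = \frac{n f}{f + d\,(n - f)}.
\]

If the pipeline ever switches to reversed depth, this helper needs to change with it.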
@@ -277,11 +277,12 @@ void App::run() {
 	vkcv::DescriptorWrites motionBlurDescriptorWrites;
 	motionBlurDescriptorWrites.sampledImageWrites = {
 		vkcv::SampledImageDescriptorWrite(0, m_renderTargets.colorBuffer),
-		vkcv::SampledImageDescriptorWrite(1, motionBuffer) };
+		vkcv::SampledImageDescriptorWrite(1, m_renderTargets.depthBuffer),
+		vkcv::SampledImageDescriptorWrite(2, motionBuffer) };
 	motionBlurDescriptorWrites.samplerWrites = {
-		vkcv::SamplerDescriptorWrite(2, m_linearSampler) };
+		vkcv::SamplerDescriptorWrite(3, m_nearestSampler) };
 	motionBlurDescriptorWrites.storageImageWrites = {
-		vkcv::StorageImageDescriptorWrite(3, m_renderTargets.motionBlurOutput) };
+		vkcv::StorageImageDescriptorWrite(4, m_renderTargets.motionBlurOutput) };
 	m_core.writeDescriptorSet(m_motionBlurPass.descriptorSet, motionBlurDescriptorWrites);
@@ -292,23 +293,37 @@ void App::run() {
 	m_core.prepareImageForStorage(cmdStream, m_renderTargets.motionBlurOutput);
 	m_core.prepareImageForSampling(cmdStream, m_renderTargets.colorBuffer);
+	m_core.prepareImageForSampling(cmdStream, m_renderTargets.depthBuffer);
 	m_core.prepareImageForSampling(cmdStream, motionBuffer);
 
 	const float microsecondToSecond = 0.000001;
 	const float fDeltatimeSeconds = microsecondToSecond * std::chrono::duration_cast<std::chrono::microseconds>(frameEndTime - frameStartTime).count();
 
+	// must match layout in "motionBlur.comp"
+	struct MotionBlurConstantData {
+		float motionFactor;
+		float minVelocity;
+		float cameraNearPlane;
+		float cameraFarPlane;
+	};
+	MotionBlurConstantData motionBlurConstantData;
+
 	// small mouse movements are restricted to pixel level and therefore quite imprecise
 	// extrapolating movement at high framerates thus results in big jerky movements
 	// this causes wide sudden motion blur, which looks quite bad
 	// as a workaround the time scale is limited to a maximum value
 	const float motionBlurTimeScaleMax = 1.f / 60;
 	const float deltaTimeMotionBlur = std::max(fDeltatimeSeconds, motionBlurTimeScaleMax);
-	const float motionBlurMotionFactor = 1 / (deltaTimeMotionBlur * cameraShutterSpeedInverse);
 
-	vkcv::PushConstants motionBlurPushConstants(sizeof(float) * 2);
-	float motionBlurConstantData[2] = { motionBlurMotionFactor, motionBlurMinVelocity };
+	motionBlurConstantData.motionFactor = 1 / (deltaTimeMotionBlur * cameraShutterSpeedInverse);
+	motionBlurConstantData.minVelocity  = motionBlurMinVelocity;
+
+	float cameraNear, cameraFar;
+	m_cameraManager.getActiveCamera().getNearFar(cameraNear, cameraFar);
+	motionBlurConstantData.cameraNearPlane = cameraNear;
+	motionBlurConstantData.cameraFarPlane  = cameraFar;
+
+	vkcv::PushConstants motionBlurPushConstants(sizeof(motionBlurConstantData));
 	motionBlurPushConstants.appendDrawcall(motionBlurConstantData);
 
 	m_core.recordComputeDispatchToCmdStream(
 	...
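On the motion factor pushed above: it rescales the per-frame motion vectors to the distance covered while the virtual shutter is open, i.e. motionFactor = shutterTime / deltaTime. As a worked example with an illustrative shutter of 1/48 s (cameraShutterSpeedInverse = 48; the actual value is defined elsewhere in the app) and the delta time clamped to 1/60 s:

\[
\text{motionFactor} = \frac{1}{\Delta t \cdot \text{cameraShutterSpeedInverse}} = \frac{1}{\tfrac{1}{60} \cdot 48} = 1.25,
\]

so each blur streak extends over 1.25 frames' worth of motion.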
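Since MotionBlurConstantData must match the push constant block in motionBlur.comp byte for byte, a compile-time size check is cheap insurance. A minimal sketch (the static_assert is not part of this commit; four scalar floats pack identically in C++ and in a GLSL push constant block, which defaults to std430 layout):

struct MotionBlurConstantData {
	float motionFactor;
	float minVelocity;
	float cameraNearPlane;
	float cameraFarPlane;
};

// 4 floats = 16 bytes, offsets 0/4/8/12 on both the C++ and the GLSL side
static_assert(sizeof(MotionBlurConstantData) == 4 * sizeof(float),
              "MotionBlurConstantData must stay in sync with motionBlur.comp");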