diff --git a/projects/indirect_dispatch/resources/shaders/motionBlur.comp b/projects/indirect_dispatch/resources/shaders/motionBlur.comp
index 459416b83f85a1a034005ff2ebe00f5f2c6d60e3..804c0ceb888f9d4ae73c5d412bde2a9c0cebef72 100644
--- a/projects/indirect_dispatch/resources/shaders/motionBlur.comp
+++ b/projects/indirect_dispatch/resources/shaders/motionBlur.comp
@@ -175,21 +175,50 @@ void main(){
     // the sampleUV code expects an offset in range [-0.5, 0.5], so the dither is rescaled to a binary -0.25/0.25
     float random = dither(coord) * 0.5 - 0.25;
 
-    const int sampleCount = 15;
+    const int sampleCountHalf = 8;
 
-    for(int i = 0; i < sampleCount; i++){
-        vec2 sampleUV = mix(uvStart, uvEnd, (i + random + 1) / float(sampleCount + 1));
+    // two samples are processed at a time to allow for mirrored background reconstruction
+    for(int i = 0; i < sampleCountHalf; i++){
 
-        SampleData samplePixel = loadSampleData(sampleUV);
-        float weightSample = computeSampleWeigth(mainPixel, samplePixel);
+        vec2 sampleUV1 = mix(uv, uvEnd,   (i + random + 1) / float(sampleCountHalf + 1));
+        vec2 sampleUV2 = mix(uv, uvStart, (i + random + 1) / float(sampleCountHalf + 1));
 
-        weightSum += weightSample;
-        color     += samplePixel.color * weightSample;
+        SampleData sample1 = loadSampleData(sampleUV1);
+        SampleData sample2 = loadSampleData(sampleUV2);
+
+        float weight1 = computeSampleWeigth(mainPixel, sample1);
+        float weight2 = computeSampleWeigth(mainPixel, sample2);
+
+        bool mirroredBackgroundReconstruction = true;
+        if(mirroredBackgroundReconstruction){
+            // see the Jimenez paper for details and a comparison
+            // the problem is that in the foreground the background has to be reconstructed, which is blurry
+            // in the background the background is obviously known, so it is sharper
+            // at the border between fore- and background this causes a discontinuity
+            // to fix this, the weights are mirrored at this border, effectively reconstructing the background even though it is known
+
+            // these bools check whether sample1 is an affected background pixel (further away and slower moving than sample2)
+            bool inBackground = sample1.depthLinear    > sample2.depthLinear;
+            bool blurredOver  = sample1.velocityPixels < sample2.velocityPixels;
+
+            // this mirrors the weights depending on the results:
+            // if both conditions are true,  then weight2 is mirrored to weight1
+            // if both conditions are false, then weight1 is mirrored to weight2, as sample2 is an affected background pixel
+            // if only one condition is true, then the weights are kept as is
+            weight1 = inBackground && blurredOver ? weight2 : weight1;
+            weight2 = inBackground || blurredOver ? weight2 : weight1;
+        }
+
+        weightSum += weight1;
+        weightSum += weight2;
+
+        color += sample1.color * weight1;
+        color += sample2.color * weight2;
     }
 
     // normalize color and weight
-    weightSum /= sampleCount;
-    color     /= sampleCount;
+    weightSum /= sampleCountHalf * 2;
+    color     /= sampleCountHalf * 2;
 
     // the main color is considered the background
     // the weight sum can be interpreted as the alpha of the combined samples, see Jimenez paper
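
For reference, the weight mirroring in the hunk above reduces to a small truth table over (inBackground, blurredOver). Below is a minimal standalone sketch of just that selection step; mirrorWeights is a hypothetical helper that is not part of the shader and assumes the same two conditions computed inside the loop:

    // hypothetical helper, equivalent to the two ternaries in the loop above
    // (inBackground, blurredOver) -> resulting (weight1, weight2)
    //   (true,  true ) -> (weight2, weight2)  sample1 is an affected background pixel
    //   (false, false) -> (weight1, weight1)  sample2 is an affected background pixel
    //   (true,  false) -> (weight1, weight2)  weights kept as is
    //   (false, true ) -> (weight1, weight2)  weights kept as is
    void mirrorWeights(bool inBackground, bool blurredOver, inout float weight1, inout float weight2){
        float mirrored1 = inBackground && blurredOver ? weight2 : weight1;
        float mirrored2 = inBackground || blurredOver ? weight2 : weight1;
        weight1 = mirrored1;
        weight2 = mirrored2;
    }

The temporaries only make the simultaneous update explicit; in the shader the sequential assignments give the same result, because weight1 is only overwritten in the case where weight2 keeps its own value anyway.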