diff --git a/projects/indirect_dispatch/resources/models/Grid.png b/projects/indirect_dispatch/resources/models/Grid.png
new file mode 100644
index 0000000000000000000000000000000000000000..5f40eee62f7f9dba3dc156ff6a3653ea2e7f5391
--- /dev/null
+++ b/projects/indirect_dispatch/resources/models/Grid.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a11c33e4935d93723ab11f597f2aca1ca1ff84af66f2e2d10a01580eb0b7831a
+size 40135
diff --git a/projects/indirect_dispatch/resources/models/ground.bin b/projects/indirect_dispatch/resources/models/ground.bin
new file mode 100644
index 0000000000000000000000000000000000000000..e29e4f18552def1ac64c167d994be959f82e35c7
--- /dev/null
+++ b/projects/indirect_dispatch/resources/models/ground.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f8e20cd1c62da3111536283517b63a149f258ea82b1dff8ddafdb79020065b7c
+size 140
diff --git a/projects/indirect_dispatch/resources/models/ground.gltf b/projects/indirect_dispatch/resources/models/ground.gltf
new file mode 100644
index 0000000000000000000000000000000000000000..6e8d49502b54d1fae06fa55040f3ebd7a33911d7
--- /dev/null
+++ b/projects/indirect_dispatch/resources/models/ground.gltf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:158b9c73a199aabaa1b0be99d81e5b5c13a963458916783d1c113f0f03c6c898
+size 2836
diff --git a/projects/indirect_dispatch/resources/shaders/mesh.frag b/projects/indirect_dispatch/resources/shaders/mesh.frag
index 46d808c4eddb6bf87273219961c9c36db59dab74..7da116a50b330b531de7e1f0a80a58fd8a0277d8 100644
--- a/projects/indirect_dispatch/resources/shaders/mesh.frag
+++ b/projects/indirect_dispatch/resources/shaders/mesh.frag
@@ -7,6 +7,8 @@ layout(location = 1) in vec3 passPos;
 layout(location = 0) out vec3 outColor;
 
 void main()	{
-	// outColor = passNormal * 0.5 + 0.5;
-    outColor = vec3(sin(passPos.y * 100) * 0.5 + 0.5);
+    vec3    albedo  = vec3(sin(passPos.y * 100) * 0.5 + 0.5);
+    vec3    N       = normalize(passNormal);
+    float   light   = max(N.y * 0.5 + 0.5, 0);
+    outColor = light * albedo;
 }
\ No newline at end of file
diff --git a/projects/indirect_dispatch/resources/shaders/motionBlur.comp b/projects/indirect_dispatch/resources/shaders/motionBlur.comp
index 5c1960162a00d4d2689e218641431c08565bc8fa..58f95854a86f347a022c2f988d7ed6e163bd5248 100644
--- a/projects/indirect_dispatch/resources/shaders/motionBlur.comp
+++ b/projects/indirect_dispatch/resources/shaders/motionBlur.comp
@@ -4,9 +4,10 @@
 
 layout(set=0, binding=0)                    uniform texture2D   inColor;
 layout(set=0, binding=1)                    uniform texture2D   inDepth;
-layout(set=0, binding=2)                    uniform texture2D   inMotion;   
-layout(set=0, binding=3)                    uniform sampler     nearestSampler;
-layout(set=0, binding=4, r11f_g11f_b10f)    uniform image2D     outImage;
+layout(set=0, binding=2)                    uniform texture2D   inMotionFullRes;
+layout(set=0, binding=3)                    uniform texture2D   inMotionNeighbourhoodMax;  
+layout(set=0, binding=4)                    uniform sampler     nearestSampler;
+layout(set=0, binding=5, r11f_g11f_b10f)    uniform image2D     outImage;
 
 layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
 
@@ -23,6 +24,7 @@ float linearizeDepth(float depth, float near, float far){
 }
 
 struct SampleData{
+    vec3    color;
     float   depthLinear;
     vec2    uv;
     vec2    motion;
@@ -41,32 +43,34 @@ float cylinder(vec2 uv1, vec2 uv2, float velocity){
     return 1 - smoothstep(0.95 * velocity, 1.05 * velocity, distance(uv1, uv2));
 }
 
-// checks if depth2 is closer than depth1, result within range [0, 1]
+// checks if depth1 is closer than depth2, result within range [0, 1]
 float softDepthCompare(float depth1, float depth2){
-    float softDepthExtent = 0.1;
+    float softDepthExtent = 0.0001;
     return clamp(1 - (depth1 - depth2) / softDepthExtent, 0, 1);
 }
 
 // reconstruction filter and helper functions from "A Reconstruction Filter for Plausible Motion Blur", McGuire
 float computeSampleWeigth(SampleData mainPixel, SampleData samplePixel){
     
-    float foreground = softDepthCompare(  mainPixel.depthLinear,  samplePixel.depthLinear);
-    float background = softDepthCompare(samplePixel.depthLinear,    mainPixel.depthLinear);
+    float foreground = softDepthCompare(samplePixel.depthLinear,    mainPixel.depthLinear);
+    float background = softDepthCompare(  mainPixel.depthLinear,  samplePixel.depthLinear);
+    
+    float weight = 0;
     
     // blurry sample in front of main pixel
-    float weight = foreground * cone(mainPixel.uv, samplePixel.uv, samplePixel.velocity);
+    weight += foreground * cone(mainPixel.uv, samplePixel.uv, samplePixel.velocity);
     
     // any sample behind blurry main pixel: estimate background by using sample
     weight += background * cone(mainPixel.uv, samplePixel.uv, mainPixel.velocity);
     
     // both main pixel and sample are blurry and overlap
     weight += 2 * cylinder(mainPixel.uv, samplePixel.uv, mainPixel.velocity) * cylinder(mainPixel.uv, samplePixel.uv, samplePixel.velocity);
-    
+
     return weight;
 }
 
 // see "A Reconstruction Filter for Plausible Motion Blur", section 2.2
-vec2 rescaleMotion(vec2 motion){
+vec2 processMotionVector(vec2 motion){
     // every frame a pixel should blur over the distance it moves
     // as we blur in two directions (where it was and where it will be) we must half the motion 
     vec2 motionHalf     = motion * 0.5;
@@ -83,8 +87,9 @@ vec2 rescaleMotion(vec2 motion){
 SampleData loadSampleData(vec2 uv){
     
     SampleData data;
-    data.uv             = uv;
-    data.motion         = rescaleMotion(texture(sampler2D(inMotion, nearestSampler), uv).rg);
+    data.color          = texture(sampler2D(inColor, nearestSampler), uv).rgb;
+    data.uv             = (ivec2(uv * imageSize(outImage)) + 0.5) / imageSize(outImage);    // quantize to integer coordinates, then move to pixel center and compute final uv
+    data.motion         = processMotionVector(texture(sampler2D(inMotionFullRes, nearestSampler), uv).rg);
     data.velocity       = length(data.motion);
     data.depthLinear    = texture(sampler2D(inDepth, nearestSampler), uv).r;
     data.depthLinear    = linearizeDepth(data.depthLinear, cameraNearPlane, cameraFarPlane);
@@ -112,24 +117,33 @@ void main(){
     ivec2   textureRes  = textureSize(sampler2D(inColor, nearestSampler), 0);
     ivec2   coord       = ivec2(gl_GlobalInvocationID.xy);
     vec2    uv          = vec2(coord + 0.5) / textureRes;   // + 0.5 to shift uv into pixel center
-
+    
+    vec2 motionNeighbourhoodMax = processMotionVector(texture(sampler2D(inMotionNeighbourhoodMax, nearestSampler), uv).rg);
+    
     SampleData mainPixel = loadSampleData(uv);
     
     // early out on little movement
-    if(mainPixel.velocity <= minVelocity){
-        vec3 color = texture(sampler2D(inColor, nearestSampler), uv).rgb;        
-        imageStore(outImage, coord, vec4(color, 0.f));
+    if(length(motionNeighbourhoodMax) <= minVelocity){
+        imageStore(outImage, coord, vec4(mainPixel.color, 0.f));
         return;
     }
-
-    vec3        color       = vec3(0);
-    float       weightSum   = 0;
-    const int   sampleCount = 16; 
+    
+    // the main pixel always contributes to the motion blur
+	// however if it is spread across multiple pixels, it should distribute its color evenly among all of them (assuming a linear motion)
+    // because of this the pixel motion is translated into pixels
+	// for example if a pixel covers a five pixel distance, then its weight is 1 / 5
+	float   mainPixelCoverageLength = max(length(mainPixel.motion * imageSize(outImage)), 1);   // max 1 because a pixel can't cover less than its size
+    float   mainPixelWeight         = 1.f / mainPixelCoverageLength;
+    
+    vec3    color           = mainPixel.color * mainPixelWeight;
+    float   weightSum       = mainPixelWeight;
+    
+    const int sampleCount = 15; 
     
     // clamping start and end points avoids artifacts at image borders
     // the sampler clamps the sample uvs anyways, but without clamping here, many samples can be stuck at the border
-    vec2 uvStart    = clamp(uv - mainPixel.motion, 0, 1);
-    vec2 uvEnd      = clamp(uv + mainPixel.motion, 0, 1);
+    vec2 uvStart    = clamp(uv - motionNeighbourhoodMax, 0, 1);
+    vec2 uvEnd      = clamp(uv + motionNeighbourhoodMax, 0, 1);
     
     // samples are placed evenly, but the entire filter is jittered
     // dither returns either 0 or 1
@@ -137,14 +151,13 @@ void main(){
     float random = dither(coord) * 0.5 - 0.25;
     
     for(int i = 0; i < sampleCount; i++){
-        vec2    sampleUV    = mix(uvStart, uvEnd, (i + random + 1) / float(sampleCount + 1));
-        vec3    sampleColor = texture(sampler2D(inColor, nearestSampler), sampleUV).rgb;
+        vec2 sampleUV = mix(uvStart, uvEnd, (i + random + 1) / float(sampleCount + 1));
         
         SampleData  samplePixel     = loadSampleData(sampleUV);
         float       weightSample    = computeSampleWeigth(mainPixel, samplePixel);
-        
+
         weightSum   += weightSample;
-        color       += sampleColor * weightSample;
+        color       += samplePixel.color * weightSample;
     }
     
     color /= weightSum;
diff --git a/projects/indirect_dispatch/src/App.cpp b/projects/indirect_dispatch/src/App.cpp
index 3b9992ed467145bfa203859c16b05a4f25c69391..9ff20a0e680a05bb3b95e0348b9db835cc30c191 100644
--- a/projects/indirect_dispatch/src/App.cpp
+++ b/projects/indirect_dispatch/src/App.cpp
@@ -44,6 +44,9 @@ bool App::initialize() {
 	if (!loadMesh(m_core, "resources/models/cube.gltf", &m_cubeMesh))
 		return false;
 
+	if (!loadMesh(m_core, "resources/models/ground.gltf", &m_groundMesh))
+		return false;
+
 	if (!m_motionBlur.initialize(&m_core, m_windowWidth, m_windowHeight))
 		return false;
 
@@ -56,7 +59,8 @@ bool App::initialize() {
 	m_renderTargets = createRenderTargets(m_core, m_windowWidth, m_windowHeight);
 
 	const int cameraIndex = m_cameraManager.addCamera(vkcv::camera::ControllerType::PILOT);
-	m_cameraManager.getCamera(cameraIndex).setPosition(glm::vec3(0, 0, -3));
+	m_cameraManager.getCamera(cameraIndex).setPosition(glm::vec3(0, 1, -3));
+	m_cameraManager.getCamera(cameraIndex).setNearFar(0.1f, 30.f);
 	
 	return true;
 }
@@ -66,7 +70,8 @@ void App::run() {
 	auto                        frameStartTime = std::chrono::system_clock::now();
 	const auto                  appStartTime   = std::chrono::system_clock::now();
 	const vkcv::ImageHandle     swapchainInput = vkcv::ImageHandle::createSwapchainImageHandle();
-	const vkcv::DrawcallInfo    sphereDrawcall(m_sphereMesh.mesh, {}, 1);
+	const vkcv::DrawcallInfo    drawcallSphere(m_sphereMesh.mesh, {}, 1);
+	const vkcv::DrawcallInfo    drawcallGround(m_groundMesh.mesh, {}, 1);
 	const vkcv::DrawcallInfo    cubeDrawcall(m_cubeMesh.mesh, {}, 1);
 
 	vkcv::gui::GUI gui(m_core, m_window);
@@ -92,8 +97,10 @@ void App::run() {
 	int     cameraShutterSpeedInverse       = 24;
 	float   motionVectorVisualisationRange  = 0.008;
 
-	glm::mat4 mvpPrevious               = glm::mat4(1.f);
+	glm::mat4 mvpSpherePrevious         = glm::mat4(1.f);
+	glm::mat4 mvpGroundPrevious         = glm::mat4(1.f);
 	glm::mat4 viewProjectionPrevious    = m_cameraManager.getActiveCamera().getMVP();
+	const glm::mat4 modelMatrixGround   = glm::mat4(1.f);
 
 	while (m_window.isWindowOpen()) {
 		vkcv::Window::pollEvents();
@@ -120,20 +127,27 @@ void App::run() {
 		m_cameraManager.update(0.000001 * static_cast<double>(deltatime.count()));
 		const glm::mat4 viewProjection = m_cameraManager.getActiveCamera().getMVP();
 
-		const auto      time            = frameEndTime - appStartTime;
-		const float     fCurrentTime    = std::chrono::duration_cast<std::chrono::milliseconds>(time).count() * 0.001f;
-		const float     currentHeight   = glm::sin(fCurrentTime * objectVerticalSpeed);
-		const glm::mat4 modelMatrix     = glm::translate(glm::mat4(1), glm::vec3(0, currentHeight, 0));
-		const glm::mat4 mvp             = viewProjection * modelMatrix;
+		const auto      time                = frameEndTime - appStartTime;
+		const float     fCurrentTime        = std::chrono::duration_cast<std::chrono::milliseconds>(time).count() * 0.001f;
+		const float     currentHeight       = glm::sin(fCurrentTime * objectVerticalSpeed);
+		const glm::mat4 modelMatrixSphere   = glm::translate(glm::mat4(1), glm::vec3(0, currentHeight, 0));
+		const glm::mat4 mvpSphere           = viewProjection * modelMatrixSphere;
+		const glm::mat4 mvpGround           = viewProjection * modelMatrixGround;
 
 		const vkcv::CommandStreamHandle cmdStream = m_core.createCommandStream(vkcv::QueueType::Graphics);
 
 		// prepass
-		glm::mat4 prepassMatrices[2] = {
-			mvp,
-			mvpPrevious };
-		vkcv::PushConstants prepassPushConstants(sizeof(glm::mat4)*2);
-		prepassPushConstants.appendDrawcall(prepassMatrices);
+		vkcv::PushConstants prepassPushConstants(sizeof(glm::mat4) * 2);
+
+		glm::mat4 sphereMatricesPrepass[2] = {
+			mvpSphere,
+			mvpSpherePrevious };
+		prepassPushConstants.appendDrawcall(sphereMatricesPrepass);
+
+		glm::mat4 groundMatricesPrepass[2] = {
+			mvpGround,
+			mvpGroundPrevious };
+		prepassPushConstants.appendDrawcall(groundMatricesPrepass);
 
 		const std::vector<vkcv::ImageHandle> prepassRenderTargets = {
 			m_renderTargets.motionBuffer,
@@ -144,7 +158,7 @@ void App::run() {
 			m_prePass.renderPass,
 			m_prePass.pipeline,
 			prepassPushConstants,
-			{ sphereDrawcall },
+			{ drawcallSphere, drawcallGround },
 			prepassRenderTargets);
 
 		// sky prepass
@@ -168,14 +182,15 @@ void App::run() {
 			m_renderTargets.depthBuffer };
 
 		vkcv::PushConstants meshPushConstants(sizeof(glm::mat4));
-		meshPushConstants.appendDrawcall(mvp);
+		meshPushConstants.appendDrawcall(mvpSphere);
+		meshPushConstants.appendDrawcall(mvpGround);
 
 		m_core.recordDrawcallsToCmdStream(
 			cmdStream,
 			m_meshPass.renderPass,
 			m_meshPass.pipeline,
 			meshPushConstants,
-			{ sphereDrawcall },
+			{ drawcallSphere, drawcallGround },
 			renderTargets);
 
 		// sky
@@ -290,7 +305,8 @@ void App::run() {
 		m_core.endFrame();
 
 		viewProjectionPrevious  = viewProjection;
-		mvpPrevious             = mvp;
+		mvpSpherePrevious       = mvpSphere;
+		mvpGroundPrevious       = mvpGround;
 		frameStartTime          = frameEndTime;
 	}
 }
\ No newline at end of file
diff --git a/projects/indirect_dispatch/src/App.hpp b/projects/indirect_dispatch/src/App.hpp
index 9871776f2f5a4ae06b69d6572adb7c18063bc613..ffe75c87bcb3462746129a32e1657e16566ac663 100644
--- a/projects/indirect_dispatch/src/App.hpp
+++ b/projects/indirect_dispatch/src/App.hpp
@@ -23,6 +23,7 @@ private:
 
 	MeshResources m_sphereMesh;
 	MeshResources m_cubeMesh;
+	MeshResources m_groundMesh;
 
 	GraphicPassHandles m_meshPass;
 	GraphicPassHandles m_skyPass;
diff --git a/projects/indirect_dispatch/src/MotionBlur.cpp b/projects/indirect_dispatch/src/MotionBlur.cpp
index 946bc00825b29a1b4fc779711fa22341fa283dd6..cc1bbc82a21b7bf19c6147d297230b8289c0d892 100644
--- a/projects/indirect_dispatch/src/MotionBlur.cpp
+++ b/projects/indirect_dispatch/src/MotionBlur.cpp
@@ -63,27 +63,29 @@ vkcv::ImageHandle MotionBlur::render(
 
 	computeMotionTiles(cmdStream, motionBufferFullRes);
 
-	vkcv::ImageHandle inputMotionBuffer;
+	// usually this is the neighbourhood max, but other modes can be used for comparison/debugging
+	vkcv::ImageHandle inputMotionTiles;
 	if (motionVectorMode == eMotionVectorMode::FullResolution)
-		inputMotionBuffer = motionBufferFullRes;
+		inputMotionTiles = motionBufferFullRes;
 	else if (motionVectorMode == eMotionVectorMode::MaxTile)
-		inputMotionBuffer = m_renderTargets.motionMax;
+		inputMotionTiles = m_renderTargets.motionMax;
 	else if (motionVectorMode == eMotionVectorMode::MaxTileNeighbourhood)
-		inputMotionBuffer = m_renderTargets.motionMaxNeighbourhood;
+		inputMotionTiles = m_renderTargets.motionMaxNeighbourhood;
 	else {
 		vkcv_log(vkcv::LogLevel::ERROR, "Unknown eMotionInput enum value");
-		inputMotionBuffer = motionBufferFullRes;
+		inputMotionTiles = m_renderTargets.motionMaxNeighbourhood;
 	}
 
 	vkcv::DescriptorWrites motionBlurDescriptorWrites;
 	motionBlurDescriptorWrites.sampledImageWrites = {
 		vkcv::SampledImageDescriptorWrite(0, colorBuffer),
 		vkcv::SampledImageDescriptorWrite(1, depthBuffer),
-		vkcv::SampledImageDescriptorWrite(2, inputMotionBuffer) };
+		vkcv::SampledImageDescriptorWrite(2, motionBufferFullRes),
+		vkcv::SampledImageDescriptorWrite(3, inputMotionTiles) };
 	motionBlurDescriptorWrites.samplerWrites = {
-		vkcv::SamplerDescriptorWrite(3, m_nearestSampler) };
+		vkcv::SamplerDescriptorWrite(4, m_nearestSampler) };
 	motionBlurDescriptorWrites.storageImageWrites = {
-		vkcv::StorageImageDescriptorWrite(4, m_renderTargets.outputColor) };
+		vkcv::StorageImageDescriptorWrite(5, m_renderTargets.outputColor) };
 
 	m_core->writeDescriptorSet(m_motionBlurPass.descriptorSet, motionBlurDescriptorWrites);
 
@@ -109,7 +111,7 @@ vkcv::ImageHandle MotionBlur::render(
 	m_core->prepareImageForStorage(cmdStream, m_renderTargets.outputColor);
 	m_core->prepareImageForSampling(cmdStream, colorBuffer);
 	m_core->prepareImageForSampling(cmdStream, depthBuffer);
-	m_core->prepareImageForSampling(cmdStream, inputMotionBuffer);
+	m_core->prepareImageForSampling(cmdStream, inputMotionTiles);
 
 	const auto fullscreenDispatchSizes = computeFullscreenDispatchSize(
 		m_core->getImageWidth(m_renderTargets.outputColor),