taby said:
The only reason that I did the things this way because there is no recursion in Vulkan/GLSL.
You can simulate recursion with stack. That's not a problem (which is the common way how you do a traversal through tree).
Anyways - to your question … let me copy the code here and add comments with questions (because it doesn't make that much sense to me):
// This function takes in - eye is camera position, direction is primary ray direction, correct? What's the purpose of this function? Perform eye-path path trace? Perform light-path path trace? Or?
//
// Digging through this - this seems to do whole bidirectional path...
float trace_path_forward(const vec3 eye, const vec3 direction, const float hue, const float eta)
{
// The following part selects a random light, then selects a random point on light and then selects random direction (cosine-weighted) ... is this correct?
// Why is this even done here? This makes sense only for light path...
const vec3 mask = hsv2rgb(vec3(hue, 1.0, 1.0));
Vertex A, B, C;
int index = int(round(stepAndOutputRNGFloat(prng_state)*float(ubo.light_tri_count - 1)));
get_triangle_vertices_by_light_index(A, B, C, index);
const vec3 light_o = getRandomPointOnTriangle(A.pos, B.pos, C.pos);
const vec3 light_d = cosWeightedRandomHemisphereDirection(get_normal_by_light_index(index), prng_state);
float area = get_area_by_light_index(index);
const float energy = area;
// So... here you start with an eye path ... step_locations should contain vertices along eye path, correct?
// If we hit light, we return - otherwise we continue in path.
uint step_count = 0;
vec3 step_locations[max_bounces + 3];
step_locations[step_count] = eye;
step_count++;
traceRayEXT(topLevelAS, gl_RayFlagsOpaqueEXT, 0xff, 0, 0, 0, eye, 0.001, direction, 10000.0, 0);
if(rayPayload.dist != -1.0)
{
vec3 hitPos = eye + direction * rayPayload.dist;
step_locations[step_count] = hitPos;
step_count++;
if( rayPayload.color.r == 20.0 &&
rayPayload.color.g == 20.0 &&
rayPayload.color.b == 20.0)
{
float local_colour = (rayPayload.color.r*mask.r + rayPayload.color.g*mask.g + rayPayload.color.b*mask.b);
return local_colour;
}
}
else
{
// If hit the sky
return 0.0;
}
// Wait ... what is this. So - now you build the first vertex on light path (which is point on the light)
//
// Then you attempt to perform a merge between end of eye-path and sampled point on light path (i.e. like simple explicit sample).
// And therefore connect light and eye paths.
//
// If connection is found - you continue?
// If no connection is found - you iteratively bounce light path and test if there is connection?
bool found_path = false;
step_locations[step_count] = light_o;
step_count++;
if(true == is_clear_line_of_sight(step_locations[1], light_o))
{
found_path = true;
}
else
{
vec3 o = light_o;
vec3 d = light_d;
for(int i = 0; i < max_bounces; i++)
{
traceRayEXT(topLevelAS, gl_RayFlagsOpaqueEXT, 0xff, 0, 0, 0, o, 0.001, d, 10000.0, 0);
// If hit the sky
if(rayPayload.dist == -1.0)
return 0.0;
vec3 hitPos = o + d * rayPayload.dist;
step_locations[step_count] = hitPos;
step_count++;
if(true == is_clear_line_of_sight(step_locations[1], step_locations[step_count - 1]))
{
found_path = true;
break;
}
o = hitPos + rayPayload.normal * 0.01;
d = cosWeightedRandomHemisphereDirection(rayPayload.normal, prng_state);
}
}
// Uhm... in case you can't connect eye path and light path ... you continue tracing eye path? Returning its color directly?
//
// This doesn't make sense to me, why would you even do that (with BRDF being different! and scattering (while no scattering happens here!!!))?
// How do you expect to hit a tiny light in this case when you can't find connection with any vertex on the light path?
//
// I don't think this should be here
if(found_path == false)
return trace_path_backward(max_bounces, eye, direction, hue, eta);
// ???
//
// Your buffer is laid in a way:
// [eye, 1st eye-path vertex, 1st light path vertex, 2nd light path vertex, ...]
//
// you reverse the light path here - correct?
// Reverse the last part of the path
uint start = 2;
uint end = step_count - 1;
while(start < end)
{
vec3 temp = step_locations[start];
step_locations[start] = step_locations[end];
step_locations[end] = temp;
start++;
end--;
}
// So... at this point you have eye path (just origin and single vertex) and light path that surely connects with first vertex on eye path (endpoint))
//
// So at this point you perform merge - why are you re-tracing rays between points on eye path and light path. You have to merge paths, not re-trace them.
// Also, you already performed endpoint connection in is_clear_line_of_sight - why tracing ray again, that is expensive operation
//
// Merging paths is the tricky part - you can either merge only endpoints (I'd start with that one as it's easy - it's basically the same as doing explicit
// step in standard path tracing), then start looking at how to merge multiple vertices
float ret_colour = 0;
float local_colour = energy;
float total = 0;
for(int i = 0; i < step_count - 1; i++)
{
vec3 step_o = step_locations[i];
vec3 step_d = step_locations[i + 1] - step_locations[i];
traceRayEXT(topLevelAS, gl_RayFlagsOpaqueEXT, 0xff, 0, 0, 0, step_o, 0.001, step_d, 10000.0, 0);
local_colour *= (rayPayload.color.r*mask.r + rayPayload.color.g*mask.g + rayPayload.color.b*mask.b);
total += mask.r;
total += mask.g;
total += mask.b;
// If hit a light
if( rayPayload.color.r == 20.0 &&
rayPayload.color.g == 20.0 &&
rayPayload.color.b == 20.0)
{
ret_colour += local_colour;
break;
}
}
return ret_colour / total;
}