//Screen space rasterization void rst::rasterizer::rasterize_triangle(const Triangle& t, const std::array<Eigen::Vector3f, 3>& view_pos) { // TODO: From your HW3, get the triangle rasterization code. // TODO: Inside your rasterization loop: // * v[i].w() is the vertex view space depth value z. // * Z is interpolated view space depth for the current pixel // * zp is depth between zNear and zFar, used for z-buffer
// TODO: Interpolate the attributes: // auto interpolated_color // auto interpolated_normal // auto interpolated_texcoords // auto interpolated_shadingcoords
// Use: fragment_shader_payload payload(interpolated_color, interpolated_normal.normalized(), interpolated_texcoords, texture ? &*texture : nullptr); // Use: payload.view_pos = interpolated_shadingcoords; // Use: Instead of passing the triangle's color directly to the frame buffer, pass the color to the shaders first to get the final color; // Use: auto pixel_color = fragment_shader(payload); auto v = t.toVector4(); auto minX = std::min(v[0].x(), std::min(v[1].x(), v[2].x())); auto minY = std::min(v[0].y(), std::min(v[1].y(), v[2].y())); auto maxX = std::max(v[0].x(), std::max(v[1].x(), v[2].x())); auto maxY = std::max(v[0].y(), std::max(v[1].y(), v[2].y()));
for (int i = minX; i <= maxX; i++) { for (int j = minY; j <= maxY; j++) { if (insideTriangle(i + 0.5, j + 0.5, t.v)) { auto tup = computeBarycentric2D(i, j, t.v);
// Interior of the Blinn-Phong fragment shader (excerpt starts mid-function;
// the enclosing signature is not part of this snippet).
// NOTE(review): in the source the newlines were collapsed, so the mid-line
// "// TODO" comment swallowed the v/l/r/h declarations. Restored here.
Eigen::Vector3f color = payload.color;
Eigen::Vector3f point = payload.view_pos;
Eigen::Vector3f normal = payload.normal;

Eigen::Vector3f result_color = {0, 0, 0};
for (auto& light : lights)
{
    // TODO: For each light source in the code, calculate what the *ambient*,
    // *diffuse*, and *specular* components are. Then, accumulate that result
    // on the *result_color* object.
    Eigen::Vector3f v = (eye_pos - point).normalized();   // view direction
    Eigen::Vector3f to_light = light.position - point;
    // BUG FIX: r must be the squared distance to the light for the 1/r^2
    // falloff. The original computed l.dot(l), which is always 1 for the
    // normalized l, so attenuation was silently lost.
    auto r = to_light.squaredNorm();
    Eigen::Vector3f l = to_light.normalized();            // light direction
    Eigen::Vector3f h = (v + l).normalized();             // Blinn-Phong half vector

    // Specular: ks * (I / r^2) * max(0, n·h)^p
    auto ls = ks.cwiseProduct(light.intensity / r) * std::pow(std::max(0.0f, normal.normalized().dot(h)), p);
    // Ambient: ka * Ia
    auto la = ka.cwiseProduct(amb_light_intensity);
    // Diffuse: kd * (I / r^2) * max(0, n·l)
    auto ld = kd.cwiseProduct(light.intensity / r) * std::max(0.0f, normal.normalized().dot(l));
// Sample the texture at UV coordinates (u, v) and return the texel as a
// Vector3f of the cv::Vec3b channel values (0..255, stored channel order).
// NOTE(review): the collapsed newline after "// 坐标限定" had commented out
// the whole body; restored and the Chinese comment translated.
Eigen::Vector3f getColor(float u, float v)
{
    // Clamp the texture coordinates into [0, 1].
    if (u < 0) u = 0;
    if (u > 1) u = 1;
    if (v < 0) v = 0;
    if (v > 1) v = 1;
    // Map UV to image pixel coordinates; v is flipped because image rows
    // grow downward.
    auto u_img = u * width;
    auto v_img = (1 - v) * height;
    // BUG FIX: u == 1 (or v == 0) yields u_img == width (v_img == height),
    // one past the last column/row, so at<> would read out of bounds.
    if (u_img >= width) u_img = width - 1;
    if (v_img >= height) v_img = height - 1;
    auto color = image_data.at<cv::Vec3b>(v_img, u_img);
    return Eigen::Vector3f(color[0], color[1], color[2]);
}
// Fragment shader: Blinn-Phong shading with the diffuse color sampled from
// the fragment's texture (black fallback when no texture is bound).
// NOTE(review): collapsed newlines had commented out the sampling code after
// the mid-line "// TODO"; restored here.
Eigen::Vector3f texture_fragment_shader(const fragment_shader_payload& payload)
{
    Eigen::Vector3f return_color = {0, 0, 0};
    if (payload.texture)
    {
        // TODO: Get the texture value at the texture coordinates of the current fragment
        auto texcoord = payload.tex_coords;
        return_color = payload.texture->getColor(texcoord.x(), texcoord.y());
    }
    Eigen::Vector3f texture_color;
    texture_color << return_color.x(), return_color.y(), return_color.z();

    Eigen::Vector3f color = payload.view_pos, point = payload.view_pos, normal = payload.normal;
    color = texture_color;                     // sampled albedo for this fragment

    Eigen::Vector3f result_color = {0, 0, 0};

    for (auto& light : lights)
    {
        // Accumulate ambient, diffuse and specular contributions per light.
        Eigen::Vector3f v = (eye_pos - point).normalized();   // view direction
        Eigen::Vector3f to_light = light.position - point;
        // BUG FIX: squared distance for the 1/r^2 falloff. The original used
        // l.dot(l), which is always 1 for the normalized l.
        auto r = to_light.squaredNorm();
        Eigen::Vector3f l = to_light.normalized();            // light direction
        Eigen::Vector3f h = (v + l).normalized();             // half vector

        auto ls = ks.cwiseProduct(light.intensity / r) * std::pow(std::max(0.0f, normal.normalized().dot(h)), p);
        auto la = ka.cwiseProduct(amb_light_intensity);
        auto ld = kd.cwiseProduct(light.intensity / r) * std::max(0.0f, normal.normalized().dot(l));

        result_color += (la + ld + ls);
    }

    return result_color;
}
Bump Mapping
法线贴图原理
如果法线处于世界坐标系(World Space)中,那称为 World Space Normal;如果处于物体本身的局部坐标系中,那称为 Object Space Normal。
World Space Normal 一旦从贴图里解压出来后,就可以直接用了,效率很高。但是有个缺点:这个 World Space Normal 是固定的,如果物体没有保持原来的方向和位置,那原来生成的法线贴图就作废了。
因此又有了 Object Space Normal。它从贴图里解压出来后,还需要乘以模型-视图(Model-View)矩阵转换到世界坐标,或者转换到其他坐标,取决于计算过程及需求。使用 Object Space Normal 生成的贴图,物体可以被旋转和位移。但仍有一个缺点:**一张贴图只能对应特定的一个模型,模型不能有变形(deform)**。
// --- bump_fragment_shader interior (excerpt; the enclosing signature and the
// trailing ln / perturbed-normal / lighting code are not shown here) ---
// Shading inputs pulled from the fragment payload.
Eigen::Vector3f color = payload.color; Eigen::Vector3f point = payload.view_pos; Eigen::Vector3f normal = payload.normal;
// Bump scale factors from the assignment handout.
float kh = 0.2, kn = 0.1;
// TODO: Implement bump mapping here // Let n = normal = (x, y, z) // Vector t = (x*y/sqrt(x*x+z*z),sqrt(x*x+z*z),z*y/sqrt(x*x+z*z)) // Vector b = n cross product t // Matrix TBN = [t b n] // dU = kh * kn * (h(u+1/w,v)-h(u,v)) // dV = kh * kn * (h(u,v+1/h)-h(u,v)) // Vector ln = (-dU, -dV, 1) // Normal n = normalize(TBN * ln)
// Tangent frame around n, per the pseudo-code above.
// NOTE(review): the sqrt(x*x + z*z) terms divide by zero when n == (0, ±1, 0)
// — confirm the mesh normals never hit that pole.
auto n = normal.normalized(); Eigen::Vector3f t; t << n.x() * n.y() / std::sqrt(n.x() * n.x() + n.z() * n.z()), std::sqrt(n.x() * n.x() + n.z() * n.z()), n.z()* n.y() / std::sqrt(n.x() * n.x() + n.z() * n.z()); auto b = n.cross(t);
// Eigen's comma initializer concatenates the three column vectors, so the
// TBN columns are t, b, n — the tangent-space change of basis.
Eigen::Matrix3f TBN; TBN << t, b, n;
// Finite differences of the "height" (texel color norm, values derived from
// 0..255 channels) along u and v; 1/w and 1/h step exactly one texel in UV.
float w = payload.texture->width; float h = payload.texture->height; auto texcoord = payload.tex_coords; auto dU = kh * kn * (payload.texture->getColor(texcoord.x() + 1 / w, texcoord.y()).norm() - payload.texture->getColor(texcoord.x(), texcoord.y()).norm()); auto dV = kh * kn * (payload.texture->getColor(texcoord.x(), texcoord.y() + 1 / h).norm() - payload.texture->getColor(texcoord.x(), texcoord.y()).norm());
// --- displacement_fragment_shader interior (excerpt; enclosing signature not
// shown). Same TBN construction as the bump shader, but additionally moves
// the shading point along the normal by the sampled height. ---
Eigen::Vector3f color = payload.color; Eigen::Vector3f point = payload.view_pos; Eigen::Vector3f normal = payload.normal;
// Displacement scale factors from the assignment handout.
float kh = 0.2, kn = 0.1;
// TODO: Implement displacement mapping here // Let n = normal = (x, y, z) // Vector t = (x*y/sqrt(x*x+z*z),sqrt(x*x+z*z),z*y/sqrt(x*x+z*z)) // Vector b = n cross product t // Matrix TBN = [t b n] // dU = kh * kn * (h(u+1/w,v)-h(u,v)) // dV = kh * kn * (h(u,v+1/h)-h(u,v)) // Vector ln = (-dU, -dV, 1) // Position p = p + kn * n * h(u,v) // Normal n = normalize(TBN * ln)
// Tangent frame around n. NOTE(review): divides by zero when n == (0, ±1, 0).
auto n = normal.normalized(); Eigen::Vector3f t; t << n.x() * n.y() / std::sqrt(n.x() * n.x() + n.z() * n.z()), std::sqrt(n.x() * n.x() + n.z() * n.z()), n.z()* n.y() / std::sqrt(n.x() * n.x() + n.z() * n.z()); auto b = n.cross(t);
// TBN columns are t, b, n (Eigen comma initializer concatenates columns).
Eigen::Matrix3f TBN; TBN << t, b, n;
// Finite differences of the height (texel color norm) one texel apart in UV.
float w = payload.texture->width; float h = payload.texture->height; auto texcoord = payload.tex_coords; auto dU = kh * kn * (payload.texture->getColor(texcoord.x() + 1 / w, texcoord.y()).norm() - payload.texture->getColor(texcoord.x(), texcoord.y()).norm()); auto dV = kh * kn * (payload.texture->getColor(texcoord.x(), texcoord.y() + 1 / h).norm() - payload.texture->getColor(texcoord.x(), texcoord.y()).norm());
// Displace the shading point along n by the sampled height, then perturb the
// normal into world space via TBN and renormalize.
Eigen::Vector3f ln; ln << -dU, -dV, 1.0f; point = point + kn * n * payload.texture->getColor(texcoord.x(), texcoord.y()).norm(); normal = TBN * ln; normal = normal.normalized();
// Blinn-Phong lighting over the displaced point/perturbed normal (excerpt
// ends mid-loop; the accumulation and return are outside this snippet).
// NOTE(review): collapsed newlines had let the mid-line "// TODO" comment
// swallow the v/l/r/h declarations. Restored here.
Eigen::Vector3f result_color = {0, 0, 0};
for (auto& light : lights)
{
    // TODO: For each light source in the code, calculate what the *ambient*,
    // *diffuse*, and *specular* components are. Then, accumulate that result
    // on the *result_color* object.
    Eigen::Vector3f v = (eye_pos - point).normalized();   // view direction
    Eigen::Vector3f to_light = light.position - point;
    // BUG FIX: squared distance for the 1/r^2 falloff. The original used
    // l.dot(l), which is always 1 for the normalized l.
    auto r = to_light.squaredNorm();
    Eigen::Vector3f l = to_light.normalized();            // light direction
    Eigen::Vector3f h = (v + l).normalized();             // half vector

    // Specular: ks * (I / r^2) * max(0, n·h)^p
    auto ls = ks.cwiseProduct(light.intensity / r) * std::pow(std::max(0.0f, normal.normalized().dot(h)), p);
    // Ambient: ka * Ia
    auto la = ka.cwiseProduct(amb_light_intensity);
    // Diffuse: kd * (I / r^2) * max(0, n·l)
    auto ld = kd.cwiseProduct(light.intensity / r) * std::max(0.0f, normal.normalized().dot(l));