将笛卡尔坐标转换为球坐标时的纹理伪影

问题描述 投票:0回答:1

我正在尝试编写允许我操纵纹理的函数,并且遇到了一个我认为可能是由于双精度引起的问题,但我不确定。 首先,我加载 HDR 图像,以便计算它的辐照度图。我需要将像素坐标转换为 UV,将 UV 转换为球面坐标,然后来回转换笛卡尔坐标。

当我检查如何转换 UV -> spherical -> UV 时,它似乎是有效的:

另一方面,转换 UV -> 球面 -> 笛卡尔 -> 球面 -> UV 给了我这个:

有两条接缝:一条在 x = 0.5 处,一条在 x = 0 处不那么明显。

此外,奇怪的是,这条接缝具有统一的 RGB 值,不管像素是否使用双线性插值。 这是我在计算插值的类中使用的代码:

#ifndef OFFLINECUBEMAPPROCESSING_H
#define OFFLINECUBEMAPPROCESSING_H
#include "Texture.h"
#include "../kernels/CubemapProcessing.cuh"
#include <sstream>


// Tolerance used for floating-point comparisons in the processing code.
constexpr long double epsilon = 1e-10; 
// Debug colors (double-precision RGB).
constexpr glm::dvec3 RED = glm::dvec3(1. , 0 , 0); 
// NOTE(review): (0 , 1 , 1) is cyan, not yellow (yellow would be (1 , 1 , 0)) — confirm intent.
constexpr glm::dvec3 YELLOW = glm::dvec3(0 , 1 , 1); 
constexpr glm::dvec3 GREEN = glm::dvec3(0 , 1 ,0);
constexpr glm::dvec3 BLUE = glm::dvec3(0 , 0 , 1);
constexpr glm::dvec3 BLACK = glm::dvec3(0); 

template <class T> 
class EnvmapProcessing{
public:

/**
 * @brief Construct a texture from an envmap double HDR with 3 channels 
 * 
 * @param _data 
 * @param _width 
 * @param _height 
 */
EnvmapProcessing(const T *_data , const unsigned _width , const unsigned _height , 
    const unsigned int num_channels = 3){
    width = _width ; 
    height = _height ;
    channels = num_channels ; 
    for(unsigned i = 0 ; i < width * height * num_channels ; i+=num_channels){
        data.push_back(glm::vec3(_data[i] , _data[i+1] , _data[i+2])); 
    } 
 }
virtual ~EnvmapProcessing(){}

const std::vector<glm::vec3>& getData(){return data;}

/**
 * @brief Computes the radiance map according to the roughness values
 * 
 * @param roughness 
 * @return TextureData 
 */
TextureData computeSpecularIrradiance(double roughness);

/* Implementation of templated methods*/
public:

/**
 * @brief Get the normalized UV in [0 , 1] range , from [0 ,width/height] coordinates.
 * @tparam D Type of coordinates .
 * @param x Width coordinates in the range [0 , width[.
 * @param y Height coordinates in the range [0 , height[.
 * @return const glm::dvec2 UVs in the range [0 , 1].
 */
template<class D>
inline const glm::dvec2 getUvFromPixelCoords(const D x , const D y) const {
    return glm::dvec2(static_cast<long double>(x) / static_cast<long double>(width - 1) , static_cast<long double>(y) / static_cast<long double>(height - 1)) ; 
} 

/**
 * @brief Get the Pixel Coords From Uv object
 * 
 * @tparam D 
 * @param u 
 * @param v 
 * @return const glm::dvec2 
 */
template<class D>
inline const glm::dvec2 getPixelCoordsFromUv(const D u , const D v) const {
        return glm::dvec2(u * (static_cast<double>(width) - 1) , v * (static_cast<double>(height) - 1)); 
}


/**
 * @brief This method wrap around if the texture coordinates provided land beyond the texture dimensions, repeating the texture values on both axes. 
 * 
 * @tparam D Data type of the coordinates.
 * @param u Horizontal UV coordinates.
 * @param v Vertical UV coordinates.
 * @return const glm::dvec2 Normalized coordinates in the [0 , 1] range.
 */
template<class D>
inline const glm::dvec2 wrapAroundTexCoords(const D u , const D v) const {
    D u_integer = 0 , v_integer = 0 ; 
    D u_double_p = 0. , v_double_p = 0. ; 
    u_integer = std::floor(u); 
    v_integer = std::floor(v); 
    if(u > 1. || u < 0.)
        u_double_p = u - u_integer ; 
    else
        u_double_p = u ; 
    if (v > 1. || v < 0.)
        v_double_p = v - v_integer ;  
    else
        v_double_p = v ;
    return glm::dvec2(u_double_p , v_double_p); 
}       

/**
 * @brief Normalizes a set of pixel coordinates into texture bounds. 
 *       
 * @param x Horizontal coordinates. 
 * @param y Vertical coordinates.
 * @return const glm::dvec2 Normalized coordinates 
 */
inline const glm::dvec2 wrapAroundPixelCoords(const int x , const int y) const {
    unsigned int x_coord = 0 , y_coord = 0 ;
    int _width = static_cast<int>(width); 
    int _height = static_cast<int>(height);  
    if(x >= _width)
        x_coord = x % _width ; 
    else if(x < 0)
        x_coord = _width + (x % _width) ; 
    else
        x_coord = x ; 
    if(y >= _height)
        y_coord = y % _height ; 
    else if(y < 0)
        y_coord = _height + (y % _height); 
    else
        y_coord = y ;
    return glm::dvec2(x_coord , y_coord) ; 
}

/**
* @brief Computes an interpolation between 4 pixels. 
* 
* @param top_left 
* @param top_right 
* @param bottom_left 
* @param bottom_right 
* @param point 
* @return * const T 
*/
const glm::dvec3 bilinearInterpolate(const glm::dvec2 top_left , const glm::dvec2 top_right , const glm::dvec2 bottom_left , const glm::dvec2 bottom_right , const glm::dvec2 point) const {
    const double u = (point.x - top_left.x) / (top_right.x - top_left.x); 
    const double v = (point.y - top_left.y) / (bottom_left.y - top_left.y);
    const glm::dvec3 top_interp = (1 - u) * discreteSample(top_left.x , top_left.y) + u * discreteSample(top_right.x , top_right.y);
    const glm::dvec3 bot_interp = (1 - u) * discreteSample(bottom_left.x , bottom_left.y)  + u * discreteSample(bottom_right.x , bottom_right.y) ;  
    return (1 - v) * top_interp + v * bot_interp ; 
}


/**
 * @brief Sample the textures using Integer coordinates
 * 
 * @param x Horizontal coordinates
 * @param y Vertical coordinates
 * @return const T RGB value of the sampled texel
 */
inline const glm::dvec3 discreteSample(int x , int y) const{
    const glm::dvec2 normalized = wrapAroundPixelCoords(static_cast<int>(x) , static_cast<int>(y));
    const glm::dvec3 texel_value = data[normalized.x * height + normalized.y] ;
    return texel_value ; 
}


/**
 * @brief This method samples a value from the equirectangular envmap 
 *! Note : In case the coordinates go beyond the bounds of the texture , we wrap around .
 *! In addition , sampling texels may return a bilinear interpolated value when u,v are converted to a (x ,y) non integer texture coordinate.  
 * @tparam D Type of the coordinates 
 * @param u Horizontal uv coordinates
 * @param v Vertical uv coordinates
 * @return T Returns a texel value of type T 
 */
template<class D> 
inline const glm::dvec3 uvSample(const D u , const D v) const {
    const glm::dvec2 wrap_uv = wrapAroundTexCoords(u , v); 
    const glm::dvec2 pixel_coords = getPixelCoordsFromUv(wrap_uv.x , wrap_uv.y);
    const glm::dvec2 top_left(std::floor(pixel_coords.x) , std::floor(pixel_coords.y));
    const glm::dvec2 top_right(std::floor(pixel_coords.x) + 1 , std::floor(pixel_coords.y));
    const glm::dvec2 bottom_left(std::floor(pixel_coords.x)  , std::floor(pixel_coords.y) + 1);
    const glm::dvec2 bottom_right(std::floor(pixel_coords.x) + 1 , std::floor(pixel_coords.y) + 1);
    const glm::dvec3 texel_value = bilinearInterpolate(top_left , top_right , bottom_left , bottom_right , pixel_coords); 
    return texel_value ; 
} 

/**
 * @brief Bake an equirect envmap to an irradiance map
 * @param delta Size of the step
 * @return std::unique_ptr<TextureData> Texture data containing width , height , and double f_data about the newly created map.
 */
std::unique_ptr<TextureData> computeDiffuseIrradiance(const T delta) const {
    TextureData envmap_tex_data ; 
    envmap_tex_data.data_format = Texture::RGB ; 
    envmap_tex_data.internal_format = Texture::RGB32F ; 
    envmap_tex_data.data_type = Texture::FLOAT;
    envmap_tex_data.width = width ;
    envmap_tex_data.height = height ;  
    envmap_tex_data.mipmaps = 0;
    envmap_tex_data.f_data = new float[width * height * channels];
    unsigned index = 0 ;  
    for(unsigned i = 0 ; i < width ; i++){
            for(unsigned j = 0 ; j < height ; j++){
                    glm::dvec2 uv = getUvFromPixelCoords(i , j);
                    const glm::dvec2 sph = gpgpu_math::uvToSpherical(uv.x , uv.y);
                    const glm::dvec2 sph_to_uv = gpgpu_math::sphericalToUv(sph); 
                    const glm::dvec3 cart = gpgpu_math::sphericalToCartesian(sph.x , sph.y);
                    glm::dvec2 cart_to_sph = gpgpu_math::cartesianToSpherical(cart); 
                    glm::dvec2 uvt = gpgpu_math::sphericalToUv(cart_to_sph); //The problem is here , when the UVs have been obtained from Cartesians -> spherical coordinates. 
                    const glm::dvec3 irrad = uvSample(uvt.x , uvt.y);
                    float x = static_cast<float>(irrad.x) ;
                    float y = static_cast<float>(irrad.y) ;
                    float z = static_cast<float>(irrad.z) ;
                    envmap_tex_data.f_data[index++] = x ;  
                    envmap_tex_data.f_data[index++] = y ; 
                    envmap_tex_data.f_data[index++] = z ; 
            }
    }

    return std::make_unique<TextureData>(envmap_tex_data);
} 


protected:
    std::vector<glm::vec3> data ;
    unsigned width ; 
    unsigned height ;
    unsigned channels ; 
};

此外,这是进行转换的代码:

#ifndef CUBEMAPPROCESSING_CUH
#define CUBEMAPPROCESSING_CUH
#include "Includes.cuh"
#include <cmath>
namespace gpgpu_math{


/**
 * @brief Converts equirectangular UVs to spherical angles.
 * @param u Horizontal UV in [0 , 1], spanning the full azimuth [0 , 2π].
 * @param v Vertical UV in [0 , 1], spanning the polar angle [0 , π].
 * @return const glm::dvec2 (phi , theta).
 */
template<class T>
__host__ __device__
inline const glm::dvec2 uvToSpherical(const T u , const T v){
    const T azimuth = 2 * PI * u;
    const T polar_angle = PI * v;
    return glm::dvec2(azimuth , polar_angle);
}

/** @brief Convenience overload forwarding the vector's components. */
__host__ __device__
inline const glm::dvec2 uvToSpherical(glm::dvec2 uv){
    return uvToSpherical(uv.x , uv.y);
}

/**
 * @brief Converts spherical angles back to equirectangular UVs.
 * Inverse of uvToSpherical: rescales the angles into [0 , 1].
 * @param phi Azimuth in [0 , 2π].
 * @param theta Polar angle in [0 , π].
 * @return const glm::dvec2 (u , v).
 */
template<class T>
__host__ __device__
inline const glm::dvec2 sphericalToUv(const T phi , const T theta){
    return glm::dvec2(phi / (2 * PI) , theta / PI);
}

/** @brief Convenience overload forwarding the vector's components. */
__host__ __device__
inline const glm::dvec2 sphericalToUv(glm::dvec2 sph){
    return sphericalToUv(sph.x , sph.y);
}

/**
 * @brief Converts spherical angles to a unit direction vector.
 * Z-up convention: theta is measured from the +Z axis, phi rotates around it.
 * @param phi Azimuth in [0 , 2π].
 * @param theta Polar angle in [0 , π].
 * @return const glm::dvec3 Normalized direction (x , y , z).
 */
template<class T>
__host__ __device__
inline const glm::dvec3 sphericalToCartesian(const T phi , const T theta){
    const T sin_theta = sin(theta);
    const glm::dvec3 direction(sin_theta * cos(phi) , sin_theta * sin(phi) , cos(theta));
    return glm::normalize(direction);
}

/** @brief Convenience overload forwarding the vector's components. */
__host__ __device__
inline const glm::dvec3 sphericalToCartesian(glm::dvec2 sph){
    return sphericalToCartesian(sph.x , sph.y);
}

/**
 * @brief Recovers spherical angles from a unit direction vector.
 * Inverse of sphericalToCartesian (Z-up convention).
 * @param x , y , z Components of a unit direction vector.
 * @return const glm::dvec2 (phi , theta).
 */
template<class T>
__host__ __device__
inline const glm::dvec2 cartesianToSpherical(const T x , const T y , const T z){
    const T theta = acos(z); 
    // Use the double-precision atan2: the previous atan2f call truncated the
    // double inputs to float, losing precision on the round trip and
    // contributing to the seam artifacts.
    const T phi = atan2(y , x);
    // NOTE: at the poles sin(theta) == 0, so x == y == 0 and atan2(0 , 0)
    // returns 0 — the original phi is unrecoverable there by construction.
    return glm::dvec2(phi , theta); 
}

/** @brief Convenience overload forwarding the vector's components. */
__host__ __device__
inline const glm::dvec2 cartesianToSpherical(glm::dvec3 xyz){
    return cartesianToSpherical(xyz.x , xyz.y , xyz.z);
}

所以,问题回顾一下:

  1. 转换 UV->球形->UV 有效。
  2. 转换 UV->球面->笛卡尔->球面->UV 给出了大致良好的坐标,但不精确似乎在屏幕中间创建了伪影线......好吧,我想这是一个精度问题,但也许我把数学搞砸了。
  3. 接缝分别位于 X 轴的中间(x = 0.5,对应 φ = π)和 x = 0 处(对应 φ = 0)。
  4. 伪像似乎受到周围像素的影响,但不受插值的影响,因为它倾向于保持相同的 RGB 值。
c++ math textures
1个回答
0
投票

将我看似有用的评论变成连贯的答案。

sphericalToCartesian
你有

    const T z = cos(theta);
    const T x = sin(theta) * cos(phi); 
    const T y = sin(theta) * sin(phi); 

cartesianToSpherical
尝试使用

来反转它
    const T theta = acos(z); 
    const T phi = atan2f(y , x);

但如果 θ 是 π 的倍数,则 sin(theta) 为零,因此 x = y = 0。这会产生 phi = atan2f(0, 0) = 0,而不是 φ 的原始值。因此,对于 θ 的某些值,坐标无法在从球面到笛卡尔坐标再返回的往返过程中保留下来。

通常您会期望这种情况发生在球体的北极和南极,这应该对应于纹理图像的顶部和底部边缘。但看来您也交换了 x 和 y 坐标:当您编写 data[normalized.x * height + normalized.y] 时,将以列优先顺序访问像素,而图像数据更常见的约定是行优先,即使用 data[normalized.x + normalized.y * width]。这可能会导致 θ 的临界值对应于图像中令人意外的位置,从而产生这些垂直接缝,而不是顶部和底部的水平特征。

© www.soinside.com 2019 - 2024. All rights reserved.