Search Unity

  1. Good news ✨ We have more Unite Now videos available for you to watch on-demand! Come check them out and ask our experts any questions!
    Dismiss Notice
  2. Ever participated in one of our Game Jams? Want pointers on your project? Our Evangelists will be available on Friday to give feedback. Come share your games with us!
    Dismiss Notice

Stereo Shader Issues - Doubled image?

Discussion in 'AR/VR (XR) Discussion' started by Manello, Jun 15, 2020.

  1. Manello

    Manello

    Joined:
    Dec 14, 2014
    Posts:
    9
    Hey!

    So after I got some outline shader working for a normal rendering process I wanted to use it for a VR application.

    The shader currently has 2 problems:
    1: It renders the post-processed effect twice on each eye, as seen below, in a very weird screen division.
    2: The displacement between the outline and the actual object is not correct.

    vrout.PNG

    I found this documentation describing the problems above to some extent: https://docs.unity3d.com/Manual/SinglePassStereoRendering.html

    But the problem is that I am fairly new to shaders and I can't figure out how to use all these extra functions. I had no luck by just playing around with it for some hours.

    Has anyone had similar problems with their VR shader and might be able to help me here?

    Info: I am using Multipass rendering, if you say there is a simple fix for Single Pass as well I am also up for that.
    Using Unity 2019.3

    Shader code:

    Code (CSharp):
    1. Shader "Hidden/Roystan/Outline Post Process"
    2. {
    3.     SubShader
    4.     {
    5.         Cull Off ZWrite Off ZTest Always
    6.  
    7.         Pass
    8.         {
    9.             // Custom post processing effects are written in HLSL blocks,
    10.             // with lots of macros to aid with platform differences.
    11.             // https://github.com/Unity-Technologies/PostProcessing/wiki/Writing-Custom-Effects#shader
    12.             HLSLPROGRAM
    13.             #pragma vertex Vert
    14.             #pragma fragment Frag
    15.             #include "Packages/com.unity.postprocessing/PostProcessing/Shaders/StdLib.hlsl"
    16.  
    17.             TEXTURE2D_SAMPLER2D(_MainTex, sampler_MainTex);
    18.             // _CameraNormalsTexture contains the view space normals transformed
    19.             // to be in the 0...1 range.
    20.             TEXTURE2D_SAMPLER2D(_CameraNormalsTexture, sampler_CameraNormalsTexture);
    21.             TEXTURE2D_SAMPLER2D(_CameraDepthTexture, sampler_CameraDepthTexture);
    22.      
    23.             // Data pertaining to _MainTex's dimensions.
    24.             // https://docs.unity3d.com/Manual/SL-PropertiesInPrograms.html
    25.             float4 _MainTex_TexelSize;
    26.  
    27.             float _Scale;
    28.             float4 _Color;
    29.  
    30.             float _DepthThreshold;
    31.             float _DepthNormalThreshold;
    32.             float _DepthNormalThresholdScale;
    33.  
    34.             float _NormalThreshold;
    35.  
    36.             float _UseTexture;
    37.             float4 _FaceColor;
    38.  
    39.             half4 _MainTex_ST;
    40.             //uniform sampler2D _MainTex;
    41.  
    42.             // This matrix is populated in PostProcessOutline.cs.
    43.             float4x4 _ClipToView;
    44.  
    45.             // Combines the top and bottom colors using normal blending.
    46.             // https://en.wikipedia.org/wiki/Blend_modes#Normal_blend_mode
    47.             // This performs the same operation as Blend SrcAlpha OneMinusSrcAlpha.
    48.             float4 alphaBlend(float4 top, float4 bottom)
    49.             {
    50.                 float3 color = (top.rgb * top.a) + (bottom.rgb * (1 - top.a));
    51.                 float alpha = top.a + bottom.a * (1 - top.a);
    52.  
    53.                 return float4(color, alpha);
    54.             }
    55.  
    56.             // Both the Varyings struct and the Vert shader are copied
    57.             // from StdLib.hlsl included above, with some modifications.
    58.             struct Varyings
    59.             {
    60.                 float4 vertex : SV_POSITION;
    61.                 float2 texcoord : TEXCOORD0;
    62.                 float2 texcoordStereo : TEXCOORD1;
    63.                 float3 viewSpaceDir : TEXCOORD2;
    64.             #if STEREO_INSTANCING_ENABLED
    65.                 uint stereoTargetEyeIndex : SV_RenderTargetArrayIndex;
    66.             #endif
    67.             };
    68.  
    69.             Varyings Vert(AttributesDefault v)
    70.             {
    71.                 Varyings o;
    72.                 o.vertex = float4(v.vertex.xy, 0.0, 1.0);
    73.                 o.texcoord = TransformTriangleVertexToUV(v.vertex.xy);
    74.                 // Transform our point first from clip to view space,
    75.                 // taking the xyz to interpret it as a direction.
    76.                 o.viewSpaceDir = mul(_ClipToView, o.vertex).xyz;
    77.  
    78.             #if UNITY_UV_STARTS_AT_TOP
    79.                 o.texcoord = UnityStereoScreenSpaceUVAdjust(o.texcoord, _MainTex_ST) * float2(1.0, -1.0) + float2(0.0, 1.0);
    80.             #endif
    81.  
    82.                 o.texcoordStereo = TransformStereoScreenSpaceTex(o.texcoord, 1.0);
    83.  
    84.                 return o;
    85.             }
    86.  
    87.             float4 Frag(Varyings i) : SV_Target
    88.             {
    89.                 float halfScaleFloor = floor(_Scale * 0.5);
    90.                 float halfScaleCeil = ceil(_Scale * 0.5);
    91.  
    92.                 // Sample the pixels in an X shape, roughly centered around i.texcoord.
    93.                 // As the _CameraDepthTexture and _CameraNormalsTexture default samplers
    94.                 // use point filtering, we use the above variables to ensure we offset
    95.                 // exactly one pixel at a time.
    96.  
    97.                 //THIS LINE BELOW HAVE TO ADJUST UVS TO STEREO SCREEN SPACE
    98.                 //half4 originalPixel = tex2D(_MainTex, UnityStereoScreenSpaceUVAdjust(input.uv, _MainTex_ST));
    99.                 //half4 outlineSource = tex2D(_OutlineSource, UnityStereoScreenSpaceUVAdjust(uv, _MainTex_ST));
    100.  
    101.                 float4 stereoTex = tex2D(sampler_MainTex, UnityStereoScreenSpaceUVAdjust(i.texcoord, _MainTex_ST));
    102.  
    103.                 float2 bottomLeftUV = i.texcoord - float2(_MainTex_TexelSize.x, _MainTex_TexelSize.y) * halfScaleFloor;
    104.                 float2 topRightUV = i.texcoord + float2(_MainTex_TexelSize.x, _MainTex_TexelSize.y) * halfScaleCeil;
    105.                 float2 bottomRightUV = i.texcoord + float2(_MainTex_TexelSize.x * halfScaleCeil, -_MainTex_TexelSize.y * halfScaleFloor);
    106.                 float2 topLeftUV = i.texcoord + float2(-_MainTex_TexelSize.x * halfScaleFloor, _MainTex_TexelSize.y * halfScaleCeil);
    107.  
    108.                 float3 normal0 = SAMPLE_TEXTURE2D(_CameraNormalsTexture, sampler_CameraNormalsTexture, bottomLeftUV).rgb;
    109.                 float3 normal1 = SAMPLE_TEXTURE2D(_CameraNormalsTexture, sampler_CameraNormalsTexture, topRightUV).rgb;
    110.                 float3 normal2 = SAMPLE_TEXTURE2D(_CameraNormalsTexture, sampler_CameraNormalsTexture, bottomRightUV).rgb;
    111.                 float3 normal3 = SAMPLE_TEXTURE2D(_CameraNormalsTexture, sampler_CameraNormalsTexture, topLeftUV).rgb;
    112.  
    113.                 float depth0 = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, sampler_CameraDepthTexture, bottomLeftUV).r;
    114.                 float depth1 = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, sampler_CameraDepthTexture, topRightUV).r;
    115.                 float depth2 = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, sampler_CameraDepthTexture, bottomRightUV).r;
    116.                 float depth3 = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, sampler_CameraDepthTexture, topLeftUV).r;
    117.  
    118.                 // Transform the view normal from the 0...1 range to the -1...1 range.
    119.                 float3 viewNormal = normal0 * 2 - 1;
    120.                 float NdotV = 1 - dot(viewNormal, -i.viewSpaceDir);
    121.  
    122.                 // Return a value in the 0...1 range depending on where NdotV lies
    123.                 // between _DepthNormalThreshold and 1.
    124.                 float normalThreshold01 = saturate((NdotV - _DepthNormalThreshold) / (1 - _DepthNormalThreshold));
    125.                 // Scale the threshold, and add 1 so that it is in the range of 1..._NormalThresholdScale + 1.
    126.                 float normalThreshold = normalThreshold01 * _DepthNormalThresholdScale + 1;
    127.  
    128.                 // Modulate the threshold by the existing depth value;
    129.                 // pixels further from the screen will require smaller differences
    130.                 // to draw an edge.
    131.                 float depthThreshold = _DepthThreshold * depth0 * normalThreshold;
    132.  
    133.                 float depthFiniteDifference0 = depth1 - depth0;
    134.                 float depthFiniteDifference1 = depth3 - depth2;
    135.                 // edgeDepth is calculated using the Roberts cross operator.
    136.                 // The same operation is applied to the normal below.
    137.                 // https://en.wikipedia.org/wiki/Roberts_cross
    138.                 float edgeDepth = sqrt(pow(depthFiniteDifference0, 2) + pow(depthFiniteDifference1, 2)) * 100;
    139.                 edgeDepth = edgeDepth > depthThreshold ? 1 : 0;
    140.  
    141.                 float3 normalFiniteDifference0 = normal1 - normal0;
    142.                 float3 normalFiniteDifference1 = normal3 - normal2;
    143.                 // Dot the finite differences with themselves to transform the
    144.                 // three-dimensional values to scalars.
    145.                 float edgeNormal = sqrt(dot(normalFiniteDifference0, normalFiniteDifference0) + dot(normalFiniteDifference1, normalFiniteDifference1));
    146.                 edgeNormal = edgeNormal > _NormalThreshold ? 1 : 0;
    147.  
    148.                 float edge = max(edgeDepth, edgeNormal);
    149.  
    150.                 float4 edgeColor = float4(_Color.rgb, _Color.a * edge);
    151.  
    152.                 if (_UseTexture > 0) {
    153.                     float4 color = SAMPLE_TEXTURE2D(_MainTex, sampler_MainTex, i.texcoord);
    154.                     return alphaBlend(edgeColor, color);
    155.                 }
    156.                 else {
    157.                     return alphaBlend(edgeColor, _FaceColor);
    158.                 }
    159.              
    160.             }
    161.             ENDHLSL
    162.         }
    163.     }
    164. }
     
    Last edited: Jun 15, 2020
  2. ThomasZeng

    ThomasZeng

    Unity Technologies

    Joined:
    Jun 24, 2019
    Posts:
    26
    Hi @Manello
    May I ask which Render Pipeline you were using in your project?
    I assume this is the built-in renderer (not URP or HDRP)?
     
  3. Manello

    Manello

    Joined:
    Dec 14, 2014
    Posts:
    9
    Yup I am just using the built in RP, as I had no luck at all with using URP/HDRP in VR.
    @ThomasZeng
     
  4. ThomasZeng

    ThomasZeng

    Unity Technologies

    Joined:
    Jun 24, 2019
    Posts:
    26
    Thanks @Manello!
    If you could provide me a repro project, I could take a look and provide some feedback :)
     
  5. Manello

    Manello

    Joined:
    Dec 14, 2014
    Posts:
    9
  6. ThomasZeng

    ThomasZeng

    Unity Technologies

    Joined:
    Jun 24, 2019
    Posts:
    26
unityunity