Search Unity

Making per-object UV's in screen space

Discussion in 'Shaders' started by oceanq, Jul 25, 2017.

  1. oceanq

    oceanq

    Joined:
    Apr 10, 2015
    Posts:
    22
    I'm trying to make per-object camera-facing UVs that "stick to" the object, for NPR textures (watercolor paper, sketchmaps, etc).
    I figured the best way to do that would be to transform both the objects position and its vertices into either clip space or screenspace, then subtract one from the other and use the XY results as UV coordinates.

    This kind of works when I try it in clip space, but I'm getting skewing at the edges of the frame and nasty, inverting artifacts when faces are perpendicular to the camera, and the texture isn't perfectly screen aligned.

    Clearly I'm missing something - some transform or scaling factor- anybody have any suggestions?




    Here's the shader:
    Code (CSharp):
    1.  
    2.  
    3. Shader "Custom/UVObject_face_camera_2" {
    4.  
    5.     Properties {
    6.         _debug_texture ("debug_texture", 2D) = "white" {}
    7.         _tilingamount ("tiling amount", Range(0, 5)) = 0.03428461
    8.     }
    9.    
    10.     SubShader {
    11.         Tags {
    12.             "RenderType"="Opaque"
    13.             "DisableBatching"="True"
    14.         }
    15.        
    16.         LOD 200
    17.        
    18.         Pass {
    19.             Name "FORWARD"
    20.             Tags {
    21.                 "LightMode"="ForwardBase"
    22.             }
    23.            
    24.            
    25.             CGPROGRAM
    26.             #pragma vertex vert
    27.             #pragma fragment frag
    28.             #define UNITY_PASS_FORWARDBASE
    29.             #include "UnityCG.cginc"
    30.             #pragma multi_compile_fwdbase_fullshadows
    31.             #pragma only_renderers d3d9 d3d11 glcore gles
    32.             #pragma target 3.0
    33.             uniform sampler2D _debug_texture; uniform float4 _debug_texture_ST;
    34.             uniform float _tilingamount;
    35.            
    36.            
    37.             float4 object_position_to_camera( float3 ObjectPosition ){
    38.                 float4 clipSpacePosition =  mul(UNITY_MATRIX_VP, ObjectPosition);
    39.                 return clipSpacePosition;
    40.             }
    41.            
    42.             float4 vertex_position_to_camera( float3 InputVertices ){
    43.                 float4 clipSpaceVertices =  mul(UNITY_MATRIX_VP, InputVertices);
    44.                 return clipSpaceVertices;
    45.             }
    46.            
    47.             struct VertexInput {
    48.                 float4 vertex : POSITION;
    49.             };
    50.            
    51.             struct VertexOutput {
    52.                 float4 pos : SV_POSITION;
    53.                 float4 posWorld : TEXCOORD0;
    54.             };
    55.            
    56.             VertexOutput vert (VertexInput v) {
    57.                 VertexOutput o = (VertexOutput)0;
    58.                 o.posWorld = mul(unity_ObjectToWorld, v.vertex);
    59.                 o.pos = UnityObjectToClipPos( v.vertex );
    60.                 return o;
    61.             }
    62.            
    63.             float4 frag(VertexOutput i) : COLOR {
    64.                 float4 objPos = mul ( unity_ObjectToWorld, float4(0.0,0.0,0,1) );
    65.                 float2 vertsClipPosition = vertex_position_to_camera( i.posWorld.rgb ).rg;
    66.                 float2 objectClipPosition = object_position_to_camera( objPos.rgb ).rg;
    67.                 float2 generatedUVCoords = float2((vertsClipPosition.r - objectClipPosition.r), (objectClipPosition.g - vertsClipPosition.g));  // generate UV's by subtracting
    68.                 float2 scale_tiles =  generatedUVCoords  *_tilingamount;
    69.                 float4 _debug_texture_var = tex2D(_debug_texture, TRANSFORM_TEX(scale_tiles, _debug_texture));
    70.                 float3 finalColor = _debug_texture_var.rgb;
    71.                 return fixed4(finalColor,1);
    72.             }
    73.            
    74.             ENDCG
    75.         }
    76.     }
    77.    
    78.     FallBack "Diffuse"
    79.     CustomEditor "ShaderForgeMaterialInspector"
    80. }
    81.  
     
  2. bgolus

    bgolus

    Joined:
    Dec 7, 2012
    Posts:
    12,352
  3. oceanq

    oceanq

    Joined:
    Apr 10, 2015
    Posts:
    22
    That is super cool! I'll definitely try that - but I'm still looking for a way of getting my screen projected textures to do the right thing.
     
  4. neoshaman

    neoshaman

    Joined:
    Feb 11, 2011
    Posts:
    6,493
    I plan to do that for my project lol, subscribing. I also wanted to avoid the swimming when objects rotate, which might not be possible lol.
     
  5. bgolus

    bgolus

    Joined:
    Dec 7, 2012
    Posts:
    12,352
    If you're adventurous, the shader I originally posted was a mix of Kyle's original and this technique (just sampling a texture rather than a noise function):
    http://graphics.cs.williams.edu/papers/HashedAlphaI3D17/

    Also, I forgot that Kyle's shader doesn't use triplanar mapping, so it's even easier to implement. I had done triplanar for one of my tests and actually abandoned it in favor of a 3d texture for the effect I wanted...

    The stretching you see in the above image is from view space being an orthographic projection. When moving the positions around, subtracting the pivot will indeed keep the view space UVs stable, but the stretching across the side is unavoidable.

    Using clip space accounts for perspective and will prevent stretching, but you cannot prevent the "swimming" since the projection changes as the object moves.

    In short it's impossible to have a single view based projection that both prevents stretching and swimming. If you really were firm on keeping both you would have to pick one projection (probably clip space) and apply it as "static" UVs (or a constant matrix from script) so it can be reused for several frames and then swap or blend to a new one when it gets too far. Would work for rotation then too.


    But honestly the constant scale technique is a lot easier and going to be a lot nicer looking.
     
  6. GearKlik

    GearKlik

    Joined:
    Sep 21, 2015
    Posts:
    58
  7. neoshaman

    neoshaman

    Joined:
    Feb 11, 2011
    Posts:
    6,493
    I had tried to implement this technique already in the past. I have zero mastery of projection, but doesn't Unity provide a keyword like screenUV? I remember using that in an early test (Strumpy node). Why not offset based on that and ditch the complex projection?

    The second test I did was simply UVing the character in 0-1 projected space, which works well with rotation along the axis of projection, but of course you would have problems with rotation around other axes. In fact I was thinking of projecting a UV gradient and using that as a sampling for a screen space texture ... I have been wondering if I could use a hybrid with triplanar masking.

    I don't plan to try anytime soon though, I have other task to complete.
     
  8. oceanq

    oceanq

    Joined:
    Apr 10, 2015
    Posts:
    22
    For future people who come this way looking for a solution to this problem (per-object screen facing UV's) the answer was found here:
    https://realtimevfx.com/t/camera-facing-uvs/384/35

    Code (CSharp):
    1.  
    2.  
    3.  
    4.  
    5. Shader "Custom/UVObject_face_camera_2" {
    6. Properties {
    7. _MainTex ("Color Texture", 2D) = "white" {}
    8. _SSUVScale("UV Scale", Range(0,10)) = 1
    9. }
    10.  
    11. CGINCLUDE
    12.     sampler2D _MainTex;
    13.     float _SSUVScale;
    14.  
    15.     struct appdata {
    16.         float4 vertex : POSITION;
    17.     };
    18.  
    19.     struct v2f {
    20.         float4 pos : POSITION;
    21.         float4 pos2: TEXCOORD0;
    22.     };
    23.  
    24.  
    25.  
    26.     float2 GetScreenUV(float2 clipPos, float UVscaleFactor)
    27.     {
    28.         float4 SSobjectPosition = UnityObjectToClipPos (float4(0,0,0,1.0)) ;
    29.         float2 screenUV = float2(clipPos.x,clipPos.y);
    30.         float screenRatio = _ScreenParams.y/_ScreenParams.x;
    31.  
    32.         screenUV.x -= SSobjectPosition.x/(SSobjectPosition.w);
    33.         screenUV.y -= SSobjectPosition.y/(SSobjectPosition.w);
    34.  
    35.         screenUV.y *= screenRatio;
    36.  
    37.         screenUV *= 1/UVscaleFactor;
    38.         screenUV *= SSobjectPosition.z;
    39.  
    40.         return screenUV;
    41.     };
    42.  
    43.  
    44.  
    45.  
    46. ENDCG
    47.  
    48. SubShader {
    49.       Tags { "RenderType" = "Opaque"
    50.             "DisableBatching"="True"
    51.      }
    52.  
    53.       Pass
    54.     {
    55.         CGPROGRAM
    56.         #pragma vertex vert
    57.         #pragma fragment frag
    58.         #include "UnityCG.cginc"
    59.  
    60.         v2f vert(appdata v) {              
    61.             v2f o;
    62.             o.pos = UnityObjectToClipPos(v.vertex);
    63.             o.pos2 = o.pos;
    64.  
    65.             return o;
    66.         }      
    67.  
    68.         half4 frag(v2f i) :COLOR
    69.         {              
    70.             float2 screenUV = GetScreenUV(i.pos2.xy/ i.pos2.w, _SSUVScale);
    71.             half4 screenTexture = tex2D (_MainTex, screenUV);
    72.  
    73.             return screenTexture;
    74.         }
    75.         ENDCG              
    76.     }
    77.  
    78. }
    79. Fallback "Diffuse"
    80. }
    81.  
     
    Last edited: Jul 27, 2017
    neoshaman likes this.
  9. oceanq

    oceanq

    Joined:
    Apr 10, 2015
    Posts:
    22
    @bgolus - I'm trying to figure out your constant scale technique. Do you have any examples that I could look at?
    Thanks!
     
  10. bgolus

    bgolus

    Joined:
    Dec 7, 2012
    Posts:
    12,352
    I couldn't find my original example shader from that twitter post, so this is a little different in implementation. In that original twitter thread I was using a constant scaling technique I wrote, and in this one I'm using the method from the Hashed Alpha Testing link I posted above. For Dino Frontier I ended up using something more like this, but I think my original implementation looks a little better for this use case.

    Anyway, I'd go grab the example project from Kyle's tutorial page and try using this shader in the demo scene.
    http://kylehalladay.com/blog/tutorial/2017/02/21/Pencil-Sketch-Effect.html

    Code (CSharp):
    1. // Modified shader from http://kylehalladay.com/blog/tutorial/2017/02/21/Pencil-Sketch-Effect.html
    2. Shader "Unlit/SingleObjectHatchConstantScale"
    3. {
    4.     Properties
    5.     {
    6.         _MainTex ("Texture", 2D) = "white" {}
    7.         _Hatch0("Hatch 0", 2D) = "white" {}
    8.         _Hatch1("Hatch 1", 2D) = "white" {}
    9.  
    10.     }
    11.     SubShader
    12.     {
    13.         Tags { "RenderType"="Opaque" }
    14.         LOD 100
    15.  
    16.         Pass
    17.         {
    18.             Tags{ "LightMode" = "ForwardBase" }
    19.  
    20.             CGPROGRAM
    21.             #pragma vertex vert
    22.             #pragma fragment frag
    23.                        
    24.             struct appdata
    25.             {
    26.                 float4 vertex : POSITION;
    27.                 float2 uv : TEXCOORD0;
    28.                 float3 norm : NORMAL;
    29.             };
    30.  
    31.             struct v2f
    32.             {
    33.                 float4 pos : SV_POSITION;
    34.                 float2 uv : TEXCOORD0;
    35.                 float3 nrm : TEXCOORD1;
    36.             };
    37.  
    38.             sampler2D _MainTex;
    39.             float4 _MainTex_ST;
    40.             float4 _MainTex_TexelSize;
    41.  
    42.             sampler2D _Hatch0;
    43.             sampler2D _Hatch1;
    44.             float4 _Hatch0_TexelSize;
    45.             float4 _Hatch1_TexelSize;
    46.  
    47.             float4 _LightColor0;
    48.            
    49.             v2f vert (appdata v)
    50.             {
    51.                 v2f o;
    52.                 o.pos = UnityObjectToClipPos(v.vertex);
    53.                 o.uv = v.uv * _MainTex_ST.xy + _MainTex_ST.zw;
    54.                 o.nrm = mul(float4(v.norm, 0.0), unity_WorldToObject).xyz;
    55.                 return o;
    56.             }
    57.  
    58.             // modified from http://graphics.cs.williams.edu/papers/HashedAlphaI3D17/
    59.             fixed4 tex2DConstScale(sampler2D tex, float texSize, float2 uv)
    60.             {
    61.                 // Find the discretized derivatives of our coordinates
    62.                 float maxDeriv = max( length(ddx(uv)), length(ddy(uv)) );
    63.                 float pixScale = 1.0 / (texSize * maxDeriv);
    64.                 // Find two nearest log-discretized noise scales
    65.                 float2 pixScales = float2(
    66.                     exp2(floor(log2(pixScale))),
    67.                     exp2( ceil(log2(pixScale)))
    68.                     );
    69.                 // Factor to interpolate lerp with
    70.                 float lerpFactor = frac( log2(pixScale) );
    71.  
    72.                 return lerp(
    73.                     tex2D(tex, pixScales.x * uv),
    74.                     tex2D(tex, pixScales.y * uv),
    75.                     lerpFactor
    76.                     );
    77.             }
    78.  
    79.             fixed3 Hatching(float2 _uv, half _intensity)
    80.             {
    81.                 half3 hatch0 = tex2DConstScale(_Hatch0, _Hatch0_TexelSize.z, _uv).rgb;
    82.                 half3 hatch1 = tex2DConstScale(_Hatch1, _Hatch1_TexelSize.z, _uv).rgb;
    83.  
    84.                 half3 overbright = max(0, _intensity - 1.0);
    85.  
    86.                 half3 weightsA = saturate((_intensity * 6.0) + half3(-0, -1, -2));
    87.                 half3 weightsB = saturate((_intensity * 6.0) + half3(-3, -4, -5));
    88.  
    89.                 weightsA.xy -= weightsA.yz;
    90.                 weightsA.z -= weightsB.x;
    91.                 weightsB.xy -= weightsB.zy;
    92.  
    93.                 hatch0 = hatch0 * weightsA;
    94.                 hatch1 = hatch1 * weightsB;
    95.  
    96.                 half3 hatching = overbright + hatch0.r +
    97.                     hatch0.g + hatch0.b +
    98.                     hatch1.r + hatch1.g +
    99.                     hatch1.b;
    100.  
    101.                 return hatching;
    102.  
    103.             }
    104.  
    105.            
    106.             fixed4 frag (v2f i) : SV_Target
    107.             {
    108.                 fixed4 color = tex2D(_MainTex, i.uv);
    109.                 fixed3 diffuse = color.rgb * _LightColor0.rgb * dot(_WorldSpaceLightPos0, normalize(i.nrm));
    110.  
    111.                 fixed intensity = dot(diffuse, fixed3(0.2326, 0.7152, 0.0722));
    112.  
    113.                 color.rgb =  Hatching(i.uv * 8, intensity);
    114.  
    115.                 return color;
    116.             }
    117.             ENDCG
    118.         }
    119.     }
    120. }
     
    oceanq likes this.
  11. oceanq

    oceanq

    Joined:
    Apr 10, 2015
    Posts:
    22
    Thanks very much! I'm going to spend some time pulling it apart and understanding how it works. Lots to learn here!
     
  12. MaT227

    MaT227

    Joined:
    Jul 3, 2012
    Posts:
    628
    Hey everyone,

    I am currently trying to sample a texture in screen space and it works well. But I would like to be able to constrain the UV position and scale based on the object's position and distance from the camera. I found this post and other examples, but I also faced some issues and for the moment I don't see how to fix them.

    I made another subject with code examples and a gif here : https://forum.unity.com/threads/per-object-screen-space-uv-issue.903581/

    Thanks a lot !
     
  13. JakHussain

    JakHussain

    Joined:
    Oct 20, 2016
    Posts:
    318
    I've been trying to make sense of how the camera facing uvs are calculated in that very sample but for the life of me I don't understand how.

    I'm very new to shaders and have a library of sub graphs i'm using as reusable functions in shader graph and so I'm trying to create that pencil effect in shader graph.

    I can make it work as an object shader and can get this effect in a custom blit render feature:

    upload_2020-9-3_18-4-56.png

    So the last piece of the puzzle for me is to figure out the camera facing uvs in shader graph but I'm having a really hard time understanding what's happening. How are the UVs being sampled in the replacement shader and how can I do the same with shader graph?
     
  14. bgolus

    bgolus

    Joined:
    Dec 7, 2012
    Posts:
    12,352
    Because it doesn't use camera facing UVs. It uses the mesh's default UVs. My alternative shader scales the UVs so the texture is roughly constantly sized in screen space. But it's still using the mesh UVs. The whole thing with that example shader was to show you didn't necessarily need screen space UVs.

    Now, for Shader Graph, if you do just want screen space UVs, you can use the screen position node.