Search Unity

  1. Welcome to the Unity Forums! Please take the time to read our Code of Conduct to familiarize yourself with the forum rules and how to post constructively.
  2. Dismiss Notice

Question GetFloat from a shader not working.

Discussion in 'Shaders' started by fwalker, Aug 2, 2023.

  1. fwalker

    fwalker

    Joined:
    Feb 5, 2013
    Posts:
    249
    I have this shader which works great. I need to retrieve the maxX,Y and minX,Y from the shader in my code and I am not having any success. I can set the properties fine, but their values never seem to change. I am trying to read the properties in the Update() of a monobehaviour and I don't even know if it is the correct place to read the values from? (new to shaders)

    Code (CSharp):
    1. Shader"Unlit/Test"
    2. {
    3.     Properties
    4.     {
    5.         _MainTex ("Texture", 2D) = "white" {}
    6.         [Toggle] _drawArrows("DrawArrows ?", float) = 0
    7.  
    8.         _MinX ("MinX", Float) = 0
    9.         _MaxX ("MaxX", Float) = 0
    10.  
    11.         _MinY ("MinY", Float) = 0
    12.         _MaxY ("MaxY", Float) = 0
    13.     }
    14.     SubShader
    15.     {
    16.         Tags { "RenderType"="Opaque" }
    17.         LOD 100
    18.  
    19.         Pass
    20.         {
    21.             CGPROGRAM
    22.             #pragma vertex vert
    23.             #pragma fragment frag
    24.             // make fog work
    25.             #pragma multi_compile_fog
    26.  
    27.             #include "UnityCG.cginc"
    28.  
    29.             float _MinX;
    30.             float _MaxX;
    31.             float _MinY;
    32.             float _MaxY;
    33.  
    34.             struct appdata
    35.             {
    36.                 float4 vertex : POSITION;
    37.                 float2 uv : TEXCOORD0;
    38.             };
    39.  
    40.             struct v2f
    41.             {
    42.                 float2 uv : TEXCOORD0;
    43.                 UNITY_FOG_COORDS(1)
    44.                 float4 vertex : SV_POSITION;
    45.             };
    46.  
    47.             sampler2D _MainTex;
    48.             float4 _MainTex_ST;
    49.  
    50.             float _drawArrows;
    51.             v2f vert (appdata v)
    52.             {
    53.                 v2f o;
    54.                 o.vertex = UnityObjectToClipPos(v.vertex);
    55.                 o.uv = TRANSFORM_TEX(v.uv, _MainTex);
    56.                 UNITY_TRANSFER_FOG(o,o.vertex);
    57.                 return o;
    58.             }
    59.  
    60.             // Debug Motion Vector from https://github.com/Unity-Technologies/Graphics/blob/master/com.unity.render-pipelines.high-definition/Runtime/Debug/DebugFullScreen.shader#L221
    61.  
    62.             // Motion vector debug utilities
    63.             float DistanceToLine(float2 p, float2 p1, float2 p2)
    64.             {
    65.                 float2 center = (p1 + p2) * 0.5;
    66.                 float len = length(p2 - p1);
    67.                 float2 dir = (p2 - p1) / len;
    68.                 float2 rel_p = p - center;
    69.                 return dot(rel_p, float2(dir.y, -dir.x));
    70.             }
    71.  
    72.             float DistanceToSegment(float2 p, float2 p1, float2 p2)
    73.             {
    74.                 float2 center = (p1 + p2) * 0.5;
    75.                 float len = length(p2 - p1);
    76.                 float2 dir = (p2 - p1) / len;
    77.                 float2 rel_p = p - center;
    78.                 float dist1 = abs(dot(rel_p, float2(dir.y, -dir.x)));
    79.                 float dist2 = abs(dot(rel_p, dir)) - 0.5 * len;
    80.                 return max(dist1, dist2);
    81.             }
    82.  
    83.             float2 SampleMotionVectors(float2 coords)
    84.             {
    85.                 float4 col = tex2D(_MainTex, coords);
    86.  
    87.                 // In case material is set as no motion
    88.                 if (col.x > 1)
    89.                     return 0;
    90.                 else
    91.                     return col.xy;
    92.             }
    93.  
    94.             float DrawArrow(float2 texcoord, float body, float head, float height, float linewidth, float antialias)
    95.             {
    96.                 float w = linewidth / 2.0 + antialias;
    97.                 float2 start = -float2(body / 2.0, 0.0);
    98.                 float2 end = float2(body / 2.0, 0.0);
    99.  
    100.                 // Head: 3 lines
    101.                 float d1 = DistanceToLine(texcoord, end, end - head * float2(1.0, -height));
    102.                 float d2 = DistanceToLine(texcoord, end - head * float2(1.0, height), end);
    103.                 float d3 = texcoord.x - end.x + head;
    104.  
    105.                 // Body: 1 segment
    106.                 float d4 = DistanceToSegment(texcoord, start, end - float2(linewidth, 0.0));
    107.  
    108.                 float d = min(max(max(d1, d2), -d3), d4);
    109.                 return d;
    110.             }
    111.  
    112.             #define PI 3.14159265359
    113.             float4 frag (v2f i) : SV_Target
    114.             {
    115.                 float motionVectMin = -1;
    116.                 float motionVectMax = 1;
    117.                
    118.                 float rgMin = 0;
    119.                 float rgMax = 1;
    120.    
    121.                 float motionVectorRange =  motionVectMax - motionVectMin;
    122.                 float rgRange =  rgMax - rgMin;
    123.    
    124.                 float2 mv = SampleMotionVectors(i.uv);
    125.        
    126.                 _MaxX = 1.3f; //    max(_MaxX, mv.x);
    127.                 _MaxY = max(_MaxY, mv.y);
    128.                
    129.                 _MinX = min(_MinX, mv.x);
    130.                 _MinY = min(_MinY, mv.y);
    131.    
    132.                 // Background color intensity - keep this low unless you want to make your eyes bleed
    133.                 const float kMinIntensity = 0.03f;
    134.                 const float kMaxIntensity = 0.50f;
    135.  
    136.                 // Map motion vector direction to color wheel (hue between 0 and 360deg)
    137.                 float phi = atan2(mv.x, mv.y);
    138.                 float hue = (phi / PI + 1.0) * 0.5;
    139.                 float r = abs(hue * 6.0 - 3.0) - 1.0;
    140.                 float g = 2.0 - abs(hue * 6.0 - 2.0);
    141.                 float b = 2.0 - abs(hue * 6.0 - 4.0);
    142.      
    143.                 float maxSpeed = 60.0f / 0.15f; // Assume that moving 15% of the viewport per second at 60 fps is really fast
    144.                 float absoluteLength = saturate(length(mv.xy) * maxSpeed);
    145.                 float3 color = float3(r, g, b) * lerp(kMinIntensity, kMaxIntensity, absoluteLength);
    146.                 color = saturate(color);
    147.  
    148.                 if (!_drawArrows)
    149.                     return float4(color, 1);
    150.    
    151.                 // Grid subdivisions - should be dynamic
    152.                 const float kGrid = 64.0;
    153.  
    154.                 float arrowSize = 500;
    155.                 float4 screenSize = float4(arrowSize, arrowSize, 1.0 / arrowSize, 1.0 / arrowSize);
    156.  
    157.                 // Arrow grid (aspect ratio is kept)
    158.                 float aspect = screenSize.y * screenSize.z;
    159.                 float rows = floor(kGrid * aspect);
    160.                 float cols = kGrid;
    161.                 float2 size = screenSize.xy / float2(cols, rows);
    162.                 float body = min(size.x, size.y) / sqrt(2.0);
    163.                 float2 positionSS = i.uv;
    164.                 positionSS *= screenSize.xy;
    165.                 float2 center = (floor(positionSS / size) + 0.5) * size;
    166.                 positionSS -= center;
    167.  
    168.                 // Sample the center of the cell to get the current arrow vector
    169.                 float2 mv_arrow = 0.0f;
    170. #if DONT_USE_NINE_TAP_FILTER
    171.                 mv_arrow = SampleMotionVectors(center * screenSize.zw);
    172. #else
    173.                 for (int i = -1; i <= 1; ++i) for (int j = -1; j <= 1; ++j)
    174.                     mv_arrow += SampleMotionVectors((center + float2(i, j)) * screenSize.zw);
    175.                 mv_arrow /= 9.0f;
    176. #endif
    177.                 mv_arrow.y *= -1;
    178.  
    179.                 // Skip empty motion
    180.                 float d = 0.0;
    181.                 if (any(mv_arrow))
    182.                 {
    183.                     // Rotate the arrow according to the direction
    184.                     mv_arrow = normalize(mv_arrow);
    185.                     float2x2 rot = float2x2(mv_arrow.x, -mv_arrow.y, mv_arrow.y, mv_arrow.x);
    186.                     positionSS = mul(rot, positionSS);
    187.  
    188.                     d = DrawArrow(positionSS, body, 0.25 * body, 0.5, 2.0, 1.0);
    189.                     d = 1.0 - saturate(d);
    190.                 }
    191.  
    192.                 // Explicitly handling the case where mv == float2(0, 0) as atan2(mv.x, mv.y) above would be atan2(0,0) which
    193.                 // is undefined and in practice can be inconsistent between compilers (e.g. NaN on FXC and ~pi/2 on DXC)
    194.                 if(!any(mv))
    195.                     color = float3(0, 0, 0);
    196.                    
    197.                 return float4(color + d.xxx, 1);
    198.             }
    199.             ENDCG
    200.         }
    201.     }
    202. }
    203.  
    Here is how I read the properties:
    Code (CSharp):
    1.  
    2.  
    3.    private void Update()
    4.     {
    5.        
    6.         maxX = material.GetFloat("_MaxX");
    7.         maxY = material.GetFloat("_MaxY");
    8.         minX = material.GetFloat("_MinX");
    9.         minY = material.GetFloat("_MinY");
    10.  
    11.  
    12.         Debug.Log("Max X = " + maxX);
    13.         Debug.Log("Max Y = " + maxY);
    14.         Debug.Log("Min X = " + minX);
    15.         Debug.Log("Min Y = " + minY);
    16.  
    17.     }
    18.  
     
  2. DevDunk

    DevDunk

    Joined:
    Feb 13, 2020
    Posts:
    4,362
    What actually gets logged?
    And are you sure the material is the one in the scene? If you set a property maybe you're setting it in an instance and not the shared material
     
  3. Rukhanka

    Rukhanka

    Joined:
    Dec 14, 2022
    Posts:
    177
    If you are meant to read the values computed by shader, you are out of luck with your approach. You can use ComputeBuffer, and read it with CPU-GPU synchronization cost or delayed by several frames.
     
  4. bgolus

    bgolus

    Joined:
    Dec 7, 2012
    Posts:
    12,221
    The connection from the material to the shader is one way. The values you’re setting in the shader are temporary copies passed to each individual visible pixel for each frame’s execution of that shader and immediately thrown away once that pixel finishes rendering. Even before that, the material’s properties are copied to a separate buffer that is what gets sent to the GPU and any connection back to the material is lost immediately, because it is a one way connection.

    As @Rukhanka mentioned one way around this is using a compute buffer / structured buffer with random writes enabled. However remember I mentioned every single individual pixel this shader is visible at is running the code? Which pixel’s value do you care about? You either need to determine that beforehand and limit which pixel writes values to the buffer, or have a big enough buffer that they can all write values to their own index in the buffer.

    Then you need to request that buffer be copied back to the CPU. And that can take between a few ms and several frames depending on how much data you’re moving, and what else the GPU already has queued up to do. The copy back requires the GPU to stall all other operations until it’s done, so it has to finish what it’s currently doing first.

    If this is for debugging, use RenderDoc. If this is for some effect in a game, don’t do that.
     
  5. fwalker

    fwalker

    Joined:
    Feb 5, 2013
    Posts:
    249
    Super helpful @bgolus. Thank you. So why have the GetFloat option on the material, for example? Confused.
    All I need is 4 floats min and max from the Shader. Does that change anything?
     
  6. bgolus

    bgolus

    Joined:
    Dec 7, 2012
    Posts:
    12,221
    Because if you can set the values from c#, it's nice to be able to query them from c# too. They can be used as their own data containers and not require additional c# code that has that duplicate data.

    An example of this: imagine you have a dozen meshes that are different conveyor belts, and each can be controlled individually by the player. Instead of having special shaders that animate the belts, it's easier to have the c# code offset the texture coordinates, possibly via a custom material property. The c# code then needs to know what the time delta is, which Unity's engine provides, and the belt velocity. Then each frame you read back that material property, add the velocity x time delta to it, and apply it again.

    It'd probably make more sense to keep the conveyor belt position in script still for other reasons, like moving things that are on the belt, so this is a somewhat contrived example, but cases like this do exist where it makes sense to not have to keep track of data in the c# code.

    As for the "I only need 4 floats". It matters in that it's not that much data, but it doesn't change the fact the GPU has to stall to send the data back. And depending on the computer hardware you're running on, what is being rendered at the moment you request the data, the time it takes to return the information back to the CPU from the GPU is going to be between a few ms and several frames (ie: tens or hundreds of ms).

    And, again, from which pixel do you want the data from?! You can't have them all writing out to the same 4 float values. (Well, you can, but good luck getting useful data out at the end.) And in 99.9% of cases where someone wants to do this for a real time use, the correct answer is ... don't do that. Calculate the value you need on the CPU manually, or do what you were going to do on the CPU with that data on the GPU so you don't have to move it back.
     
  7. fwalker

    fwalker

    Joined:
    Feb 5, 2013
    Posts:
    249
    @bgolus Your explanations are awesome. Thank you for taking the time.
    I have been at this for days and I am going a little crazy. So the bottom line is that I need the information from the motion vector "texture" and I need to find out the min and max values (for X and Y) in that texture every frame.
    I finally managed to get the code working to get a hold of the motion vector texture.
    From there, I have two options
    1. Read the content of that texture convert to floats and store the min and max
    2. Save the data to an EXR file and use Python to work with that data.

    I tried option 2 first and I just can't manage to get the correct output to the exr file. I would expect the background to be black since nothing is moving, yet I get blue and the array seems to contain 0,0,0,128 for the background.

    This is what I have so far. I am close, yet so far:
    Code (CSharp):
    1. using UnityEngine;
    2. using UnityEngine.Rendering;
    3. using UnityEngine.Rendering.HighDefinition;
    4.  
    5. #if UNITY_EDITOR
    6. using UnityEditor.Rendering.HighDefinition;
    7.  
    8. [CustomPassDrawer(typeof(CopyPass))]
    9. public class CopyPassDrawer : CustomPassDrawer
    10. {
    11.     protected override PassUIFlag commonPassUIFlags => PassUIFlag.Name;
    12. }
    13.  
    14. #endif
    15.  
    16. public class CopyPass : CustomPass
    17. {
    18.     static int frameNum = 0;
    19.  
    20.     public enum BufferType
    21.     {
    22.         Color,
    23.         Normal,
    24.         Roughness,
    25.         Depth,
    26.         MotionVectors,
    27.     }
    28.  
    29.     public RenderTexture outputRenderTexture;
    30.  
    31.     [SerializeField, HideInInspector]
    32.     Shader customCopyShader;
    33.     Material customCopyMaterial;
    34.  
    35.     public BufferType bufferType;
    36.  
    37.     protected override bool executeInSceneView => false;
    38.  
    39.     protected override void Setup(ScriptableRenderContext renderContext, CommandBuffer cmd)
    40.     {
    41.         if (customCopyShader == null)
    42.             customCopyShader = Shader.Find("Hidden/FullScreen/CustomCopy");
    43.         customCopyMaterial = CoreUtils.CreateEngineMaterial(customCopyShader);
    44.  
    45.     }
    46.  
    47.     protected override void Execute(CustomPassContext ctx)
    48.     {
    49.         if (outputRenderTexture == null || customCopyMaterial == null)
    50.             return;
    51.  
    52.         SyncRenderTextureAspect(outputRenderTexture, ctx.hdCamera.camera);
    53.  
    54.         var scale = RTHandles.rtHandleProperties.rtHandleScale;
    55.         customCopyMaterial.SetVector("_Scale", scale);
    56.  
    57.         ctx.cmd.Blit(ctx.cameraMotionVectorsBuffer, outputRenderTexture, new Vector2(scale.x, scale.y), Vector2.zero, 0, 0);
    58.  
    59.         SaveTextureToFile(outputRenderTexture);
    60.     }
    61.     public void SaveTextureToFile(Texture source, bool asynchronous = true, System.Action<bool> done = null)
    62.     {
    63.         // Capture the texture content
    64.         RenderTexture.active = outputRenderTexture;
    65.  
    66.         // Create a new texture to hold the captured data
    67.         Texture2D tex = new Texture2D(outputRenderTexture.width, outputRenderTexture.height, TextureFormat.RGHalf, false);
    68.         tex.ReadPixels(new Rect(0, 0, outputRenderTexture.width, outputRenderTexture.height), 0, 0);
    69.         tex.Apply();
    70.  
    71.         // Save the captured data as an EXR file
    72.         byte[] exrData = tex.EncodeToEXR(Texture2D.EXRFlags.None);
    73.         System.IO.File.WriteAllBytes("c:/tmp/TestCapturedTexture.exr", exrData);
    74.  
    75.         ConvertRGHalfToFloatArray(tex);
    76.  
    77.         // Reset the active render texture
    78.         RenderTexture.active = null;
    79.  
    80.     }
    81.  
    82.     void ConvertRGHalfToFloatArray(Texture2D source)
    83.     {
    84.         int width = source.width;
    85.         int height = source.height;
    86.  
    87.         float[] outputFloatArray = new float[width * height * 2]; // 2 components: R and G
    88.  
    89.         Color[] pixels = source.GetPixels();
    90.  
    91.       //This is as far as I got now I need to figure out how to convert Color to float.
    92.     }
    93.  
    94.     void SyncRenderTextureAspect(RenderTexture rt, Camera camera)
    95.     {
    96.         float aspect = rt.width / (float)rt.height;
    97.  
    98.         if (!Mathf.Approximately(aspect, camera.aspect))
    99.         {
    100.             rt.Release();
    101.             rt.width = camera.pixelWidth;
    102.             rt.height = camera.pixelHeight;
    103.             rt.Create();
    104.         }
    105.     }
    106.  
    107.     protected override void Cleanup()
    108.     {
    109.         CoreUtils.Destroy(customCopyMaterial);
    110.     }
    111.  
    112.  
    113. }
    Any chance you might be able to shed some wisdom on this? Please

    Note: the OutputTexture does seem to contain the correct information when I look at it on the Editor, so I think my problem is with the exr conversion.
     
    Last edited: Aug 3, 2023
  8. bgolus

    bgolus

    Joined:
    Dec 7, 2012
    Posts:
    12,221
    I can't remember if Unity's motion vector texture is stored in a 0.0 to 1.0 range or -1.0 to 1.0 range. Though if you're seeing "128" anywhere, that means whatever you're using to look at the image is converting the exr's float values to a byte representation, and possibly applying a gamma correction to it.

    I highly recommend you get RenderDoc and use that to look at the data Unity is producing internally as a point of "truth" to start from.
     
  9. fwalker

    fwalker

    Joined:
    Feb 5, 2013
    Posts:
    249
    Got RenderDoc and looking into it. Thank you. Motion Vector (according to doc) stores -1.0 to 1.0.

    I am viewing the outputs in Photoshop. I don't think that is the problem.
     
  10. fwalker

    fwalker

    Joined:
    Feb 5, 2013
    Posts:
    249
    Ok, I figured out the cause of the "gamma" issue. This code works great with TextureFormat.RGBAHalf, but red, for example, becomes magenta and black becomes blue when using TextureFormat.RGHalf.
    So then how do I copy the motion vector texture which is R16G16_SFLOAT to the exr?
    Do I need to convert it to R16G16B16A16_SFLOAT first I take it? :(

    Code (CSharp):
    1. // Capture the texture content
    2.         RenderTexture.active = outputRenderTexture;
    3.  
    4.         // Create a new texture to hold the captured data
    5.         Texture2D tex = new Texture2D(outputRenderTexture.width, outputRenderTexture.height, TextureFormat.RGHalf, false);
    6.         Color fillColor = new Color(1.0f, 0.0f, 0.0f, 1.0f);
    7.         Color[] fillColorArray = tex.GetPixels();
    8.  
    9.         for (var i = 0; i < fillColorArray.Length; ++i)
    10.         {
    11.             fillColorArray[i] = fillColor;
    12.         }
    13.  
    14.         tex.SetPixels(fillColorArray);
    15.         tex.Apply();
    16.  
    17.         // Save the captured data as an EXR file
    18.         byte[] exrData = tex.EncodeToEXR(Texture2D.EXRFlags.None);
    19.         System.IO.File.WriteAllBytes("c:/tmp/TestCapturedTexture.exr", exrData);
    20.  
    21.  
    22.         // Save the captured data as an PNG file
    23.         byte[] pngData = tex.EncodeToPNG();
    24.         System.IO.File.WriteAllBytes("c:/tmp/TestCapturedTexture.png", pngData);
     
  11. fwalker

    fwalker

    Joined:
    Feb 5, 2013
    Posts:
    249
    I tried to convert textures using Graphics.ConvertTexture like this but with different formats on tex2. The result is a black texture instead of red. Why is this so difficult? It is killing me. :(

    Code (CSharp):
    1. public void SaveTextureToFile(Texture source, System.Action<bool> done = null)
    2.     {
    3.         // Capture the texture content
    4.         RenderTexture.active = outputRenderTexture;
    5.  
    6.         // Create a new texture to hold the captured data
    7.         Texture2D tex = new Texture2D(outputRenderTexture.width, outputRenderTexture.height, TextureFormat.RGHalf, false);
    8.         Color fillColor = new Color(1.0f, 0.0f, 0.0f, 0.0f);
    9.         Color[] fillColorArray = tex.GetPixels();
    10.  
    11.         for (var i = 0; i < fillColorArray.Length; ++i)
    12.         {
    13.             fillColorArray[i] = fillColor;
    14.         }
    15.  
    16.         tex.SetPixels(fillColorArray);
    17.         tex.Apply();
    18.  
    19.         Texture2D tex2 = new Texture2D(outputRenderTexture.width, outputRenderTexture.height, TextureFormat.RGBAFloat, false); //I tried RGBAHalf as well which should have  worked
    20.         Graphics.ConvertTexture(tex, tex2);
    21.  
    22.         // Save the captured data as an EXR file
    23.         byte[] exrData = tex2.EncodeToEXR(Texture2D.EXRFlags.None);
    24.         System.IO.File.WriteAllBytes("c:/tmp/TestCapturedTexture.exr", exrData);
    25.  
    26.         // Save the captured data as an PNG file
    27.         byte[] pngData = tex2.EncodeToPNG();
    28.         System.IO.File.WriteAllBytes("c:/tmp/TestCapturedTexture.png", pngData);
    29.     }
    30.  
     
  12. bgolus

    bgolus

    Joined:
    Dec 7, 2012
    Posts:
    12,221
    What's the problem with this? RGHalf has no blue or alpha channel data. The fact it turns blue afterwards to me just means the converter is filling in the empty channels with 1.0. But they can and should be ignored when you're sampling the texture later anyway, so the values they become are irrelevant.

    Code (csharp):
    1. void DoThing()
    2. {
    3.         Vector4 TestColor = new Vector4(123.4f, -0.1234f);
    4.  
    5.         RenderTexture rt = RenderTexture.GetTemporary(64, 64, 0, RenderTextureFormat.RGHalf);
    6.         RenderTexture.active = rt;
    7.  
    8.         GL.Clear(false, true, TestColor);
    9.  
    10.         Texture2D tex = new Texture2D(rt.width, rt.height, TextureFormat.RGHalf, false, true);
    11.         tex.ReadPixels(new Rect(0,0,rt.width,rt.height), 0, 0, false);
    12.  
    13.         byte[] bytes = ImageConversion.EncodeToEXR(tex);
    14.         File.WriteAllBytes(Application.dataPath + "/../SavedRenderTexture.exr", bytes);
    15. }
    Using this code the values I saw in both RenderDoc and Photoshop matched the "test color" value I used. Yes, blue and alpha were 1.0, but again, those values don't matter because the RGHalf format doesn't have them to begin with so they should be ignored. It could be random noise and it doesn't matter because they do not affect the RG channel values.

    If you want to zero out the blue channel while you're testing for personal sanity, then do this:

    Code (csharp):
    1. TestColor = new Vector4(-0.12345f, 123.45f);
    2.  
    3. RenderTexture rt = RenderTexture.GetTemporary(64, 64, 0, RenderTextureFormat.RGHalf);
    4. RenderTexture.active = rt;
    5.  
    6. GL.Clear(false, true, TestColor);
    7.  
    8. Texture2D tex = new Texture2D(rt.width, rt.height, TextureFormat.RGBAHalf, false, true);
    9. tex.ReadPixels(new Rect(0,0,rt.width,rt.height), 0, 0, false);
    10.  
    11. Color[] cols = tex.GetPixels();
    12. for (int i=0; i<cols.Length; i++)
    13.     cols[i].b = 0.0f;
    14. tex.SetPixels(cols);
    15.  
    16. byte[] bytes = ImageConversion.EncodeToEXR(tex);
    17. File.WriteAllBytes(Application.dataPath + "/../SavedRenderTexture.exr", bytes);
    Yes, you can copy an
    RGHalf
    render texture into an
    RGBAHalf
    texture. But as soon as you do this, you might as well just process the min / max values of the texture in Unity since you're already iterating over all the pixels.
     
    Last edited: Aug 5, 2023
  13. bgolus

    bgolus

    Joined:
    Dec 7, 2012
    Posts:
    12,221
    To get back to the original task, the way I'd handle getting the min-max values from the texture would be by doing that on the GPU itself. This could be done with a compute shader, or I'm kind of lazy and would use a multi-pass downsample shader to do it. The idea is to copy the RGHalf into an ARGBHalf render texture with the RG channels duplicated in the BA channels with a shader blit. Then render into successively half-sized ARGBHalf textures where you read the 4 texels of the previous texture, get the min value of the RG and max value of the BA and output that. Keep doing that until you only have 1 pixel left in the render target. Then use ReadPixels() on that final render texture. Then use an async readback to get the final values... or more realistically continue to do whatever I need those values for on the GPU.
     
  14. fwalker

    fwalker

    Joined:
    Feb 5, 2013
    Posts:
    249
    Last edited: Aug 29, 2023
    DevDunk likes this.