Hello, http://docs.unity3d.com/Manual/ComputeShaders.html says "However, it is also possible to write compute shaders in GLSL by inserting your code between GLSLPROGRAM / ENDGLSL tags." but doesn't explain exactly how to do that. For vertex shaders I add "#ifdef VERTEX", and for fragment shaders I add "#ifdef FRAGMENT", but what about compute shaders? I would also appreciate a simple example of a compute shader program written in GLSL.
Anyone? On Unity 5.1.2f1 I wrote compute shader code in HLSL and it ran fine on an Android 5 device. Then I put #pragma kernel main at the top of the .compute file and placed the compiled GLSL of the HLSL I had used above (the "Compiled Code" you get in the inspector of the compute shader) between GLSLPROGRAM / ENDGLSL tags. Then I built the app, but it's not working, even though I just used the translated GLSL (with #version 310 es at the top) from the working HLSL.
The Unity Editor reports <OpenGL 4.5>, and the graphics API in Player Settings is set to OpenGLCore. *.compute file content: Code (CSharp): GLSLPROGRAM #pragma kernel main #version 420 #extension GL_ARB_shading_language_420pack : require #ifdef GL_ARB_compute_shader #extension GL_ARB_compute_shader : enable #endif #ifdef COMPUTE layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in; void main() { return; } #endif ENDGLSL which should be equivalent to: Code (CSharp): #pragma kernel CSMain [numthreads(8,8,1)] void CSMain (uint2 id : SV_DispatchThreadID) { } Error: "Compute shader compilation had a problem, your compute shader was not compiled correctly (on glcore)".
Please submit a bug report. If you post the bug number here, it will be easier to follow up on it. Bug reports also contain additional information that might be helpful. Thanks!
Minimal working example with Unity 2022 (code is not optimized, I just show language syntax and setup): Code (CSharp): using UnityEngine; public class ShaderStorageBufferObject : MonoBehaviour { [SerializeField] ComputeShader _ComputeShader; [SerializeField] int _Resolution = 1024; [SerializeField] FilterMode _FilterMode = FilterMode.Bilinear; ComputeBuffer _RWStructuredBuffer, _ConstantBuffer; byte[] _Bytes; Texture2D _Texture; void Start() { _RWStructuredBuffer = new ComputeBuffer(_Resolution * _Resolution, sizeof(float), ComputeBufferType.Structured); _ConstantBuffer = new ComputeBuffer(2, sizeof(float), ComputeBufferType.Constant); _Bytes = new byte[_Resolution * _Resolution * sizeof(float)]; _Texture = new Texture2D(_Resolution, _Resolution, TextureFormat.RGBA32, false, false); GameObject plane = GameObject.CreatePrimitive(PrimitiveType.Plane); Material material = plane.GetComponent<Renderer>().material; material.shader = Shader.Find("Sprites/Default"); material.mainTexture = _Texture; _Texture.filterMode = _FilterMode; } void Update() { _ConstantBuffer.SetData(new float[]{Time.time, (float)_Resolution}); _ComputeShader.SetConstantBuffer("_UniformBuffer", _ConstantBuffer, 0, 2 * sizeof(float)); _ComputeShader.SetBuffer(0, "_StorageBuffer", _RWStructuredBuffer); _ComputeShader.Dispatch(0, _Resolution / 8, _Resolution / 8, 1); _RWStructuredBuffer.GetData(_Bytes); _Texture.LoadRawTextureData(_Bytes); _Texture.Apply(); } void OnDestroy() { Destroy(_Texture); _RWStructuredBuffer.Release(); _ConstantBuffer.Release(); } } Code (CSharp): #pragma kernel main GLSLPROGRAM #version 430 layout (std430, binding = 0) writeonly buffer _StorageBuffer {float mainImage[];}; layout (std140, binding = 0) uniform _UniformBuffer {float iTime; float iResolution;}; layout (local_size_x = 8, local_size_y = 8, local_size_z = 1) in; float RGBAToFloat(vec4 c) { int rgba = (int(c.w * 255.0) << 24) + (int(c.z * 255.0) << 16) + (int(c.y * 255.0) << 8) + int(c.x * 255.0); return 
intBitsToFloat(rgba); } vec3 Hash(vec2 p ) { vec3 q = vec3(dot(p,vec2(127.1,311.7)), dot(p,vec2(269.5,183.3)), dot(p,vec2(419.2,371.9))); return fract(sin(q)*43758.5453); } vec3 Noise(vec2 p) { vec2 i = floor(p); vec2 u = p - floor(p); u = vec2(u.x*u.x*(3.0-2.0*u.x), u.y*u.y*(3.0-2.0*u.y)); vec3 res = mix(mix(Hash(i),Hash(vec2(i.x+1.0,i.y)),u.x),mix(Hash(vec2(i.x,i.y+1.0)),Hash(vec2(i.x+1.0,i.y+1.0)),u.x),u.y); return res * res; } vec3 Fbm(vec2 p) { vec3 v = vec3(0.0); vec3 a = vec3(0.5); for (int i = 0; i < 5; ++i) { v = v + a * Noise(p); p = vec2((0.87 * p.x - 0.48 * p.y),(0.48 * p.x + 0.87 * p.y)) * 2.0; a = a * vec3(0.5); } return v; } vec3 Pattern (vec2 p, float time) { vec3 q = Fbm(vec2(p.x + 5.0, p.y + 1.0)); vec3 r = Fbm(vec2(p.x + 4.0 * q.x - time * 0.5, p.y + 4.0 * q.y + time * 0.3)); return Fbm(vec2(p.x + 8.0 * r.x, p.y + 8.0 * r.z)); } void main() { vec2 fragCoord = gl_GlobalInvocationID.xy; vec2 uv = fragCoord.xy / vec2(iResolution); vec4 fragColor = vec4(Pattern(uv, iTime), 1.0); mainImage[int(iResolution) * gl_GlobalInvocationID.y + gl_GlobalInvocationID.x] = RGBAToFloat(clamp(vec4(fragColor.rgb, 1.0), vec4(0.0), vec4(1.0))); } ENDGLSL
For those who care (that post is the only one where I found a GLSL example): I lost a day tracking down a vec3 buffer that, when read back to the CPU as a Vector3 array, got filled with "random" zeros. I knew it was something related to byte size; in fact, here they say vec3s are treated as 16 bytes (due to buffer layout alignment padding). I solved it by declaring the following struct in my compute shader: Code (CSharp): struct MyVec3 { float x; float y; float z; }; And using it in my return buffer.