我们来看一下URP下的LitShader。LitShader也是基于物理渲染的,很多方法和属性看过默认管线PBR代码的应该都会很熟悉,我们现在再过一遍,加深一下印象,同时疏通一下以前可能没有掌握的地方。
先看Shader的Properties:
// Specular vs Metallic workflow
[HideInInspector] _WorkflowMode("WorkflowMode", Float) = 1.0
工作流还是Specular和Metallic。说到这两个流程的区别,其实笔者认为他们只是在不同输入形式同样的算法下产生同样的结果。所以叫工作流,因为材质需要的贴图产出流程是不一样的。但是不同输入的形式其实决定了可控制参数的多少和基于物理自定义效果的程度。
首先看看Metallic Workflow:Metallic工作流的输入是五张贴图(当然并不是每张贴图都是必须的),分别是主纹理、法线、环境遮蔽、金属度、自发光。
对比一下Specular Workflow:Specular工作流输入的是还是五张贴图:分别是主纹理、法线、环境遮蔽、高光贴图、自发光。
通过对比我们发现两个工作流唯一不同的输入就是 金属度贴图vs高光贴图 那么究竟这两种输入方式对于渲染效果有着什么影响呢(其实熟悉PBR的小伙伴都知道,借着URP的机会讲讲PBR~手动滑稽)?我们看完Lit的代码就可以理解了。
下面的属性和之前基本上一样,相比之前不同的是没有了DetailTexture、DetailNormal和DetailMask。其他属性会在讲shader计算时提到,这里先跳过。我们先看ForwardPass:
Name "ForwardLit"
Tags{"LightMode" = "UniversalForward"}
这里的标签就是当时我们讲URP的ForwardRenderer时会执行ShaderPass的标签,接下来是几个基本指令的参数化:
Blend[_SrcBlend][_DstBlend]
ZWrite[_ZWrite]
Cull[_Cull]
混合模式、深度写入、Cull模式都做了参数化,在Material的Inspector面板上变得可操作,这个我们以后做自定义的Shader时可以学习一下。
之后就是一些预编译指令,我们后面都会讲到,需要关注的是:
#include "LitInput.hlsl"
#include "LitForwardPass.hlsl"
所有的Lit方法执行都在这两个hlsl文件中。我们直接看LitForwardPass.hlsl,按照通常的习惯,首先找到顶点着色器:
// Vertex stage of the URP Lit forward pass.
// Transforms object-space attributes into world/clip space and pre-computes
// per-vertex lighting data: fog factor, vertex lights, lightmap UV / SH,
// and (optionally) the main-light shadow coordinate.
Varyings LitPassVertex(Attributes input)
{
Varyings output = (Varyings)0;
// GPU-instancing / stereo (XR) bookkeeping macros.
UNITY_SETUP_INSTANCE_ID(input);
UNITY_TRANSFER_INSTANCE_ID(input, output);
UNITY_INITIALIZE_VERTEX_OUTPUT_STEREO(output);
// World/view/clip-space positions and the world-space normal/tangent basis.
VertexPositionInputs vertexInput = GetVertexPositionInputs(input.positionOS.xyz);
VertexNormalInputs normalInput = GetVertexNormalInputs(input.normalOS, input.tangentOS);
// Un-normalized world-space view direction (camera - vertex); normalized later per pixel.
half3 viewDirWS = GetCameraPositionWS() - vertexInput.positionWS;
// Per-vertex contribution of additional lights (when vertex lighting is enabled).
half3 vertexLight = VertexLighting(vertexInput.positionWS, normalInput.normalWS);
// Fog factor derived from clip-space depth.
half fogFactor = ComputeFogFactor(vertexInput.positionCS.z);
output.uv = TRANSFORM_TEX(input.texcoord, _BaseMap);
#ifdef _NORMALMAP
// With a normal map the full TBN basis is needed; the view direction is packed
// into the .w components of the three basis vectors to save interpolators.
output.normalWS = half4(normalInput.normalWS, viewDirWS.x);
output.tangentWS = half4(normalInput.tangentWS, viewDirWS.y);
output.bitangentWS = half4(normalInput.bitangentWS, viewDirWS.z);
#else
// No normal map: only the normal and the view direction are interpolated.
output.normalWS = NormalizeNormalPerVertex(normalInput.normalWS);
output.viewDirWS = viewDirWS;
#endif
// Lightmap UV (baked GI) or spherical-harmonics ambient, depending on keywords.
OUTPUT_LIGHTMAP_UV(input.lightmapUV, unity_LightmapST, output.lightmapUV);
OUTPUT_SH(output.normalWS.xyz, output.vertexSH);
// Pack fog (x) and vertex lighting (yzw) into one interpolator.
output.fogFactorAndVertexLight = half4(fogFactor, vertexLight);
#ifdef _ADDITIONAL_LIGHTS
// World position is only needed per pixel when additional lights are evaluated there.
output.positionWS = vertexInput.positionWS;
#endif
#if defined(_MAIN_LIGHT_SHADOWS) && !defined(_RECEIVE_SHADOWS_OFF)
output.shadowCoord = GetShadowCoord(vertexInput);
#endif
output.positionCS = vertexInput.positionCS;
return output;
}
代码中我们看出顶点着色器主要输出的信息有世界空间的法线、视线、切线、位置,裁剪空间的位置,光照贴图的UV、球谐等。
阴影坐标的计算在下面这个函数:
// Computes the shadow-sampling coordinate for the main light.
// With screen-space shadows the shadow map has already been resolved into a
// screen-sized texture, so the screen position suffices; otherwise the world
// position is projected into the main light's shadow (light clip) space.
float4 GetShadowCoord(VertexPositionInputs vertexInput)
{
#if SHADOWS_SCREEN
return ComputeScreenPos(vertexInput.positionCS);
#else
return TransformWorldToShadowCoord(vertexInput.positionWS);
#endif
}
我们可以看到屏幕空间阴影的话返回的是屏幕坐标,否则返回的是光源投影空间的坐标(也称阴影空间)。
片元函数如下:
// Fragment stage of the URP Lit forward pass.
// Gathers material data (SurfaceData, from texture samples) and geometric data
// (InputData, from interpolators), evaluates the PBR lighting model, applies fog.
half4 LitPassFragment(Varyings input) : SV_Target
{
UNITY_SETUP_INSTANCE_ID(input);
UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX(input);
// Sample material textures: albedo, metallic/specular, normal, occlusion, emission.
SurfaceData surfaceData;
InitializeStandardLitSurfaceData(input.uv, surfaceData);
// Build per-pixel shading inputs: world normal (with normal map applied),
// view direction, shadow coord, fog coord, baked GI.
InputData inputData;
InitializeInputData(input, surfaceData.normalTS, inputData);
half4 color = UniversalFragmentPBR(inputData, surfaceData.albedo, surfaceData.metallic, surfaceData.specular, surfaceData.smoothness, surfaceData.occlusion, surfaceData.emission, surfaceData.alpha);
// Blend the lit color toward the fog color based on the vertex fog factor.
color.rgb = MixFog(color.rgb, inputData.fogCoord);
return color;
}
可以看到核心的三个方法:InitializeStandardLitSurfaceData、InitializeInputData、UniversalFragmentPBR
// Fills SurfaceData from the material's textures and properties.
// Only one of metallic/specular is meaningful, selected by the workflow keyword.
inline void InitializeStandardLitSurfaceData(float2 uv, out SurfaceData outSurfaceData)
{
half4 albedoAlpha = SampleAlbedoAlpha(uv, TEXTURE2D_ARGS(_BaseMap, sampler_BaseMap));
// Alpha combines the base map's alpha with _BaseColor (and alpha-test cutoff).
outSurfaceData.alpha = Alpha(albedoAlpha.a, _BaseColor, _Cutoff);
// .rgb = specular color (specular workflow) or metallic in .r (metallic workflow);
// .a = smoothness in both cases.
half4 specGloss = SampleMetallicSpecGloss(uv, albedoAlpha.a);
outSurfaceData.albedo = albedoAlpha.rgb * _BaseColor.rgb;
#if _SPECULAR_SETUP
// Specular workflow: metallic is unused (forced to 1), specular color drives reflectance.
outSurfaceData.metallic = 1.0h;
outSurfaceData.specular = specGloss.rgb;
#else
// Metallic workflow: scalar metallic drives reflectance, no explicit specular color.
outSurfaceData.metallic = specGloss.r;
outSurfaceData.specular = half3(0.0h, 0.0h, 0.0h);
#endif
outSurfaceData.smoothness = specGloss.a;
// Tangent-space normal from the normal map, scaled by _BumpScale.
outSurfaceData.normalTS = SampleNormal(uv, TEXTURE2D_ARGS(_BumpMap, sampler_BumpMap), _BumpScale);
outSurfaceData.occlusion = SampleOcclusion(uv);
outSurfaceData.emission = SampleEmission(uv, _EmissionColor.rgb, TEXTURE2D_ARGS(_EmissionMap, sampler_EmissionMap));
}
上面这个初始化表面数据我们可以看到,主要是做了一些贴图采样,可以看到Metallic workflow是不需要specular信息的,反过来,Specular workflow不需要metallic信息。光滑度是从metallic或者specular贴图的alpha通道读取的。法线被转换成了切线空间,AO是从AO贴图上采样,我们看一下SampleOcclusion方法:
// Samples ambient occlusion from the green channel of _OcclusionMap.
// Returns 1.0 (no occlusion) when no occlusion map is assigned.
half SampleOcclusion(float2 uv)
{
#ifdef _OCCLUSIONMAP
// TODO: Controls things like these by exposing SHADER_QUALITY levels (low, medium, high)
#if defined(SHADER_API_GLES)
// GLES fast path: skip the strength blend, use the raw sample.
return SAMPLE_TEXTURE2D(_OcclusionMap, sampler_OcclusionMap, uv).g;
#else
half occ = SAMPLE_TEXTURE2D(_OcclusionMap, sampler_OcclusionMap, uv).g;
// Blend between white (strength 0 = no AO) and the sampled value (strength 1).
return LerpWhiteTo(occ, _OcclusionStrength);
#endif
#else
return 1.0;
#endif
}
可以看到AO读取的是AO贴图的g通道,而Metallic是放在金属度贴图的R通道,所以如果项目打算用Metallic workflow的小伙伴们可以把AO和金属度贴图合并到一张贴图哦。
然后是InitializeInputData方法:
// Builds InputData (geometric shading inputs) from the interpolated Varyings
// and the sampled tangent-space normal.
void InitializeInputData(Varyings input, half3 normalTS, out InputData inputData)
{
inputData = (InputData)0;
#ifdef _ADDITIONAL_LIGHTS
inputData.positionWS = input.positionWS;
#endif
#ifdef _NORMALMAP
// View direction was packed into the .w of the TBN interpolators by the vertex stage.
half3 viewDirWS = half3(input.normalWS.w, input.tangentWS.w, input.bitangentWS.w);
// Rotate the sampled tangent-space normal into world space via the TBN matrix.
inputData.normalWS = TransformTangentToWorld(normalTS,
half3x3(input.tangentWS.xyz, input.bitangentWS.xyz, input.normalWS.xyz));
#else
half3 viewDirWS = input.viewDirWS;
inputData.normalWS = input.normalWS;
#endif
// Re-normalize after interpolation (may be a no-op on low-quality tiers).
inputData.normalWS = NormalizeNormalPerPixel(inputData.normalWS);
viewDirWS = SafeNormalize(viewDirWS);
inputData.viewDirectionWS = viewDirWS;
#if defined(_MAIN_LIGHT_SHADOWS) && !defined(_RECEIVE_SHADOWS_OFF)
inputData.shadowCoord = input.shadowCoord;
#else
inputData.shadowCoord = float4(0, 0, 0, 0);
#endif
// Unpack fog (x) and vertex lighting (yzw) from the shared interpolator.
inputData.fogCoord = input.fogFactorAndVertexLight.x;
inputData.vertexLighting = input.fogFactorAndVertexLight.yzw;
// Baked GI: lightmap sample or SH evaluation, depending on keywords.
inputData.bakedGI = SAMPLE_GI(input.lightmapUV, input.vertexSH, inputData.normalWS);
}
我们可以看到方法获取了一些用于PBR计算的基本属性。
最后就是关键的UniversalFragmentPBR方法:
// Core URP PBR evaluation: GI (indirect) + main light + additional lights
// (per-pixel and/or per-vertex) + emission.
half4 UniversalFragmentPBR(InputData inputData, half3 albedo, half metallic, half3 specular,
half smoothness, half occlusion, half3 emission, half alpha)
{
// Convert workflow-specific inputs into unified BRDF terms (diffuse/specular/roughness).
BRDFData brdfData;
InitializeBRDFData(albedo, metallic, specular, smoothness, alpha, brdfData);
Light mainLight = GetMainLight(inputData.shadowCoord);
// Handle subtractive mixed lighting: carve the main light's shadow out of bakedGI.
MixRealtimeAndBakedGI(mainLight, inputData.normalWS, inputData.bakedGI, half4(0, 0, 0, 0));
// Indirect diffuse + indirect specular (environment reflections).
half3 color = GlobalIllumination(brdfData, inputData.bakedGI, occlusion, inputData.normalWS, inputData.viewDirectionWS);
// Direct lighting from the main (directional) light.
color += LightingPhysicallyBased(brdfData, mainLight, inputData.normalWS, inputData.viewDirectionWS);
#ifdef _ADDITIONAL_LIGHTS
// Per-pixel additional lights (point/spot).
uint pixelLightCount = GetAdditionalLightsCount();
for (uint lightIndex = 0u; lightIndex < pixelLightCount; ++lightIndex)
{
Light light = GetAdditionalLight(lightIndex, inputData.positionWS);
color += LightingPhysicallyBased(brdfData, light, inputData.normalWS, inputData.viewDirectionWS);
}
#endif
#ifdef _ADDITIONAL_LIGHTS_VERTEX
// Additional lights evaluated per vertex only contribute diffuse.
color += inputData.vertexLighting * brdfData.diffuse;
#endif
color += emission;
return half4(color, alpha);
}
可以看到首先准备了BRDF的数据,在准备BRDF数据的方法中我们看到有以下代码:
// Excerpt from InitializeBRDFData: how each workflow's inputs map onto the
// unified diffuse/specular BRDF terms.
#ifdef _SPECULAR_SETUP
// Specular workflow: the specular map IS the specular color; diffuse is what
// remains after subtracting it (energy conservation, per channel).
half reflectivity = ReflectivitySpecular(specular);
half oneMinusReflectivity = 1.0 - reflectivity;
outBRDFData.diffuse = albedo * (half3(1.0h, 1.0h, 1.0h) - specular);
outBRDFData.specular = specular;
#else
// Metallic workflow: reflectivity is derived from metallic; specular color is
// interpolated between the dielectric F0 and albedo (metals tint their specular).
half oneMinusReflectivity = OneMinusReflectivityMetallic(metallic);
half reflectivity = 1.0 - oneMinusReflectivity;
outBRDFData.diffuse = albedo * oneMinusReflectivity;
outBRDFData.specular = lerp(kDieletricSpec.rgb, albedo, metallic);
#endif
这几句代码体现了不同的工作流对应的输入是如何被转化成同样的参数用来计算BRDF的。如果是Specular workflow,则会通过Specular贴图来代表高光反射的颜色,或者说高光在各个颜色通道相对于漫反射的权重。因为我们知道PBR是能量守恒的,假设输入的光照能量为1,那么漫反射+高光反射的能量就绝对不能超过1,所以漫反射会通过1-specularFactor的方式来计算权重。需要注意的是1-specularFactor只是一个形象的说法,并不是说就是这样计算的。具体计算方法我们参考一下OneMinusReflectivityMetallic方法:
// Body of OneMinusReflectivityMetallic: kDieletricSpec.a pre-stores
// (1 - dielectricSpec) so the lerp collapses into a single MAD.
// We'll need oneMinusReflectivity, so
// 1-reflectivity = 1-lerp(dielectricSpec, 1, metallic) = lerp(1-dielectricSpec, 0, metallic)
// store (1-dielectricSpec) in kDieletricSpec.a, then
// 1-reflectivity = lerp(alpha, 0, metallic) = alpha + metallic*(0 - alpha) =
// = alpha - metallic * alpha
half oneMinusDielectricSpec = kDieletricSpec.a;
return oneMinusDielectricSpec - metallic * oneMinusDielectricSpec;
从上面的代码我们可以看出,之所以不能简单的使用1-specularFactor,是因为当金属度为0时(即当为非导体时),反射率并不是0,也就是说金属度不等于反射率,虽然他们成正比例关系。我们最终需要通过反射率来表示高光反射的比率,而不是金属度。
同样,在Metallic workflow中也是一样的,因为金属度越高,反射的高光越强(即高光反射率越高),漫反射越弱。
需要注意的是Metallic workflow的Specular的计算方式,是通过电介质(非金属)的高光颜色和albedo进行插值,也就是albedo不仅会影响漫反射,还会影响镜面反射(高光反射),但是Specular workflow则不会。
通过比较两种workflow我们可以发现,metallic工作流的优势在于不必关心高光贴图该怎么画,只用搞清楚这个材质的金属度参数就可以。而specular工作流必须根据材质需求正确的画出specular贴图,对美术有一定要求。specular的优势在于可以通过specular贴图做一些高光的偏色,通过灵活的使用specular贴图来达到有偏色的高光反射,更容易做风格化。总之两种工作流各有优势,目前游戏主流还是采用Metallic workflow工作流。
接下来获取主光源,确定了一下是否有主光源参与光照贴图计算。这里用到了_MIXED_LIGHTING_SUBTRACTIVE keyword,这个keyword是在ForwardLights.Setup中设置的:
CoreUtils.SetKeyword(commandBuffer, ShaderKeywordStrings.MixedLightingSubtractive, renderingData.lightData.supportsMixedLighting && this.m_MixedLightingSetup == MixedLightingSetup.Subtractive);
而在InitializeLightConstants方法中我们可以看到当混合光照模式是Subtractive时,有以下指令:
m_MixedLightingSetup = MixedLightingSetup.Subtractive;
SubtractDirectMainLightFromLightmap方法将实时光阴影剔除出光照贴图的计算(实际上整个过程就是算阴影)。具体剔除的过程如下:
// Excerpt from SubtractDirectMainLightFromLightmap (subtractive mixed lighting):
// estimates the main light's baked contribution with a Lambert term and
// subtracts it where the realtime shadow says the light is blocked.
half shadowStrength = GetMainLightShadowStrength();
// Lambert estimate of the directional light's contribution at this surface.
half contributionTerm = saturate(dot(mainLight.direction, normalWS));
half3 lambert = mainLight.color * contributionTerm;
// Portion of that contribution that is shadowed (1 - attenuation = occluded amount).
half3 estimatedLightContributionMaskedByInverseOfShadow = lambert * (1.0 - mainLight.shadowAttenuation);
// Remove the shadowed direct light from the baked lightmap value.
half3 subtractedLightmap = bakedGI - estimatedLightContributionMaskedByInverseOfShadow;
// Clamp so shadows never go darker than the configured subtractive shadow color.
half3 realtimeShadow = max(subtractedLightmap, _SubtractiveShadowColor.xyz);
// Fade the effect in by shadow strength, and never brighten beyond the lightmap.
realtimeShadow = lerp(bakedGI, realtimeShadow, shadowStrength);
return min(bakedGI, realtimeShadow);
从代码中可以看出对于平行光照贡献的预估是用最简单的兰伯特光照模型计算的,因为lightmap上不用关心高光反射。然后通过阴影衰减反推出光照贡献度,乘上光照贡献颜色,再用烘焙光的颜色减去,算出了没有平行光的光照贴图(即阴影和环境光),然后根据阴影强度做插值。
算完阴影后,就开始算GI了:
// Indirect lighting: diffuse from baked GI, specular from the reflection probe,
// combined through the environment BRDF with a Fresnel (grazing-angle) term.
half3 GlobalIllumination(BRDFData brdfData, half3 bakedGI, half occlusion, half3 normalWS, half3 viewDirectionWS)
{
half3 reflectVector = reflect(-viewDirectionWS, normalWS);
// Schlick-style Fresnel approximated with a fourth power of (1 - N.V).
half fresnelTerm = Pow4(1.0 - saturate(dot(normalWS, viewDirectionWS)));
half3 indirectDiffuse = bakedGI * occlusion;
half3 indirectSpecular = GlossyEnvironmentReflection(reflectVector, brdfData.perceptualRoughness, occlusion);
return EnvironmentBRDF(brdfData, indirectDiffuse, indirectSpecular, fresnelTerm);
}
通过方法里面准备的数据可以看出GI主要算的是环境漫反射和环境高光反射,漫反射通过光照贴图和环境遮蔽算出,高光反射由以下方法算出:
// Samples the environment reflection probe (unity_SpecCube0) along the
// reflection vector to get indirect specular radiance.
// perceptualRoughness selects the cubemap mip level (rougher -> blurrier mip);
// the result is attenuated by the ambient-occlusion term.
half3 GlossyEnvironmentReflection(half3 reflectVector, half perceptualRoughness, half occlusion)
{
#if !defined(_ENVIRONMENTREFLECTIONS_OFF)
half mip = PerceptualRoughnessToMipmapLevel(perceptualRoughness);
half4 encodedIrradiance = SAMPLE_TEXTURECUBE_LOD(unity_SpecCube0, samplerunity_SpecCube0, reflectVector, mip);
#if !defined(UNITY_USE_NATIVE_HDR)
// Probe data is HDR-encoded (e.g. RGBM); decode with the probe's HDR params.
half3 irradiance = DecodeHDREnvironment(encodedIrradiance, unity_SpecCube0_HDR);
#else
// BUGFIX: was `encodedIrradiance.rbg` — a swizzle typo that swapped the green
// and blue channels on native-HDR platforms. The intended read is .rgb.
half3 irradiance = encodedIrradiance.rgb;
#endif
return irradiance * occlusion;
#endif // _ENVIRONMENTREFLECTIONS_OFF
// Reflections disabled: fall back to the flat ambient specular color.
return _GlossyEnvironmentColor.rgb * occlusion;
}
通过视野的反射方向采样cubemap得到环境光高光项入射光,采样结果同样也会和环境遮蔽相乘。所以我们可以看到,AO贴图影响的不仅仅是环境漫反射,还有环境高光反射。
最后进行环境光BRDF计算:
// Combines indirect diffuse and indirect specular through the BRDF:
// diffuse is modulated by the material's diffuse color; specular is scaled by
// a roughness-based reduction and tinted between the specular color and the
// grazing term according to Fresnel.
half3 EnvironmentBRDF(BRDFData brdfData, half3 indirectDiffuse, half3 indirectSpecular, half fresnelTerm)
{
half3 c = indirectDiffuse * brdfData.diffuse;
// Rougher surfaces reflect the environment more weakly: 1 / (roughness^2 + 1).
float surfaceReduction = 1.0 / (brdfData.roughness2 + 1.0);
c += surfaceReduction * indirectSpecular * lerp(brdfData.specular, brdfData.grazingTerm, fresnelTerm);
return c;
}
首先将brdf漫反射数据(非金属权重乘baseMap)和环境光漫反射(已经在烘焙光照的时候算好,从光照贴图中读取)相乘,得出最后环境光漫反射结果。接下来算环境高光反射:
indirectSpecular是环境入射光,brdfData.specular是高光颜色,brdfData.grazingTerm是光滑度+反射率(刚才忘记说了,嘻嘻),fresnelTerm是菲涅耳项,这样我们就能看出环境高光反射的公式:
高光输出 = 环境入射光*lerp(高光,(光滑度+反射率),菲涅耳项)/(粗糙度平方+1)
GI项算完了,接下来就是计算直射光的方法:
// Direct (punctual) light BRDF: Lambert diffuse plus a mobile-optimized GGX
// specular term (combined D*V*F approximation, see SIGGRAPH 2015 reference below).
// NOTE: "BDRF" (not BRDF) is the function's as-shipped name in URP; kept verbatim.
half3 DirectBDRF(BRDFData brdfData, half3 normalWS, half3 lightDirectionWS, half3 viewDirectionWS)
{
#ifndef _SPECULARHIGHLIGHTS_OFF
float3 halfDir = SafeNormalize(float3(lightDirectionWS) + float3(viewDirectionWS));
float NoH = saturate(dot(normalWS, halfDir));
half LoH = saturate(dot(lightDirectionWS, halfDir));
// GGX Distribution multiplied by combined approximation of Visibility and Fresnel
// BRDFspec = (D * V * F) / 4.0
// D = roughness^2 / ( NoH^2 * (roughness^2 - 1) + 1 )^2
// V * F = 1.0 / ( LoH^2 * (roughness + 0.5) )
// See "Optimizing PBR for Mobile" from Siggraph 2015 moving mobile graphics course
// https://community.arm.com/events/1155
// Final BRDFspec = roughness^2 / ( NoH^2 * (roughness^2 - 1) + 1 )^2 * (LoH^2 * (roughness + 0.5) * 4.0)
// We further optimize a few light invariant terms
// brdfData.normalizationTerm = (roughness + 0.5) * 4.0 rewritten as roughness * 4.0 + 2.0 to a fit a MAD.
// +1.00001f (not +1.0) guards against d == 0 in the division below.
float d = NoH * NoH * brdfData.roughness2MinusOne + 1.00001f;
half LoH2 = LoH * LoH;
// max(0.1h, LoH2) avoids the denominator collapsing at grazing half-angles.
half specularTerm = brdfData.roughness2 / ((d * d) * max(0.1h, LoH2) * brdfData.normalizationTerm);
// On platforms where half actually means something, the denominator has a risk of overflow
// clamp below was added specifically to "fix" that, but dx compiler (we convert bytecode to metal/gles)
// sees that specularTerm have only non-negative terms, so it skips max(0,..) in clamp (leaving only min(100,...))
#if defined (SHADER_API_MOBILE) || defined (SHADER_API_SWITCH)
specularTerm = specularTerm - HALF_MIN;
specularTerm = clamp(specularTerm, 0.0, 100.0); // Prevent FP16 overflow on mobiles
#endif
// Light color and N.L are applied by the caller; here only specular + diffuse weights.
half3 color = specularTerm * brdfData.specular + brdfData.diffuse;
return color;
#else
// Specular highlights disabled: diffuse-only response.
return brdfData.diffuse;
#endif
}
注释中说明了公式,我们先往下看UniversalFragmentPBR方法:
// Excerpt (repeated) from UniversalFragmentPBR: accumulation of additional
// lights and emission after the main light has been evaluated.
#ifdef _ADDITIONAL_LIGHTS
// Per-pixel additional lights: loop over the visible light count and shade
// each with the same physically based model as the main light.
uint pixelLightCount = GetAdditionalLightsCount();
for (uint lightIndex = 0u; lightIndex < pixelLightCount; ++lightIndex)
{
Light light = GetAdditionalLight(lightIndex, inputData.positionWS);
color += LightingPhysicallyBased(brdfData, light, inputData.normalWS, inputData.viewDirectionWS);
}
#endif
#ifdef _ADDITIONAL_LIGHTS_VERTEX
// Vertex-lit additional lights contribute diffuse only.
color += inputData.vertexLighting * brdfData.diffuse;
#endif
// Self-emission is added last, unaffected by lights.
color += emission;
算完主光源之后开始算additionallight,最后附加上顶点光照和自发光,输出片元。
LitShader的ForwardPass我们大致过了一遍,对于shader中的计算先后顺序,输入信息如何影响最终渲染效果也有了大致了解,但是中间遇到的PBR相关公式以及做了哪些优化我们并没有细说,我们放到下一节一起来学习。