Mirror of https://github.com/dolphin-emu/dolphin.git, synced 2025-07-24 06:39:46 -06:00
Fixes spacing for "for", "while", "switch" and "if"
Also moved && and || to the ends of lines instead of the start, fixed miscellaneous vertical alignment, and moved some { braces onto their own lines.
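The convention enforced here puts a single space between a control-flow keyword and its opening parenthesis (function calls keep no space), comparable to clang-format's SpaceBeforeParens: ControlStatements rule. A minimal, hypothetical illustration of the style, not code from the commit itself:

    // Before:  if(ready && (count > 0)) {
    // After:   if (ready && (count > 0)) {
    void Example(bool ready, int count)
    {
        if (ready &&
            (count > 0))  // && stays at the end of the line, per the commit message
        {
            // ...
        }
    }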
@@ -353,7 +353,7 @@ void FramebufferManager::ReinterpretPixelData(unsigned int convtype)
 GLuint src_texture = 0;
-if(m_msaaSamples > 1)
+if (m_msaaSamples > 1)
 {
 // MSAA mode, so resolve first
 glBindFramebuffer(GL_READ_FRAMEBUFFER, m_efbFramebuffer);
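For context, the m_msaaSamples > 1 branch above resolves the multisampled EFB before it can be sampled. A minimal sketch of such a resolve blit, assuming a function loader and an active GL context; the function and parameter names are placeholders, not Dolphin's:

    #include <GL/glew.h>

    // Resolve a multisampled FBO into a single-sample FBO by blitting.
    void ResolveMsaa(GLuint msaa_fbo, GLuint resolved_fbo, int width, int height)
    {
        glBindFramebuffer(GL_READ_FRAMEBUFFER, msaa_fbo);      // multisampled source
        glBindFramebuffer(GL_DRAW_FRAMEBUFFER, resolved_fbo);  // single-sample destination
        // Source and destination rectangles must match for a multisample resolve;
        // GL_NEAREST is the conventional filter here.
        glBlitFramebuffer(0, 0, width, height, 0, 0, width, height,
                          GL_COLOR_BUFFER_BIT, GL_NEAREST);
    }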
@@ -64,9 +64,9 @@ GLuint OpenGL_CompileProgram ( const char* vertexShader, const char* fragmentSha
 GLsizei stringBufferUsage = 0;
 glGetShaderiv(vertexShaderID, GL_COMPILE_STATUS, &Result);
 glGetShaderInfoLog(vertexShaderID, 1024, &stringBufferUsage, stringBuffer);
-if(Result && stringBufferUsage) {
+if (Result && stringBufferUsage) {
 ERROR_LOG(VIDEO, "GLSL vertex shader warnings:\n%s%s", stringBuffer, vertexShader);
-} else if(!Result) {
+} else if (!Result) {
 ERROR_LOG(VIDEO, "GLSL vertex shader error:\n%s%s", stringBuffer, vertexShader);
 } else {
 DEBUG_LOG(VIDEO, "GLSL vertex shader compiled:\n%s", vertexShader);
@@ -80,9 +80,9 @@ GLuint OpenGL_CompileProgram ( const char* vertexShader, const char* fragmentSha
 #if defined(_DEBUG) || defined(DEBUGFAST) || defined(DEBUG_GLSL)
 glGetShaderiv(fragmentShaderID, GL_COMPILE_STATUS, &Result);
 glGetShaderInfoLog(fragmentShaderID, 1024, &stringBufferUsage, stringBuffer);
-if(Result && stringBufferUsage) {
+if (Result && stringBufferUsage) {
 ERROR_LOG(VIDEO, "GLSL fragment shader warnings:\n%s%s", stringBuffer, fragmentShader);
-} else if(!Result) {
+} else if (!Result) {
 ERROR_LOG(VIDEO, "GLSL fragment shader error:\n%s%s", stringBuffer, fragmentShader);
 } else {
 DEBUG_LOG(VIDEO, "GLSL fragment shader compiled:\n%s", fragmentShader);
@@ -97,9 +97,9 @@ GLuint OpenGL_CompileProgram ( const char* vertexShader, const char* fragmentSha
 #if defined(_DEBUG) || defined(DEBUGFAST) || defined(DEBUG_GLSL)
 glGetProgramiv(programID, GL_LINK_STATUS, &Result);
 glGetProgramInfoLog(programID, 1024, &stringBufferUsage, stringBuffer);
-if(Result && stringBufferUsage) {
+if (Result && stringBufferUsage) {
 ERROR_LOG(VIDEO, "GLSL linker warnings:\n%s%s%s", stringBuffer, vertexShader, fragmentShader);
-} else if(!Result && !shader_errors) {
+} else if (!Result && !shader_errors) {
 ERROR_LOG(VIDEO, "GLSL linker error:\n%s%s%s", stringBuffer, vertexShader, fragmentShader);
 }
 #endif
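The three hunks above all follow the same GL pattern: query the compile or link status, fetch the info log, and report it as a warning when the stage succeeded but the driver still had remarks. A standalone sketch of that pattern for a single shader stage, assuming an active GL context; the helper name and log size are illustrative only:

    #include <GL/glew.h>
    #include <cstdio>

    // Compile one shader stage and report the driver's info log.
    GLuint CompileStage(GLenum type, const char* source)
    {
        GLuint id = glCreateShader(type);
        glShaderSource(id, 1, &source, nullptr);
        glCompileShader(id);

        GLint ok = GL_FALSE;
        GLsizei log_length = 0;
        char log[1024];
        glGetShaderiv(id, GL_COMPILE_STATUS, &ok);
        glGetShaderInfoLog(id, sizeof(log), &log_length, log);

        if (ok && log_length)
            std::printf("shader warnings:\n%s\n", log);  // compiled, but with driver remarks
        else if (!ok)
            std::printf("shader error:\n%s\n", log);     // compilation failed

        if (!ok)
        {
            glDeleteShader(id);
            return 0;
        }
        return id;
    }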
@@ -77,7 +77,7 @@ void BindTargetFramebuffer ()
 void BlitToScreen()
 {
-if(!s_enable) return;
+if (!s_enable) return;
 glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
 glViewport(0, 0, s_width, s_height);
@@ -101,7 +101,7 @@ void Update ( u32 width, u32 height )
 {
 ApplyShader();
-if(s_enable && (width != s_width || height != s_height)) {
+if (s_enable && (width != s_width || height != s_height)) {
 s_width = width;
 s_height = height;
@@ -133,7 +133,7 @@ void ApplyShader()
 // Fallback to shared user dir
 path = File::GetSysDirectory() + SHADERS_DIR DIR_SEP + g_ActiveConfig.sPostProcessingShader + ".glsl";
 }
-if(!File::ReadFileToString(path.c_str(), code)) {
+if (!File::ReadFileToString(path.c_str(), code)) {
 ERROR_LOG(VIDEO, "Post-processing shader not found: %s", path.c_str());
 return;
 }
@@ -47,9 +47,9 @@ void SHADER::SetProgramVariables()
 GLint PSBlock_id = glGetUniformBlockIndex(glprogid, "PSBlock");
 GLint VSBlock_id = glGetUniformBlockIndex(glprogid, "VSBlock");
-if(PSBlock_id != -1)
+if (PSBlock_id != -1)
 glUniformBlockBinding(glprogid, PSBlock_id, 1);
-if(VSBlock_id != -1)
+if (VSBlock_id != -1)
 glUniformBlockBinding(glprogid, VSBlock_id, 2);
 }
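The hunk above binds each named uniform block to a fixed binding point, skipping blocks the compiler optimized away (index -1). A small hedged sketch of the same pattern, with illustrative names and an assumed active GL context:

    #include <GL/glew.h>

    // Attach a program's named uniform block to a binding point if it exists.
    void BindUniformBlock(GLuint program, const char* block_name, GLuint binding_point)
    {
        GLuint index = glGetUniformBlockIndex(program, block_name);
        if (index != GL_INVALID_INDEX)  // same check as "!= -1" in the diff above
            glUniformBlockBinding(program, index, binding_point);
    }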
@@ -89,7 +89,7 @@ void SHADER::SetProgramBindings()
 glBindAttribLocation(glprogid, SHADER_NORM1_ATTRIB, "rawnorm1");
 glBindAttribLocation(glprogid, SHADER_NORM2_ATTRIB, "rawnorm2");
-for(int i=0; i<8; i++) {
+for (int i=0; i<8; i++) {
 char attrib_name[8];
 snprintf(attrib_name, 8, "tex%d", i);
 glBindAttribLocation(glprogid, SHADER_TEXTURE0_ATTRIB+i, attrib_name);
@@ -98,7 +98,7 @@ void SHADER::SetProgramBindings()
 void SHADER::Bind()
 {
-if(CurrentProgram != glprogid)
+if (CurrentProgram != glprogid)
 {
 glUseProgram(glprogid);
 CurrentProgram = glprogid;
@@ -107,7 +107,7 @@ void SHADER::Bind()
 void ProgramShaderCache::UploadConstants()
 {
-if(PixelShaderManager::dirty || VertexShaderManager::dirty)
+if (PixelShaderManager::dirty || VertexShaderManager::dirty)
 {
 auto buffer = s_buffer->Map(s_ubo_buffer_size, s_ubo_align);
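SHADER::Bind() above avoids redundant glUseProgram calls by tracking the last program it bound. A standalone sketch of that state-caching idea, assuming an active GL context; the names are placeholders:

    #include <GL/glew.h>

    namespace
    {
    GLuint s_current_program = 0;  // last program handed to glUseProgram
    }

    // Bind a program only when it differs from the cached one.
    void BindProgram(GLuint program)
    {
        if (s_current_program != program)
        {
            glUseProgram(program);
            s_current_program = program;
        }
    }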
@@ -210,7 +210,7 @@ bool ProgramShaderCache::CompileShader ( SHADER& shader, const char* vcode, cons
 GLuint vsid = CompileSingleShader(GL_VERTEX_SHADER, vcode);
 GLuint psid = CompileSingleShader(GL_FRAGMENT_SHADER, pcode);
-if(!vsid || !psid)
+if (!vsid || !psid)
 {
 glDeleteShader(vsid);
 glDeleteShader(psid);
@@ -250,7 +250,7 @@ bool ProgramShaderCache::CompileShader ( SHADER& shader, const char* vcode, cons
 file << s_glsl_header << vcode << s_glsl_header << pcode << infoLog;
 file.close();
-if(linkStatus != GL_TRUE)
+if (linkStatus != GL_TRUE)
 PanicAlert("Failed to link shaders!\nThis usually happens when trying to use Dolphin with an outdated GPU or integrated GPU like the Intel GMA series.\n\nIf you're sure this is Dolphin's error anyway, post the contents of %s along with this error message at the forums.\n\nDebug info (%s, %s, %s):\n%s",
 szTemp,
 g_ogl_config.gl_vendor,
@@ -308,7 +308,7 @@ GLuint ProgramShaderCache::CompileSingleShader (GLuint type, const char* code )
 file << s_glsl_header << code << infoLog;
 file.close();
-if(compileStatus != GL_TRUE)
+if (compileStatus != GL_TRUE)
 PanicAlert("Failed to compile %s shader!\nThis usually happens when trying to use Dolphin with an outdated GPU or integrated GPU like the Intel GMA series.\n\nIf you're sure this is Dolphin's error anyway, post the contents of %s along with this error message at the forums.\n\nDebug info (%s, %s, %s):\n%s",
 type==GL_VERTEX_SHADER ? "vertex" : "pixel",
 szTemp,
@@ -373,7 +373,7 @@ void ProgramShaderCache::Init(void)
 {
 GLint Supported;
 glGetIntegerv(GL_NUM_PROGRAM_BINARY_FORMATS, &Supported);
-if(!Supported)
+if (!Supported)
 {
 ERROR_LOG(VIDEO, "GL_ARB_get_program_binary is supported, but no binary format is known. So disable shader cache.");
 g_ogl_config.bSupportsGLSLCache = false;
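The Init() hunk above checks that the driver actually exposes at least one program binary format before enabling the shader cache; GL_ARB_get_program_binary can be advertised with zero formats. A sketch of that check, assuming an active GL context:

    #include <GL/glew.h>

    // Returns true when the driver can produce program binaries for caching.
    bool DriverSupportsProgramBinaries()
    {
        GLint num_formats = 0;
        glGetIntegerv(GL_NUM_PROGRAM_BINARY_FORMATS, &num_formats);
        return num_formats > 0;
    }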
@@ -406,14 +406,14 @@ void ProgramShaderCache::Shutdown(void)
 {
 for (auto& entry : pshaders)
 {
-if(entry.second.in_cache)
+if (entry.second.in_cache)
 {
 continue;
 }
 GLint binary_size;
 glGetProgramiv(entry.second.shader.glprogid, GL_PROGRAM_BINARY_LENGTH, &binary_size);
-if(!binary_size)
+if (!binary_size)
 {
 continue;
 }
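Shutdown() above skips entries that are already cached or whose binary length is zero, then (outside the hunk) stores the remaining program binaries. A hedged sketch of fetching one such binary blob, assuming an active GL context and GL_ARB_get_program_binary support:

    #include <GL/glew.h>
    #include <vector>

    // Retrieve a linked program's binary for disk caching; empty when unavailable.
    std::vector<char> GetProgramBinaryBlob(GLuint program, GLenum* format_out)
    {
        GLint size = 0;
        glGetProgramiv(program, GL_PROGRAM_BINARY_LENGTH, &size);
        if (size <= 0)
            return {};
        std::vector<char> blob(size);
        GLsizei written = 0;
        glGetProgramBinary(program, size, &written, format_out, blob.data());
        blob.resize(written);
        return blob;
    }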
@@ -25,9 +25,9 @@ public:
 bool operator <(const SHADERUID& r) const
 {
-if(puid < r.puid) return true;
-if(r.puid < puid) return false;
-if(vuid < r.vuid) return true;
+if (puid < r.puid) return true;
+if (r.puid < puid) return false;
+if (vuid < r.vuid) return true;
 return false;
 }
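The comparison above is a lexicographic ordering on (puid, vuid). For reference, the same ordering can be written with std::tie; the struct below is a hypothetical stand-in, since the real UID members are themselves comparable structs:

    #include <tuple>

    struct ExampleUid
    {
        int puid;  // placeholder members
        int vuid;

        // Equivalent to the chained < / return-false pattern in the diff above.
        bool operator<(const ExampleUid& r) const
        {
            return std::tie(puid, vuid) < std::tie(r.puid, r.vuid);
        }
    };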
@@ -141,9 +141,9 @@ RasterFont::RasterFont()
 glActiveTexture(GL_TEXTURE0+8);
 glBindTexture(GL_TEXTURE_2D, texture);
 u32* texture_data = new u32[char_width*char_count*char_height];
-for(u32 y=0; y<char_height; y++) {
-for(u32 c=0; c<char_count; c++) {
-for(u32 x=0; x<char_width; x++) {
+for (u32 y=0; y<char_height; y++) {
+for (u32 c=0; c<char_count; c++) {
+for (u32 x=0; x<char_width; x++) {
 bool pixel = (0 != (rasters[c][y] & (1<<(char_width-x-1))));
 texture_data[char_width*char_count*y+char_width*c+x] = pixel ? -1 : 0;
 }
@@ -195,22 +195,22 @@ void RasterFont::printMultilineText(const char *text, double start_x, double sta
 GLfloat x = GLfloat(start_x);
 GLfloat y = GLfloat(start_y);
-for(size_t i=0; i<length; i++) {
+for (size_t i=0; i<length; i++) {
 u8 c = text[i];
-if(c == '\n') {
+if (c == '\n') {
 x = GLfloat(start_x);
 y -= delta_y + border_y;
 continue;
 }
 // do not print spaces, they can be skipped easily
-if(c == ' ') {
+if (c == ' ') {
 x += delta_x + border_x;
 continue;
 }
-if(c < char_offset || c >= char_count+char_offset) continue;
+if (c < char_offset || c >= char_count+char_offset) continue;
 vertices[usage++] = x;
 vertices[usage++] = y;
@@ -245,7 +245,7 @@ void RasterFont::printMultilineText(const char *text, double start_x, double sta
 x += delta_x + border_x;
 }
-if(!usage) {
+if (!usage) {
 delete [] vertices;
 return;
 }
@@ -258,7 +258,7 @@ void RasterFont::printMultilineText(const char *text, double start_x, double sta
 s_shader.Bind();
-if(color != cached_color) {
+if (color != cached_color) {
 glUniform4f(uniform_color_id, GLfloat((color>>16)&0xff)/255.f,GLfloat((color>>8)&0xff)/255.f,GLfloat((color>>0)&0xff)/255.f,GLfloat((color>>24)&0xff)/255.f);
 cached_color = color;
 }
@@ -147,7 +147,7 @@ int GetNumMSAASamples(int MSAAMode)
 samples = 1;
 }
-if(samples <= g_ogl_config.max_samples) return samples;
+if (samples <= g_ogl_config.max_samples) return samples;
 // TODO: move this to InitBackendInfo
 OSD::AddMessage(StringFromFormat("%d Anti Aliasing samples selected, but only %d supported by your GPU.", samples, g_ogl_config.max_samples), 10000);
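The early return above only accepts the requested sample count when it does not exceed the driver's maximum. A small sketch of the equivalent clamp against GL_MAX_SAMPLES, assuming an active GL context:

    #include <GL/glew.h>
    #include <algorithm>

    // Clamp a requested MSAA sample count to what the driver reports it supports.
    int ClampMsaaSamples(int requested)
    {
        GLint max_samples = 1;
        glGetIntegerv(GL_MAX_SAMPLES, &max_samples);
        return std::min(requested, static_cast<int>(max_samples));
    }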
@@ -172,7 +172,7 @@ int GetNumMSAACoverageSamples(int MSAAMode)
 default:
 samples = 0;
 }
-if(g_ogl_config.bSupportCoverageMSAA || samples == 0) return samples;
+if (g_ogl_config.bSupportCoverageMSAA || samples == 0) return samples;
 // TODO: move this to InitBackendInfo
 OSD::AddMessage("CSAA Anti Aliasing isn't supported by your GPU.", 10000);
@@ -183,15 +183,15 @@ void ApplySSAASettings() {
 // GLES3 doesn't support SSAA
 if (GLInterface->GetMode() == GLInterfaceMode::MODE_OPENGL)
 {
-if(g_ActiveConfig.iMultisampleMode == MULTISAMPLE_SSAA_4X) {
-if(g_ogl_config.bSupportSampleShading) {
+if (g_ActiveConfig.iMultisampleMode == MULTISAMPLE_SSAA_4X) {
+if (g_ogl_config.bSupportSampleShading) {
 glEnable(GL_SAMPLE_SHADING_ARB);
 glMinSampleShadingARB(s_MSAASamples);
 } else {
 // TODO: move this to InitBackendInfo
 OSD::AddMessage("SSAA Anti Aliasing isn't supported by your GPU.", 10000);
 }
-} else if(g_ogl_config.bSupportSampleShading) {
+} else if (g_ogl_config.bSupportSampleShading) {
 glDisable(GL_SAMPLE_SHADING_ARB);
 }
 }
@@ -202,7 +202,7 @@ void GLAPIENTRY ErrorCallback( GLenum source, GLenum type, GLuint id, GLenum sev
 const char *s_source;
 const char *s_type;
-switch(source)
+switch (source)
 {
 case GL_DEBUG_SOURCE_API_ARB: s_source = "API"; break;
 case GL_DEBUG_SOURCE_WINDOW_SYSTEM_ARB: s_source = "Window System"; break;
@@ -212,7 +212,7 @@ void GLAPIENTRY ErrorCallback( GLenum source, GLenum type, GLuint id, GLenum sev
 case GL_DEBUG_SOURCE_OTHER_ARB: s_source = "Other"; break;
 default: s_source = "Unknown"; break;
 }
-switch(type)
+switch (type)
 {
 case GL_DEBUG_TYPE_ERROR_ARB: s_type = "Error"; break;
 case GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR_ARB: s_type = "Deprecated"; break;
@@ -222,7 +222,7 @@ void GLAPIENTRY ErrorCallback( GLenum source, GLenum type, GLuint id, GLenum sev
 case GL_DEBUG_TYPE_OTHER_ARB: s_type = "Other"; break;
 default: s_type = "Unknown"; break;
 }
-switch(severity)
+switch (severity)
 {
 case GL_DEBUG_SEVERITY_HIGH_ARB: ERROR_LOG(VIDEO, "id: %x, source: %s, type: %s - %s", id, s_source, s_type, message); break;
 case GL_DEBUG_SEVERITY_MEDIUM_ARB: WARN_LOG(VIDEO, "id: %x, source: %s, type: %s - %s", id, s_source, s_type, message); break;
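ErrorCallback above decodes debug-output enums into readable log lines. For readers who have not used this API, the sketch below shows how such a callback can be registered via the GL 4.3 / KHR_debug entry points; it is illustrative only and not the registration code used in this file:

    #include <GL/glew.h>
    #include <cstdio>

    // Minimal debug callback matching the GLDEBUGPROC signature.
    void GLAPIENTRY DebugCallback(GLenum source, GLenum type, GLuint id, GLenum severity,
                                  GLsizei length, const GLchar* message, const void* user)
    {
        (void)source; (void)type; (void)length; (void)user;
        std::printf("GL debug: id %x severity %x: %s\n", id, severity, message);
    }

    void EnableDebugOutput()
    {
        glEnable(GL_DEBUG_OUTPUT);
        glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS);  // deliver messages on the offending call
        glDebugMessageCallback(DebugCallback, nullptr);
    }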
@@ -277,7 +277,7 @@ void InitDriverInfo()
 vendor = DriverDetails::VENDOR_VIVANTE;
 // Get device family and driver version...if we care about it
-switch(vendor)
+switch (vendor)
 {
 case DriverDetails::VENDOR_QUALCOMM:
 {
@@ -292,16 +292,16 @@ void InitDriverInfo()
 case DriverDetails::VENDOR_ARM:
 if (std::string::npos != srenderer.find("Mali-T6"))
 driver = DriverDetails::DRIVER_ARM_T6XX;
-else if(std::string::npos != srenderer.find("Mali-4"))
+else if (std::string::npos != srenderer.find("Mali-4"))
 driver = DriverDetails::DRIVER_ARM_4XX;
 break;
 case DriverDetails::VENDOR_MESA:
 {
-if(svendor == "nouveau")
+if (svendor == "nouveau")
 driver = DriverDetails::DRIVER_NOUVEAU;
-else if(svendor == "Intel Open Source Technology Center")
+else if (svendor == "Intel Open Source Technology Center")
 driver = DriverDetails::DRIVER_I965;
-else if(std::string::npos != srenderer.find("AMD") || std::string::npos != srenderer.find("ATI"))
+else if (std::string::npos != srenderer.find("AMD") || std::string::npos != srenderer.find("ATI"))
 driver = DriverDetails::DRIVER_R600;
 int major = 0;
@@ -474,19 +474,19 @@ Renderer::Renderer()
 g_ogl_config.eSupportedGLSLVersion = GLSLES3;
 else
 {
-if(strstr(g_ogl_config.glsl_version, "1.00") || strstr(g_ogl_config.glsl_version, "1.10") || strstr(g_ogl_config.glsl_version, "1.20"))
+if (strstr(g_ogl_config.glsl_version, "1.00") || strstr(g_ogl_config.glsl_version, "1.10") || strstr(g_ogl_config.glsl_version, "1.20"))
 {
 PanicAlert("GPU: OGL ERROR: Need at least GLSL 1.30\n"
 "GPU: Does your video card support OpenGL 3.0?\n"
 "GPU: Your driver supports GLSL %s", g_ogl_config.glsl_version);
 bSuccess = false;
 }
-else if(strstr(g_ogl_config.glsl_version, "1.30"))
+else if (strstr(g_ogl_config.glsl_version, "1.30"))
 {
 g_ogl_config.eSupportedGLSLVersion = GLSL_130;
 g_Config.backend_info.bSupportsEarlyZ = false; // layout keyword is only supported on glsl150+
 }
-else if(strstr(g_ogl_config.glsl_version, "1.40"))
+else if (strstr(g_ogl_config.glsl_version, "1.40"))
 {
 g_ogl_config.eSupportedGLSLVersion = GLSL_140;
 g_Config.backend_info.bSupportsEarlyZ = false; // layout keyword is only supported on glsl150+
@@ -512,7 +512,7 @@ Renderer::Renderer()
 #endif
 int samples;
 glGetIntegerv(GL_SAMPLES, &samples);
-if(samples > 1)
+if (samples > 1)
 {
 // MSAA on default framebuffer isn't working because of glBlitFramebuffer.
 // It also isn't useful as we don't render anything to the default framebuffer.
@@ -531,7 +531,7 @@ Renderer::Renderer()
 }
 glGetIntegerv(GL_MAX_SAMPLES, &g_ogl_config.max_samples);
-if(g_ogl_config.max_samples < 1)
+if (g_ogl_config.max_samples < 1)
 g_ogl_config.max_samples = 1;
 UpdateActiveConfig();
@@ -599,12 +599,12 @@ Renderer::Renderer()
 glBlendColor(0, 0, 0, 0.5f);
 glClearDepthf(1.0f);
-if(g_ActiveConfig.backend_info.bSupportsPrimitiveRestart)
+if (g_ActiveConfig.backend_info.bSupportsPrimitiveRestart)
 {
 if (GLInterface->GetMode() == GLInterfaceMode::MODE_OPENGLES3)
 glEnable(GL_PRIMITIVE_RESTART_FIXED_INDEX);
 else
-if(g_ogl_config.bSupportOGL31)
+if (g_ogl_config.bSupportOGL31)
 {
 glEnable(GL_PRIMITIVE_RESTART);
 glPrimitiveRestartIndex(65535);
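The last hunk above enables primitive restart, which lets a reserved index value split strips without separate draw calls: GLES3 (and desktop GL 4.3+) use a fixed restart index implied by the index type, while desktop GL 3.1 configures it explicitly. A short sketch of the two paths, assuming an active GL context and 16-bit indices:

    #include <GL/glew.h>

    void EnablePrimitiveRestart(bool has_fixed_index_variant)
    {
        if (has_fixed_index_variant)
        {
            // Restart index is implied by the element type (0xFFFF for GL_UNSIGNED_SHORT).
            glEnable(GL_PRIMITIVE_RESTART_FIXED_INDEX);
        }
        else
        {
            glEnable(GL_PRIMITIVE_RESTART);
            glPrimitiveRestartIndex(65535);  // matches GL_UNSIGNED_SHORT index buffers
        }
    }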
@@ -988,7 +988,7 @@ u32 Renderer::AccessEFB(EFBAccessType type, u32 x, u32 y, u32 poke_data)
 // Scale the 32-bit value returned by glReadPixels to a 24-bit
 // value (GC uses a 24-bit Z-buffer).
 // TODO: in RE0 this value is often off by one, which causes lighting to disappear
-if(bpmem.zcontrol.pixel_format == PIXELFMT_RGB565_Z16)
+if (bpmem.zcontrol.pixel_format == PIXELFMT_RGB565_Z16)
 {
 // if Z is in 16 bit format you must return a 16 bit integer
 z = z >> 16;
@@ -1054,12 +1054,12 @@ u32 Renderer::AccessEFB(EFBAccessType type, u32 x, u32 y, u32 poke_data)
 {
 color = RGBA8ToRGB565ToRGBA8(color);
 }
-if(bpmem.zcontrol.pixel_format != PIXELFMT_RGBA6_Z24)
+if (bpmem.zcontrol.pixel_format != PIXELFMT_RGBA6_Z24)
 {
 color |= 0xFF000000;
 }
-if(alpha_read_mode.ReadMode == 2) return color; // GX_READ_NONE
-else if(alpha_read_mode.ReadMode == 1) return (color | 0xFF000000); // GX_READ_FF
+if (alpha_read_mode.ReadMode == 2) return color; // GX_READ_NONE
+else if (alpha_read_mode.ReadMode == 1) return (color | 0xFF000000); // GX_READ_FF
 else /*if(alpha_read_mode.ReadMode == 0)*/ return (color & 0x00FFFFFF); // GX_READ_00
 }
@@ -1110,7 +1110,7 @@ void Renderer::SetViewport()
 }
 // Update the view port
-if(g_ogl_config.bSupportViewportFloat)
+if (g_ogl_config.bSupportViewportFloat)
 {
 glViewportIndexedf(0, X, Y, Width, Height);
 }
@@ -1317,7 +1317,7 @@ void Renderer::SwapImpl(u32 xfbAddr, u32 fbWidth, u32 fbHeight,const EFBRectangl
 const XFBSourceBase* xfbSource = nullptr;
-if(g_ActiveConfig.bUseXFB)
+if (g_ActiveConfig.bUseXFB)
 {
 // Render to the real/postprocessing buffer now.
 PostProcessing::BindTargetFramebuffer();
@@ -1583,7 +1583,7 @@ void Renderer::SwapImpl(u32 xfbAddr, u32 fbWidth, u32 fbHeight,const EFBRectangl
 GL_REPORT_ERRORD();
 }
-if(s_vsync != g_ActiveConfig.IsVSync())
+if (s_vsync != g_ActiveConfig.IsVSync())
 {
 s_vsync = g_ActiveConfig.IsVSync();
 GLInterface->SwapInterval(s_vsync);
@@ -1767,7 +1767,7 @@ void Renderer::FlipImageData(u8 *data, int w, int h, int pixel_width)
 // Flip image upside down. Damn OpenGL.
 for (int y = 0; y < h / 2; ++y)
 {
-for(int x = 0; x < w; ++x)
+for (int x = 0; x < w; ++x)
 {
 for (int delta = 0; delta < pixel_width; ++delta)
 std::swap(data[(y * w + x) * pixel_width + delta], data[((h - 1 - y) * w + x) * pixel_width + delta]);
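FlipImageData above swaps the image's top and bottom halves byte by byte. The same vertical flip can be written by swapping whole rows at once; a standalone sketch, not the function used in the file:

    #include <algorithm>

    // Flip an interleaved image buffer vertically by swapping entire rows.
    void FlipRows(unsigned char* data, int width, int height, int bytes_per_pixel)
    {
        const int row_bytes = width * bytes_per_pixel;
        for (int y = 0; y < height / 2; ++y)
        {
            unsigned char* top = data + y * row_bytes;
            unsigned char* bottom = data + (height - 1 - y) * row_bytes;
            std::swap_ranges(top, top + row_bytes, bottom);
        }
    }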
@@ -66,7 +66,7 @@ static const u32 SYNC_POINTS = 16;
 void StreamBuffer::CreateFences()
 {
 fences = new GLsync[SYNC_POINTS];
-for(u32 i=0; i<SYNC_POINTS; i++)
+for (u32 i=0; i<SYNC_POINTS; i++)
 fences[i] = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
 }
 void StreamBuffer::DeleteFences()
@@ -123,7 +123,7 @@ void StreamBuffer::AllocMemory(size_t size)
 void StreamBuffer::Align(u32 stride)
 {
-if(m_iterator && stride) {
+if (m_iterator && stride) {
 m_iterator--;
 m_iterator = m_iterator - (m_iterator % stride) + stride;
 }
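StreamBuffer::Align() above advances the write cursor to the next multiple of the vertex stride; the initial decrement keeps an already-aligned cursor where it is. The same result via the usual round-up formula, as a standalone sketch:

    // Round an offset up to the nearest multiple of stride (stride == 0 leaves it untouched).
    unsigned int AlignUp(unsigned int offset, unsigned int stride)
    {
        if (stride == 0)
            return offset;
        return ((offset + stride - 1) / stride) * stride;
    }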
@@ -149,7 +149,7 @@ public:
 std::pair<u8*, size_t> Map(size_t size, u32 stride) override {
 Align(stride);
-if(m_iterator + size >= m_size) {
+if (m_iterator + size >= m_size) {
 glBufferData(m_buffertype, m_size, nullptr, GL_STREAM_DRAW);
 m_iterator = 0;
 }
@@ -350,9 +350,9 @@ public:
 StreamBuffer* StreamBuffer::Create(u32 type, size_t size)
 {
 // without basevertex support, only streaming methods whith uploads everything to zero works fine:
-if(!g_ogl_config.bSupportsGLBaseVertex)
+if (!g_ogl_config.bSupportsGLBaseVertex)
 {
-if(!DriverDetails::HasBug(DriverDetails::BUG_BROKENBUFFERSTREAM))
+if (!DriverDetails::HasBug(DriverDetails::BUG_BROKENBUFFERSTREAM))
 return new BufferSubData(type, size);
 // BufferData is by far the worst way, only use it if needed
@@ -360,7 +360,7 @@ StreamBuffer* StreamBuffer::Create(u32 type, size_t size)
 }
 // Prefer the syncing buffers over the orphaning one
-if(g_ogl_config.bSupportsGLSync)
+if (g_ogl_config.bSupportsGLSync)
 {
 // try to use buffer storage whenever possible
 if (g_ogl_config.bSupportsGLBufferStorage &&
@@ -368,16 +368,16 @@ StreamBuffer* StreamBuffer::Create(u32 type, size_t size)
 return new BufferStorage(type, size);
 // pinned memory is almost as fine
-if(g_ogl_config.bSupportsGLPinnedMemory &&
+if (g_ogl_config.bSupportsGLPinnedMemory &&
 !(DriverDetails::HasBug(DriverDetails::BUG_BROKENPINNEDMEMORY) && type == GL_ELEMENT_ARRAY_BUFFER))
 return new PinnedMemory(type, size);
 // don't fall back to MapAnd* for nvidia drivers
-if(DriverDetails::HasBug(DriverDetails::BUG_BROKENUNSYNCMAPPING))
+if (DriverDetails::HasBug(DriverDetails::BUG_BROKENUNSYNCMAPPING))
 return new BufferSubData(type, size);
 // mapping fallback
-if(g_ogl_config.bSupportsGLSync)
+if (g_ogl_config.bSupportsGLSync)
 return new MapAndSync(type, size);
 }
@@ -84,8 +84,8 @@ TextureCache::TCacheEntry::~TCacheEntry()
 {
 if (texture)
 {
-for(auto& gtex : s_Textures)
-if(gtex == texture)
+for (auto& gtex : s_Textures)
+if (gtex == texture)
 gtex = 0;
 glDeleteTextures(1, &texture);
 texture = 0;
@@ -282,14 +282,17 @@ void TextureCache::TCacheEntry::FromRenderTarget(u32 dstAddr, unsigned int dstFo
 glViewport(0, 0, virtual_width, virtual_height);
-if(srcFormat == PIXELFMT_Z24) {
+if (srcFormat == PIXELFMT_Z24)
+{
 s_DepthMatrixProgram.Bind();
-if(s_DepthCbufid != cbufid)
+if (s_DepthCbufid != cbufid)
 glUniform4fv(s_DepthMatrixUniform, 5, colmat);
 s_DepthCbufid = cbufid;
-} else {
+}
+else
+{
 s_ColorMatrixProgram.Bind();
-if(s_ColorCbufid != cbufid)
+if (s_ColorCbufid != cbufid)
 glUniform4fv(s_ColorMatrixUniform, 7, colmat);
 s_ColorCbufid = cbufid;
 }
@@ -391,7 +394,7 @@ TextureCache::TextureCache()
 s_DepthCopyPositionUniform = glGetUniformLocation(s_DepthMatrixProgram.glprogid, "copy_position");
 s_ActiveTexture = -1;
-for(auto& gtex : s_Textures)
+for (auto& gtex : s_Textures)
 gtex = -1;
 }
@@ -409,7 +412,7 @@ void TextureCache::DisableStage(unsigned int stage)
 void TextureCache::SetStage ()
 {
 // -1 is the initial value as we don't know which testure should be bound
-if(s_ActiveTexture != (u32)-1)
+if (s_ActiveTexture != (u32)-1)
 glActiveTexture(GL_TEXTURE0 + s_ActiveTexture);
 }
@@ -106,7 +106,7 @@ void VertexManager::Draw(u32 stride)
 u32 max_index = IndexGenerator::GetNumVerts();
 GLenum primitive_mode = 0;
-switch(current_primitive_type)
+switch (current_primitive_type)
 {
 case PRIMITIVE_POINTS:
 primitive_mode = GL_POINTS;
@@ -119,7 +119,7 @@ void VertexManager::Draw(u32 stride)
 break;
 }
-if(g_ogl_config.bSupportsGLBaseVertex) {
+if (g_ogl_config.bSupportsGLBaseVertex) {
 glDrawRangeElementsBaseVertex(primitive_mode, 0, max_index, index_size, GL_UNSIGNED_SHORT, (u8*)nullptr+s_index_offset, (GLint)s_baseVertex);
 } else {
 glDrawRangeElements(primitive_mode, 0, max_index, index_size, GL_UNSIGNED_SHORT, (u8*)nullptr+s_index_offset);
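The draw path above uses glDrawRangeElementsBaseVertex when base-vertex support is available, so vertex data can live at a nonzero offset inside a shared buffer without rewriting the indices. A hedged sketch of that fallback, with placeholder parameters and an assumed active GL context:

    #include <GL/glew.h>
    #include <cstdint>

    // Issue an indexed draw, preferring the base-vertex entry point when supported.
    void DrawIndexed(GLenum mode, GLuint max_index, GLsizei index_count,
                     std::uintptr_t index_offset, GLint base_vertex, bool has_base_vertex)
    {
        void* indices = reinterpret_cast<void*>(index_offset);  // offset into the bound index buffer
        if (has_base_vertex)
            glDrawRangeElementsBaseVertex(mode, 0, max_index, index_count,
                                          GL_UNSIGNED_SHORT, indices, base_vertex);
        else
            glDrawRangeElements(mode, 0, max_index, index_count, GL_UNSIGNED_SHORT, indices);
    }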
@@ -132,7 +132,7 @@ void VertexManager::vFlush(bool useDstAlpha)
 GLVertexFormat *nativeVertexFmt = (GLVertexFormat*)g_nativeVertexFmt;
 u32 stride = nativeVertexFmt->GetVertexStride();
-if(m_last_vao != nativeVertexFmt->VAO) {
+if (m_last_vao != nativeVertexFmt->VAO) {
 glBindVertexArray(nativeVertexFmt->VAO);
 m_last_vao = nativeVertexFmt->VAO;
 }