/r/opengl
News, information and discussion about OpenGL development.
News and discussion about OpenGL on all platforms.
Tutorials
Reference
Related subreddits
Programming Subreddits
/r/opengl
What do I need to learn to do to be able to do any UI work at a game company? I am thinking I should do like 50 sample projects in OpenGL, but I am not sure what I need to make. Also, does the answer change if I want to work for an aeronautical company?
Hi all, I'm having issues getting multiple uniform blocks to work properly with glBindBufferRange. The program seems to be ignoring all aspects of this call, and I'm not sure why. I have my shader blocks defined as:
layout(std140) uniform Matrices {
mat4 projectionMatrix;
mat4 viewMatrix;
} matrices;
layout(std140) uniform Util {
float elapsedTime;
} util;
and my shader and uniform buffer itself are defined as follows (Most of this is done in separate classes but for the sake of simplicity I've combined it into one file here):
glGenBuffers(1, &uboID);
glBindBuffer(GL_UNIFORM_BUFFER, uboID);
glBufferData(GL_UNIFORM_BUFFER, uboSize, nullptr, GL_STATIC_DRAW);
vertexShader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertexShader, 1, &vertexSource, NULL);
glCompileShader(vertexShader);
compileErrors(vertexShader, "VERTEX");
fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragmentShader, 1, &fragmentSource, NULL);
glCompileShader(fragmentShader);
compileErrors(fragmentShader, "FRAGMENT");
ID = glCreateProgram();
glAttachShader(ID, vertexShader);
glAttachShader(ID, fragmentShader);
glBindBuffer(GL_UNIFORM_BUFFER, se_uniformBuffer.getUboID());
glBindBufferRange(GL_UNIFORM_BUFFER, 0, uboID, 0, 128);
glBindBufferRange(GL_UNIFORM_BUFFER, 1, uboID, 128, 4);
GLuint matrixIndex = glGetUniformBlockIndex(ID, "Matrices");
GLuint utilIndex = glGetUniformBlockIndex(ID, "Util");
glUniformBlockBinding(ID, matrixIndex, 0);
glUniformBlockBinding(ID, utilIndex, 1);
glLinkProgram(ID);
I update the data in my buffer elsewhere in the code, and everything works fine on that end. My matrix block all works fine, but my util block just will not work at all. If I explicitly set the binding to 1 in the shader code, my elapsedTime will just be 0, and if I leave it as is, it pretends the offset is 0 and uses the first float of my first block. Furthermore, if I just change my shader to include the elapsed time in the matrix block like this:
layout(std140) uniform Matrices {
mat4 projectionMatrix;
mat4 viewMatrix;
float elapsedTime;
} matrices;
the elapsedtime variable works as intended, even though the block now exceeds the allocated 128 machine units defined in the glBindBufferRange call. I have no clue what call Im missing or what Im doing wrong, any and all help is appreciated. Thank you!
Goal: load multiple meshes in a single VBO (vertices), CBO (colors) and IBO (indices), leave a specific set of those untouched while iteratively replacing the rest.
Approach: Currently I have a single mesh (an OBJ file containing a cube with 8 vertices and 12 primitives as faces) that I load using `PyWavefront`. I then calculate my buffers to contain 10 cubes. Using PyGame I detect a key press input from the user and call an update on all 3 buffers (for the CBO I even change the color in order to see a visual change).
Code:
import pygame
from pathlib import Path
from pygame.locals import *
from import *
from import shaders
import numpy as np
import pywavefront
from math import sin, cos, tan, atan2
def calcFrustumScale(fFovDeg):
degToRad = np.pi * 2.0 / 360.0
fFovRad = fFovDeg * degToRad
return 1.0 / tan(fFovRad / 2.0)
def calcLerpFactor(fElapsedTime, fLoopDuration):
fValue = (fElapsedTime % fLoopDuration) / fLoopDuration
if fValue > 0.5:
fValue = 1.0 - fValue
return fValue * 2.0
def computeAngleRad(fElapsedTime, fLoopDuration):
fScale = np.pi * 2.0 / fLoopDuration
fCurrTimeThroughLoop = fElapsedTime % fLoopDuration
return fCurrTimeThroughLoop * fScale
def load_model(single_model_path: Path, color: np.array = np.array([*np.random.uniform(0.0, 1.0, 3), 1.0], dtype='float32')):
scene = pywavefront.Wavefront(single_model_path, collect_faces=True)
model = {
'vertex' : np.array(scene.vertices, dtype='float32'),
'face' : np.array(scene.mesh_list[0].faces, dtype='uint32')
}
model['color'] = np.full((len(model['vertex']), 4), color, dtype='float32')
return model
def get_size(model: dict, stype: str='vertex'):
items = model[stype]
#return items.size * items.itemsize
return items.nbytes
def get_transform(elapsed_time):
angle_rad = computeAngleRad(elapsed_time, 2.0)
_cos = cos(angle_rad)
_sin = sin(angle_rad)
transform = np.identity(4, dtype='float32')
transform[0][0] = _cos
transform[2][0] = _sin
transform[0][2] = -_sin
transform[2][2] = _cos
# offset
transform[0][3] = 0.0 #-5.0
transform[1][3] = 0.0 #5.0
transform[2][3] = -5
return transform
# =======================================================
color_rnd = np.array([*np.random.uniform(0.0, 1.0, 3), 1.0], dtype='float32')
print(color_rnd)
modelToCameraMatrixUnif = None
cameraToClipMatrixUnif = None
# Global display variables
cameraToClipMatrix = np.zeros((4,4), dtype='float32')
fFrustumScale = calcFrustumScale(45.0)
model = load_model('sample0.obj')
update_enabled = False
print('Model vertex bytesize:\t', get_size(model, 'vertex'))
print('Model face bytesize: \t', get_size(model, 'face'))
print('Model color bytesize: \t', get_size(model, 'color'))
CUBES_COUNT = 10
VBO_BUFFER_SIZE = CUBES_COUNT * get_size(model, 'vertex')
VBO_SUB_BUFFER_SIZE = get_size(model, 'vertex')
print('VBO total bytesize:', VBO_BUFFER_SIZE)
IBO_BUFFER_SIZE = CUBES_COUNT * get_size(model, 'face')
IBO_SUB_BUFFER_SIZE = get_size(model, 'face')
print('IBO total bytesize:', IBO_BUFFER_SIZE)
CBO_BUFFER_SIZE = CUBES_COUNT * get_size(model, 'color')
CBO_SUB_BUFFER_SIZE = get_size(model, 'color')
print('CBO total bytesize:', CBO_BUFFER_SIZE)
UPDATE_INTERVAL = 10 # Time interval between updates (in frames)
vertex_shader = '''
#version 330
layout(location = 0) in vec4 position;
layout(location = 1) in vec4 color;
smooth out vec4 theColor;
uniform mat4 cameraToClipMatrix;
uniform mat4 modelToCameraMatrix;
void main()
{
vec4 cameraPos = modelToCameraMatrix * position;
gl_Position = cameraToClipMatrix * cameraPos;
theColor = color;
}
'''
fragment_shader = '''
#version 330
smooth in vec4 theColor;
out vec4 outputColor;
void main()
{
outputColor = theColor;
}
'''
vbo = None
cbo = None
ibo = None
vao = None
program = None
def initialize():
global model
global vbo, cbo, ibo, vao
global program
global modelToCameraMatrixUnif, cameraToClipMatrixUnif, cameraToClipMatrix
pygame.init()
display = (800, 800)
pygame.display.set_mode(display, DOUBLEBUF | OPENGL)
vertex_shader_id = shaders.compileShader(vertex_shader, GL_VERTEX_SHADER)
fragment_shader_id = shaders.compileShader(fragment_shader, GL_FRAGMENT_SHADER)
program = shaders.compileProgram(vertex_shader_id, fragment_shader_id)
glUseProgram(program)
glEnable(GL_CULL_FACE)
glCullFace(GL_BACK)
glFrontFace(GL_CW)
glEnable(GL_DEPTH_TEST)
glDepthMask(GL_TRUE)
glDepthFunc(GL_LEQUAL)
glDepthRange(0.0, 1.0)
modelToCameraMatrixUnif = glGetUniformLocation(program, "modelToCameraMatrix")
cameraToClipMatrixUnif = glGetUniformLocation(program, "cameraToClipMatrix")
fzNear = 1.0
fzFar = 100.0
# Note that this and the transformation matrix below are both
# ROW-MAJOR ordered. Thus, it is necessary to pass a transpose
# of the matrix to the glUniform assignment function.
cameraToClipMatrix[0][0] = fFrustumScale
cameraToClipMatrix[1][1] = fFrustumScale
cameraToClipMatrix[2][2] = (fzFar + fzNear) / (fzNear - fzFar)
cameraToClipMatrix[2][3] = -1.0
cameraToClipMatrix[3][2] = (2 * fzFar * fzNear) / (fzNear - fzFar)
glUseProgram(program)
glUniformMatrix4fv(cameraToClipMatrixUnif, 1, GL_FALSE, cameraToClipMatrix.transpose())
glUseProgram(0)
vbo = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, vbo)
glBufferData(
GL_ARRAY_BUFFER,
model['vertex'].flatten(),
GL_STATIC_DRAW
)
glBindBuffer(GL_ARRAY_BUFFER, 0)
cbo = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, cbo)
glBufferData(
GL_ARRAY_BUFFER,
model['color'].flatten(),
GL_STATIC_DRAW
)
glBindBuffer(GL_ARRAY_BUFFER, 0)
ibo = glGenBuffers(1)
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo)
glBufferData(
GL_ELEMENT_ARRAY_BUFFER,
model['face'].flatten(),
GL_STATIC_DRAW
)
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0)
vao = glGenVertexArrays(1)
glBindVertexArray(vao)
vertex_dim = model['vertex'].shape[1]
glBindBuffer(GL_ARRAY_BUFFER, vbo)
glEnableVertexAttribArray(0)
glVertexAttribPointer(0, vertex_dim, GL_FLOAT, GL_FALSE, 0, None)
color_dim = model['color'].shape[1]
glBindBuffer(GL_ARRAY_BUFFER, cbo)
glEnableVertexAttribArray(1)
glVertexAttribPointer(1, color_dim, GL_FLOAT, GL_FALSE, 0, None)
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo)
glBindVertexArray(0)
def update_vbo(offset_prev, offset_curr):
global vbo
global model
print('(VBO) Removing data at ({}:{})'.format(offset_prev, offset_prev + VBO_SUB_BUFFER_SIZE))
print('(VBO) Adding data at ({}:{})'.format(offset_curr, offset_curr + VBO_SUB_BUFFER_SIZE))
glBindBuffer(GL_ARRAY_BUFFER, vbo)
glBufferSubData(GL_ARRAY_BUFFER, offset_prev, VBO_SUB_BUFFER_SIZE, None)
glBufferSubData(GL_ARRAY_BUFFER, offset_curr, VBO_SUB_BUFFER_SIZE, model['vertex'].flatten())
#glBufferSubData(GL_ARRAY_BUFFER, 0, VBO_SUB_BUFFER_SIZE, model['vertex'].flatten())
glBindBuffer(GL_ARRAY_BUFFER, 0)
def update_cbo(offset_prev, offset_curr, color: np.array = np.array([*np.random.uniform(0.0, 1.0, 3), 1.0], dtype='float32')):
global cbo
global model
model['color'] = np.full((len(model['vertex']), 4), color, dtype='float32')
print('(CBO) Removing data at ({}:{})'.format(offset_prev, offset_prev + CBO_SUB_BUFFER_SIZE))
print('(CBO) Adding data at ({}:{})'.format(offset_curr, offset_curr + CBO_SUB_BUFFER_SIZE))
glBindBuffer(GL_ARRAY_BUFFER, cbo)
glBufferSubData(GL_ARRAY_BUFFER, offset_prev, CBO_SUB_BUFFER_SIZE, None)
glBufferSubData(GL_ARRAY_BUFFER, offset_curr, CBO_SUB_BUFFER_SIZE, model['color'].flatten())
#glBufferSubData(GL_ARRAY_BUFFER, 0, CBO_SUB_BUFFER_SIZE, model['color'].flatten())
glBindBuffer(GL_ARRAY_BUFFER, 0)
def update_ibo(offset_prev, offset_curr):
global ibo
global model
print('(IBO) Removing data at ({}:{})'.format(offset_prev, offset_prev + IBO_SUB_BUFFER_SIZE))
print('(IBO) Adding data at ({}:{})'.format(offset_curr, offset_curr + IBO_SUB_BUFFER_SIZE))
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo)
glBufferSubData(GL_ELEMENT_ARRAY_BUFFER, offset_prev, IBO_SUB_BUFFER_SIZE, None)
glBufferSubData(GL_ELEMENT_ARRAY_BUFFER, offset_curr, IBO_SUB_BUFFER_SIZE, model['face'].flatten())
#glBufferSubData(GL_ELEMENT_ARRAY_BUFFER, 0, IBO_SUB_BUFFER_SIZE, model['face'].flatten())
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0)
def render():
global vao, model
global modelToCameraMatrixUnif
glClearColor(0.0, 0.0, 0.0, 0.0)
glClearDepth(1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glUseProgram(program)
elapsed_time = pygame.time.get_ticks() / 1000.0
transform_func = get_transform
transformMatrix = transform_func(elapsed_time=elapsed_time)
glUniformMatrix4fv(modelToCameraMatrixUnif, 1, GL_FALSE, transformMatrix.transpose())
glBindVertexArray(vao)
index_count = model['face'].size
glDrawElements(GL_TRIANGLES, index_count, GL_UNSIGNED_INT, None)
glBindVertexArray(0)
pygame.display.flip()
def main():
global update_enabled
initialize()
frame_count = 0
offsets = {
'vbo' : {
'prev' : 0,
'curr' : 0
},
'cbo' : {
'prev' : 0,
'curr' : 0
},
'ibo' : {
'prev' : 0,
'curr' : 0
}
}
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
pygame.quit()
quit()
if event.key == pygame.K_u:
update_enabled = not update_enabled
if update_enabled:
print('Update triggered')
if update_enabled: # and frame_count % UPDATE_INTERVAL == 0:
idx = 3
offsets['vbo']['curr'] = idx * VBO_SUB_BUFFER_SIZE)
update_vbo(offsets['vbo']['prev'], offsets['vbo']['curr'])
offsets['vbo']['prev'] = offsets['vbo']['curr']
offsets['cbo']['curr'] = idx * CBO_SUB_BUFFER_SIZE
color = np.array([*np.random.uniform(0.0, 1.0, 3), 1.0], dtype='float32')
update_cbo(offsets['cbo']['prev'], offsets['cbo']['curr'], color)
offsets['cbo']['prev'] = offsets['cbo']['curr']
offsets['ibo']['curr'] = idx * IBO_SUB_BUFFER_SIZE
update_ibo(offsets['ibo']['prev'], offsets['ibo']['curr'])
offsets['ibo']['prev'] = offsets['ibo']['curr']
update_enabled = False
render()
frame_count += 1
if __name__ == '__main__':
main()OpenGL.GLOpenGL.GL
According to the PyOpenGL documentation my call of glBufferSubData()
should be correct. My print statement give me:
Model vertex bytesize: 96
Model face bytesize: 144
Model color bytesize: 128
VBO total bytesize: 960
IBO total bytesize: 1440
CBO total bytesize: 1280
which is correct. After all my cube has e.g. 8 faces, each with 3 float32 (4 bytes à piece) components, so 8*3*4 = 96.
However, the very first call (here to update the VBO), leads to error:
raise self._errorClass(
OpenGL.error.GLError: GLError(
err = 1281,
description = b'invalid value',
baseOperation = glBufferSubData,
pyArgs = (
GL_ARRAY_BUFFER,
768,
96,
array([ 1., 1., -1., 1., -1., -1., 1., 1., 1., 1., -1., 1., -1.,
1., -1., -1., -1., -1., -1., 1., 1....,
),
cArgs = (
GL_ARRAY_BUFFER,
768,
96,
array([ 1., 1., -1., 1., -1., -1., 1., 1., 1., 1., -1., 1., -1.,
1., -1., -1., -1., -1., -1., 1., 1....,
),
cArguments = (
GL_ARRAY_BUFFER,
768,
96,
array([ 1., 1., -1., 1., -1., -1., 1., 1., 1., 1., -1., 1., -1.,
1., -1., -1., -1., -1., -1., 1., 1....,
)
)
The invalid value error is related to incorrect offset or offset + size. I cannot see any issue with the data portion of the call since the that is the same data I use to fill my buffer at the beginning and it renders perfectly fine. Perhaps my logic for calculating the offsets for the colors and indices is incorrect but at least the VBO should work. Each sub-data chunk (representing the respective components - vertices, colors or indices) can be accessed by a given IDX multiplied by the respective sub-data byte size. This allows me to access all three types of data for the same object I want to render. If the IDX is anything but 0, my code crashes with the above mentioned error.
Hi everyone,
I am trying to convert mouse co-ordinates to world space co-ordinates in OpenGL for uni coursework. I am very much a beginner to OpenGL and don't understand much so I have been using numerous tutorials and threads but not seem to work, any help please?
Let me know if it annoys y’all for posting these sort of videos but I love sharing my progress!
I just used the GLFW documentation code but my glad library is making the window immediately close. I'm guessing I messed up the installation. Anyways, here's the code
Hello to the group,
I have a logarithmic depth conversion in my vertex shader for the depth value like this:
gl_Position.z = 2.0*log(gl_Position.w/near)/log(far/near) - 1;
gl_Position.z *= gl_Position.w;
And in a random fragment shader I am reading from the depth buffer to get the depth value in the range 0 - 1. How can I convert this to the real depth value since this is after the logarithmic conversion?
Thanks in advance
I'm attempting to write a function to convert screen coordinates to world coordinates, but I'm having issues. Here's my current version of the function:
glm::vec2 screen_to_world_coords(glm::ivec2 pixel, const perspective_camera& camera, float depth) {
const float z_ndc = (((depth-camera.position().z)/(camera.far() - camera.near())) - 0.5f) * 2.f;
const glm::vec4 mouse_ndc = glm::vec4((glm::vec2(pixel)/glm::vec2(ctx.window_size) - 0.5f) * 2.f, z_ndc, 1);
const glm::vec4 mouse_world = camera.inverse_pv() * mouse_ndc;
return mouse_world/mouse_world.z;
}
Using a debugger, I know the mouse normalized device coordinates are correct, but I seem to be getting the same very small numbers after applying the inverse projection and view matrix to the mouse_ndc no matter the depth.
Here's where the inverse matrix comes from.
glm::vec3 offset = (glm::vec3(this->m_screen_size, 0))/2.f;
glm::vec3 v_position = (this->m_position)/this->m_zoom;
auto view = glm::lookAt(
v_position,
v_position + glm::vec3(0, 0, -1),
glm::vec3(0, -1, 0)
);
auto mirr = glm::mat4(1);
mirr[0][0] = -1.0;
this->m_view_matrix = mirr * view;
this->m_inverse_pv = glm::inverse(this->m_projection_matrix * this->m_view_matrix);
At the top left corner of my screen I get -0.001646, 0.000914 as the result from this when querying a depth of 0 with a camera positioned at 0,0,350 and a z_near and z_far of 0.1 and 1000.
And maybe this is relevant:
glm::perspectiveFov(90.f, render_target_size.x, render_target_size.y, this->m_z_near, this->m_z_far);
I've been stuck on this for a few hours and seem to have come to a head here. Any help is appreciated and let me know if any more info is needed.
Thanks
EDIT:
I have a working solution.
After several more hours trying to understand the perspective transformation I realized I need to first multiply by the depth. But even then, using the inverse of the projection matrix did not work. Not sure why that is. Instead, I manually did the revere of the projection matrix on the normalized coordinates of the mouse. The result is very simple:
glm::vec2 screen_to_world_coords(glm::ivec2 pixel, const perspective_camera& camera, float depth) {
const glm::vec2 mouse_ndc = (glm::vec2(pixel)/glm::vec2(ctx.window_size) - 0.5f) * 2.f;
float z_offset = camera.position().z-depth;
const auto p = camera.projection();
return glm::vec3(
mouse_ndc.x * z_offset / p[0][0],
mouse_ndc.y * z_offset / p[1][1],
z_offset
) - camera.position();
}
Hey everyone, I have been creating some animations using OpenGL (actually a library but it’s OpenGL under the hood). My strategy for UI and the animations has been to draw everything to a render texture, then draw that to the screen at whatever size the window is (while respecting aspect ratio etc). Anyway I have noticed on mobile when the screen/texture gets too small the lines start to disappear and the text starts to look strange. I think it’s as simple as not having enough pixels in the smaller space to draw everything in the texture (can’t draw 0.2 pixels). I was wondering if there are any strategies that people often use for this same idea? I am just trying to find an easy way to draw and have it look the same at all screen sizes. Is a render texture the wrong way to go about this? Any help is much appreciated
I've created a simple program to demonstrate this issue. I'm very new to OpenGL and hope that someone can explain why it behaves this way.
The app does the following:
I've tried posting this in a few different places looking for help and no one wants to touch it. I'm either a) doing something wrong, or b) completely mistaken about how OpenGL works.
hi guys , i'm doing tic tac toe game with c++ and opengl I wanted to apply scaling so I thought about when I click the mouse to enter X for example it starts small the getting bigger to suitable size , the same for the O , I did it and used chatgpt but it doesn't work can somebody help me ?
i am haveing an open gl error 45545 faild to find suitable pixel format can anyone help me i am useing an mac intel hd graphics card with catalina OS
I’m know nothing about graphics programming, at all. Literally nothing. And I’ve been told if I don’t know graphics programming I will never learn OpenGL. But I’m kind of in mental loop. Like “I hate how this shit is so hard, but I want to learn it and I don’t want to give up… I WANT to give up, but I’ll try to dig deeper tomorrow”. How can I learn the concepts and the overall fundamentals of OpenGL.
I know c++, www.learncpp.com is the best thing I’ve ever seen in my life, I like how it explains things deeply. And I randomly checked out www.learnwebl.com and they give a complete overview fundamentals explanation of computer graphics as well as definitions. But OpenGL.. man…. Holy moly… so I have 2 questions.
In OpenGL, is EVERYTHING functions and parameters given by the header file?
And Where can I learn the FUNDAMENTALS of OpenGL. You know that 1+1 equals 2? And you know that if I’m someone asks you “what’s 28 plus 1” you’re going to just say 29, because you know that the mathematical fundamental that 1 is a singular digit.
When I read the OpenGL documentation it feels like I’m learning how to build a rocket with assembly.m
I'd like to preface this by saying that I'm only 2-3 weeks deep into making this project and because of this I only consider myself a beginner. This project begun as a strictly only C project but because of advice from people from this subreddit, I jumped to C++.I want to ask - is the code structured good enough (I know that the examples are in the same folder as the framework, and it is not good), also is the resource management system good? I used to separate hashmaps since I tried to use variants however they were really messy.
Repository
Hi guys here's another update of prisma engine, recently I've added cascaded shadow maps to my engine and I would like to showcase them on NVIDIA Orca scene.
Here's the GitHub repo:
https://github.com/deni2312/prisma-engine
Hope it's interesting
I've been learning the C programming language,and i want to learn openGl, i heard that learnopengl.com is one of the best resources out there,but i found it uses C++.can i still follow it using C ,or should i learn C++ in order to keep up with the book?
Hi all,
I'm very beginner onto GLSL and I try to understand the logical behind that.
I want to customise a fragment code that I imported onto my software (Lively), to make the background transparent and also manage the opacity of the circles. I would like also to add a component to import any image as a background, but it's secondary.
All the edits will be kept for my personal use !
Thank you very much !
I was not expecting OpenGl to be THAT hard, there’s so many functions and parameters I have to learn in OpenGL. It seems impossible to learn them all. Does anyone have any beginner friendly resources to learn OpenGL? Or should I just stick with OpenGL. ( www.learnopengl.com)
My first week learning c++ was hard, yes. But I got better and now I understand most code just by looking at it. I’ve been learning c++ for 2 month for now. But OpenGL is just 100000 functions with needed parameters…
Would it be best to keep swimming in this ocean full of waves that have immense force, or should I just stick to a boat, and glide my way through 😭
I want to display on the top right of the screen a timer counting down
I was reading Chapter 27. Motion Blur as a Post-Processing Effect
from GPU Gems 3
and trying to convert that theory into something that works with OpenGL and GLSL.
My game uses a multisampled FBO for post processing. I tried addimg the ability for that FBO to store a depth texture which I'll use in my post processing shader for processing that motion blur (using that depth/velocity texture).
The issue is that all I get is a black screen. I'm not sure what I'm doing wrong. I even tried running the game with RenderDoc, but I can't seem to figure it out.
I also had a question:
Do I need to render my scene similar to what I did with my ShadowMap? First I render the scene from the light's point of view and then I render the scene normally and apply the shadows using that depth texture.
Would I have to do something similar except from the actual camera's point of view (it's a first person game)? So in other words, I'd have to render the scene 3 times?
I'm not doing this at the moment. Let me show you all what I'm doing actually:
This is how I create my FBO:
void createFBO(FBO& fbo, FBO& intermediateFBO, unsigned int frameWidth, unsigned int frameHeight) {
unsigned int msaaSamples = 4;
glGenFramebuffers(1, &fbo.buffer);
glBindFramebuffer(GL_FRAMEBUFFER, fbo.buffer);
// Color Attachment
glGenTextures(1, &fbo.colorTextureBuffer);
glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, fbo.colorTextureBuffer);
glTexImage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE, msaaSamples, GL_RGB, frameWidth, frameHeight, GL_TRUE);
glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, 0);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D_MULTISAMPLE, fbo.colorTextureBuffer, 0);
// Render Buffer
glGenRenderbuffers(1, &fbo.renderBuffer);
glBindRenderbuffer(GL_RENDERBUFFER, fbo.renderBuffer);
glRenderbufferStorageMultisample(GL_RENDERBUFFER, msaaSamples, GL_DEPTH24_STENCIL8, frameWidth, frameHeight);
glBindRenderbuffer(GL_RENDERBUFFER, 0);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, fbo.renderBuffer);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
std::cout << "Error, FrameBuffer not complete." << std::endl;
}
glGenFramebuffers(1, &intermediateFBO.buffer);
glBindFramebuffer(GL_FRAMEBUFFER, intermediateFBO.buffer);
glGenTextures(1, &intermediateFBO.colorTextureBuffer);
glBindTexture(GL_TEXTURE_2D, intermediateFBO.colorTextureBuffer);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, frameWidth, frameHeight, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, intermediateFBO.colorTextureBuffer, 0);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
std::cout << "Error, Intermediate FrameBuffer not complete." << std::endl;
}
// Motion Blur Buffer
glGenTextures(1, &intermediateFBO.motionBlurTextureBuffer);
glBindTexture(GL_TEXTURE_2D, intermediateFBO.motionBlurTextureBuffer);
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32F, frameWidth, frameHeight, 0, GL_RED, GL_FLOAT, NULL); // Use GL_R32F for single-component texture
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT1, GL_TEXTURE_2D, intermediateFBO.motionBlurTextureBuffer, 0); // Attach as color attachment 1
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
std::cout << "Error, Intermediate FrameBuffer not complete." << std::endl;
}
glBindFramebuffer(GL_FRAMEBUFFER, 0);
}
This is the post processing fragment shader:
#version 420 core
out vec4 FragColor;
in vec2 TexCoords;
in vec2 FragCoord;
uniform sampler2D screenTexture;
uniform sampler2D depthTexture;
void main() {
float depth = texture(depthTexture, gl_FragCoord.xy / textureSize(depthTexture, 0)).r;
FragColor = vec4(vec3(depth), 1.0);
}
If I use the screenTexture
with FragColor
instead, I can see my scene perfectly. I'm able to apply all the post-precessing effects I need, but depthTexture
doesn't work.
I also made sure to get those locations right:
shader = &resourceManager.findShaderByName("post");
GL::API::useProgram(shader->getProgram());
timeLocation = glGetUniformLocation(shader->getProgram(), "time");
screenSizeLocation = glGetUniformLocation(shader->getProgram(), "screenSize");
GLuint screenTextureLocation = glGetUniformLocation(shader->getProgram(), "screenTexture");
GLuint depthTextureLocation = glGetUniformLocation(shader->getProgram(), "depthTexture");
GL::API::setInt(screenTextureLocation, 0);
GL::API::setInt(depthTextureLocation, 1);
GL::API::useProgram(0);
and when it's time to render the post processing quad I do the following:
glActiveTexture(GL_TEXTURE0);
GL::API::bindTexture(intermediateFBO.colorTextureBuffer);
glActiveTexture(GL_TEXTURE1);
GL::API::bindTexture(intermediateFBO.motionBlurTextureBuffer);
GL::API::clearColor();
glViewport(0, 0, width, height);
GL::API::bindMesh(quadVAO);
GL::API::useProgram(shader->getProgram());
UI::draw();
Oh and one last thing, this is what my main render function looks like:
// Shadows
shadowRenderer.begin(sun); // This is another fbo
drawScene(time, resourceManager, worldManager, shadowRenderer.getModelLocation());
shadowRenderer.end();
postProcessor.begin();
phongRenderer.begin(game.windowWidth, game.windowHeight, camera, sun, shadowRenderer.getLightSpace(), shadowRenderer.getDepthTexture(), time.getNow());
drawScene(time, resourceManager, worldManager, phongRenderer.getModelLocation());
phongRenderer.end();
postProcessor.end(game.windowWidth, game.windowHeight);
GL::API::enableDepthTest(false);
postProcessor.render(game.windowWidth, game.windowHeight, time.getNow());
Thanks!
Hello.
I'm working on some side project that is a solar system simulation and don't know one thing.
I have a textured model of earth, when i put a texture onto earth sphere this texture is rotated for about 90 degrees - north pole is around equator - and i want to rotate it, so ive created a rotation matrix that do it and i write it as "base_model_matrix" sphere class member.
Next i want the planet to rotate around Y axis so i take the "base_model_matrix" created previously and rotate this matrix for some degrees and around Y axis, but this rotation axis is totally different than Y because i previously rotated the model for about 90 degrees so as far as i understand, the basis of coordinate system have changed.
Can anyone give me some advice how to make these kind of multiple rotations?
I'm trying to load models in plain c rather than c++, and most tutorials out there use c++ and std::vectors. I'm wondering if there's a way to replace that with c pointers/arrays since it has no default vector type?
Hello,
I have a problem with OpenGL regarding glRotatef. It seems like this function doesn't exist.
Here's g++ output:
main.cpp: In function ‘int main()’:
main.cpp:132:9: error: ‘glRotatef’ was not declared in this scope
132 | glRotatef(45.0f, 0.0f, 0.0f, 1.0f);
| ^~~~~~~~~
make: *** [<builtin>: main.o] Error 1
my includes:
#include <fstream>
#include <sstream>
#include <vector>
#include <cstdio>
#include <cstring>
#include "glad.h"
#include <GLFW/glfw3.h>
and OpenGL version:
OpenGL version string: 3.3 (Compatibility Profile) Mesa 20.3.5
I've checked GL/gl.h
header and it's right there in front of me. including GL/gl.h
didn't help though. what am I doing wrong?
Thanks!
I'm asking since I end up including glad
in header files, but I'd like to include all the OpenGL stuff in implementation files and just use unsigned int
in the header files.
That way I don't need to expose all the OpenGL glad code to my game.
I tried reading up on the topic, and what I learned was that these data types are used so these numbers are of the same type on any computer.
I'm trying to move my project from runtime compiled shaders to precompiled SPIRV. It was going well until I moved from an NVidia GPU to an AMD GPU (Windows 11, Radeon RX 6800S). I'm using plain uniforms for my transformation matrices. When I use glGetActiveUniform to get their type on NVidia, I get GL_FLOAT_MAT4. But when I do it on AMD, I get GL_FLOAT_VEC4 of size 4.
I don't want to have to make weird assumptions about mat4 being an array of vec4 only on certain systems. I also don't want to just move all mat4s to arrays of vec4, since this means I have to do extra work with uniform layout locations in the shaders. I could switch to some alternative way to pass my matrices in like SSBO, but I'd rather not yet, and this seems like basic usage that should work fine. I'm assuming I goofed something up and not a driver bug, since it's such a basic problem. I created a minimal repro case and pushed it to a git repo. What might cause this?
https://github.com/cubeleo/GLSPIRVMatrixBugAMD.git
Thanks for taking a look if you do!
P.S. I decided not to use glew or glad so I'm loading GL entry points manually and there is a bit more boiler plate than I'd like here. Also, make sure to git clone --recursive so you get glfw.
I have an orthographic camera and a quad that is 1 unit size in object space. I apply these translations:
Translation: (1, 0, 0)
Scale: (1, 1, 1)
RotationZ: 0
The problem is that my quad when moved 1 unit goes to the edge of the screen as shown in the attached figure. Eighter there is a silly mistake or I don't understand something...
Here is my code:
float aspect = (float)width / (float)height;
if (aspect < 1)
{
aspect = (float)height / (float)width;
}
void Camera::UpdateProjection(float aspectRatio)
{
float top = 10.f / 2; // top 5
float right = top * aspectRatio; // right 8
_projection = glm::ortho(-right, right, -top, top, -100.0f, 100.0f);
}
glm::mat4 Camera::GetVP()
{
glm::mat4 view = glm::mat4(1);
view = glm::translate(view, _cameraPos);
return _projection * view;
}
glm::mat4 GetModel()
{
glm::mat4 model = glm::mat4(1);
model = glm::translate(model, Position); // position (1, 0, 0)
model = glm::scale(model, Scale); // scale (1, 1, 1)
model = glm::rotate(model, glm::radians(RotationZ), glm::vec3(0, 0, 1));
return model;
}
void Draw()
{
Shader->Use();
Shader->SetMatrix4("MVP", parent->transform->GetModel() * Game::Get().ActiveCamera->GetVP());
Shader->SetMatrix4("Model", parent->transform->GetModel());
glBindVertexArray(_VAO);
glDrawElements(GL_TRIANGLES, _indicesCount, GL_UNSIGNED_INT, 0);
}
#version 410 core
layout (location = 0) in vec3 Pos;
layout (location = 1) in vec2 UV;
uniform mat4 MVP;
uniform mat4 Model;
void main()
{
gl_Position = MVP * vec4(Pos, 1.0f);
}
I have an orthographic camera. I'm rendering a quad that is 1 unit size in object space.
I apply these transformations:
Position: (1, 0, 0)
Scale: (1, 1, 1)
zRotation: 0
The problem is that when I translate my quad 1 unit to the right it moves to the edge of the screen as shown in the picture. I'm trying to pinpoint the problem however I can't find it.
Code:glm::mat4 Camera::GetVP()
{
glm::mat4 view = glm::mat4(1);
view = glm::translate(view, _cameraPos);
return _projection * view;
}
void Camera::UpdateProjection(float aspectRatio)
{
float top = 10.f / 2;
float right = top * aspectRatio;
_projection = glm::ortho(-right, right, -top, top, -100.0f, 100.0f);
}
// Aspect Calculationsfloat aspect = (float)width / (float)height;
if (aspect < 1)
aspect = (float)height / (float)width;
// DrawShader->Use();
Shader->SetMatrix4("MVP", parent->transform->GetModel() * Game::Get().ActiveCamera->GetVP());
Shader->SetMatrix4("Model", parent->transform->GetModel());
glBindVertexArray(_VAO);
glDrawElements(GL_TRIANGLES, _indicesCount, GL_UNSIGNED_INT, 0);
// Shader
#shader vertex#version 410 core
layout (location = 0) in vec3 Pos;
layout (location = 1) in vec2 UV;
uniform mat4 MVP;
uniform mat4 Model;
out vec2 uv;
void main()
{
gl_Position = MVP * vec4(Pos, 1.0f);
uv = UV;
}