Display Images on WebGL Canvas
First set up our index.html
with a canvas and a link to an external script, script.js
. We are setting its type to module
, since we will be creating other JavaScript files to organize our work.
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>WebGL Image Viewer</title>
<!-- Full-viewport canvas; display:block removes the inline-element gap below it. -->
<style>
body { margin: 0; }
canvas { display: block; width: 100vw; height: 100vh; }
</style>
</head>
<body>
<canvas id="glcanvas"></canvas>
<!-- type="module" enables the ES-module imports used by script.js -->
<script type="module" src="./script.js"></script>
</body>
</html>
Before creating our script.js
file, we are going to create our shaders and program in a program.js
file, and export the canvas element, the gl context, and the program. Note: we will change the shaders shortly but this is the most basic shader.
// Minimal GLSL ES 3.00 shader pair: pass-through vertex positions and a
// solid red output. `.trim()` removes the leading newline so `#version`
// is the very first token of the shader source.
const vs = `
#version 300 es
layout(location = 0) in vec2 vertex;
void main() {
gl_Position = vec4(vertex, 0.0, 1.0);
}
`.trim();
const fs = `
#version 300 es
precision mediump float;
out vec4 outColor;
void main() {
outColor = vec4(1., 0., 0., 1.);
}
`.trim();
// Grab the canvas declared in index.html and create a WebGL 2 context on it.
const canvas = document.getElementById("glcanvas");
const gl = canvas.getContext("webgl2");
/**
 * Compile a shader of the given type from source.
 *
 * Fixed: the original returned the shader without checking compile
 * status, so a GLSL error surfaced only as a confusing link failure
 * later. Now throws with the driver's info log on compile failure.
 *
 * @param {WebGL2RenderingContext} gl - the rendering context
 * @param {number} type - gl.VERTEX_SHADER or gl.FRAGMENT_SHADER
 * @param {string} source - GLSL source text
 * @returns {WebGLShader} the compiled shader
 * @throws {Error} if compilation fails
 */
function createShader(gl, type, source) {
  const shader = gl.createShader(type);
  gl.shaderSource(shader, source);
  gl.compileShader(shader);
  if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) {
    const log = gl.getShaderInfoLog(shader);
    gl.deleteShader(shader); // don't leak the failed shader object
    throw new Error(log);
  }
  return shader;
}
/**
 * Link two compiled shaders into a usable GPU program.
 *
 * @param {WebGL2RenderingContext} gl - the rendering context
 * @param {WebGLShader} vs - compiled vertex shader
 * @param {WebGLShader} fs - compiled fragment shader
 * @returns {WebGLProgram} the linked program
 * @throws {Error} if linking fails
 */
function createProgram(gl, vs, fs) {
  const program = gl.createProgram();
  for (const shader of [vs, fs]) {
    gl.attachShader(program, shader);
  }
  gl.linkProgram(program);
  const linked = gl.getProgramParameter(program, gl.LINK_STATUS);
  if (!linked) {
    throw new Error(gl.getProgramInfoLog(program));
  }
  return program;
}
// Compile both shaders, link them, and export the shared GL objects
// for use by script.js.
const program = createProgram(
gl,
createShader(gl, gl.VERTEX_SHADER, vs),
createShader(gl, gl.FRAGMENT_SHADER, fs)
);
export { gl, canvas, program }
Let's create our script.js
file to draw a rectangle in the top right corner of our canvas.
Note: when drawing rectangles in WebGL, you can draw two triangles that share two points, or you can use gl.TRIANGLE_STRIP, which automatically connects the middle two points to form a rectangle — so we shall take that approach.
import { gl, canvas, program } from './program.js';
// The VAO records the attribute/buffer bindings made below so render()
// can restore them all with a single bind.
const vao = gl.createVertexArray();
gl.bindVertexArray(vao);
// === Vertex Buffer ===
// Clip-space corners of the quad: (0,0)..(1,1) is the top-right
// quadrant of the canvas. Ordered for a TRIANGLE_STRIP.
const vertices = new Float32Array([
0, 0,
1, 0,
0, 1,
1, 1,
]);
const vbo = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, vbo);
gl.bufferData(gl.ARRAY_BUFFER, vertices, gl.STATIC_DRAW);
// Attribute 0 = `vertex` in the shader: 2 floats per vertex, tightly packed.
gl.vertexAttribPointer(0, 2, gl.FLOAT, false, 0, 0);
gl.enableVertexAttribArray(0);
const render = () => {
// Match the GL viewport to the canvas backing store, clear to white,
// then draw the 4 strip vertices as two triangles.
gl.viewport(0, 0, canvas.width, canvas.height);
gl.clearColor(1, 1, 1, 1);
gl.clear(gl.COLOR_BUFFER_BIT);
gl.useProgram(program);
gl.bindVertexArray(vao);
gl.drawArrays(gl.TRIANGLE_STRIP, 0, 4);
}
render();
Now you should see a red rectangle in the top right of your screen. Next, we want this rectangle to display an image rather than just the color red. To do this, we need to update our shaders to take in a texture (image) and coordinates for how the image should be drawn on the rectangle.
The vertex shader is updated to take in the texture coordinates and pass it to the fragment shader:
#version 300 es
layout(location = 0) in vec2 vertex;
layout(location = 1) in vec2 texCoord;
// Varying: interpolated across the triangle and read by the fragment shader.
out vec2 v_texCoord;
void main() {
gl_Position = vec4(vertex, 0.0, 1.0);
v_texCoord = texCoord;
}
The fragment shader pulls in the texture coordinates and an image uniform. From there, the texture function will sample the pixels from the image at the coordinates passed and return a vec4
rgba value.
#version 300 es
precision mediump float;
// Interpolated texture coordinate from the vertex shader.
in vec2 v_texCoord;
// The image to sample; bound to texture unit 0 from JavaScript.
uniform sampler2D uImage;
out vec4 outColor;
void main() {
// Fixed: the uniform is declared `uImage`, so it must be sampled as
// `uImage` — the original `u_image` was undeclared and would not compile.
outColor = texture(uImage, v_texCoord);
}
Now we just need an image and to pass it and texture coordinates to our program. Here is the sample image we will use below. To download, click the image.

Texture coordinates are normalized from 0 to 1, meaning that the bottom left of the image is at (0,0) and the top right is at (1,1), so let's update our script.js
to pass coordinates that cover our whole rectangle:
// === Texture Coordinate Buffer ===
// One (u, v) pair per quad vertex, in the same strip order as the
// position buffer, covering the full [0,1] range of the image.
const texCoords = new Float32Array([
0, 0,
1, 0,
0, 1,
1, 1,
]);
const tbo = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, tbo);
gl.bufferData(gl.ARRAY_BUFFER, texCoords, gl.STATIC_DRAW);
// Attribute 1 = `texCoord` in the vertex shader.
gl.vertexAttribPointer(1, 2, gl.FLOAT, false, 0, 0);
gl.enableVertexAttribArray(1);
Passing the image to the program is a bit trickier. We must first create an HTMLImageElement that will load the image and we will create a texture from the loaded image and finally pass that texture to our program.
Create Texture
// === Image Load ===
let texture;
let img = new Image();
img.src = "webgl-image.png";
// Texture creation must wait until the image has finished loading.
img.onload = () => {
texture = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, texture);
// Flip rows on upload: images store the first pixel at the top-left,
// while WebGL's texture origin is the bottom-left.
gl.pixelStorei(gl.UNPACK_FLIP_Y_WEBGL, true);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA,
gl.UNSIGNED_BYTE, img);
// Texture Parameters
// NEAREST filtering needs no mipmaps; CLAMP_TO_EDGE controls sampling
// outside the [0,1] coordinate range.
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
// draw after loading
render();
};
There is a lot happening but here are the important bits:
- Flip the Y value because WebGL has the origin in the bottom left but the image has its first pixel at the top left.
- Texture parameters are required for the texture to render: the first two determine how a pixel is selected when sampling (filtering), and the last two control what happens when sampling outside the texture (wrapping) — other options include repeating the texture.
- Move our render call until after the image is loaded and texture created.
Pass texture to program
// Look up the sampler uniform once; its value is the texture UNIT index.
const uImage = gl.getUniformLocation(program, "uImage");
// In render function
// Bind our texture to unit 0 and point the sampler at that unit.
gl.activeTexture(gl.TEXTURE0);
gl.bindTexture(gl.TEXTURE_2D, texture);
gl.uniform1i(uImage, 0);
Code so far:
import { gl, canvas, program } from './program.js';
// The VAO records all attribute/buffer bindings made below so render()
// can restore them with a single bind.
const vao = gl.createVertexArray();
gl.bindVertexArray(vao);
// === Vertex Buffer ===
// Quad corners in clip space (top-right quadrant), TRIANGLE_STRIP order.
const vertices = new Float32Array([
0, 0,
1, 0,
0, 1,
1, 1,
]);
const vbo = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, vbo);
gl.bufferData(gl.ARRAY_BUFFER, vertices, gl.STATIC_DRAW);
gl.vertexAttribPointer(0, 2, gl.FLOAT, false, 0, 0);
gl.enableVertexAttribArray(0);
// === Texture Coordinate Buffer ===
// One (u, v) per vertex, matching the position order, spanning the image.
const texCoords = new Float32Array([
0, 0,
1, 0,
0, 1,
1, 1,
]);
const tbo = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, tbo);
gl.bufferData(gl.ARRAY_BUFFER, texCoords, gl.STATIC_DRAW);
gl.vertexAttribPointer(1, 2, gl.FLOAT, false, 0, 0);
gl.enableVertexAttribArray(1);
// === Image Load ===
let texture;
let img = new Image();
img.src = "webgl-image.png";
// Texture creation waits until the image has finished loading.
img.onload = () => {
texture = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, texture);
// Flip rows on upload: images are top-left origin, GL is bottom-left.
gl.pixelStorei(gl.UNPACK_FLIP_Y_WEBGL, true);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA,
gl.UNSIGNED_BYTE, img);
// Texture Parameters
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
// draw after loading
render();
};
// The sampler uniform's value is the texture UNIT index, not the texture.
const uImage = gl.getUniformLocation(program, "uImage");
const render = () => {
gl.viewport(0, 0, canvas.width, canvas.height);
gl.clearColor(1, 1, 1, 1);
gl.clear(gl.COLOR_BUFFER_BIT);
gl.useProgram(program);
gl.bindVertexArray(vao);
// Bind our texture to unit 0 and point the sampler at unit 0.
gl.activeTexture(gl.TEXTURE0);
gl.bindTexture(gl.TEXTURE_2D, texture);
gl.uniform1i(uImage, 0);
gl.drawArrays(gl.TRIANGLE_STRIP, 0, 4);
}
Now we have a blurry image that does not keep its original dimensions. To fix the issue of dimension, two updates are required: 1) update the vertices to match the dimensions of the image and 2) create a projection matrix to normalize the canvas size to match the WebGL clip space.
To update the vertices to match the image, create our vertices in the image onload
handler and update the width of our triangle strip to be the aspect ratio of the image. The aspect ratio is:
const aspect = img.width / img.height;
// === Vertex Buffer ===
// The quad is `aspect` wide and 1 unit tall, giving it the image's
// width-to-height proportions before projection is applied.
const width = aspect;
const height = 1;
const vertices = new Float32Array([
0, 0,
width, 0,
0, height,
width, height,
]);
Next, let's update our vertex shader to use a projection matrix to account for canvas aspect ratio:
#version 300 es
layout(location = 0) in vec2 vertex;
layout(location = 1) in vec2 texCoord;
// Fixed: the declaration was misspelled `uProjectio`, leaving the
// `uProjection` used below undeclared — the shader would not compile.
uniform mat4 uProjection;
out vec2 v_texCoord;
void main() {
gl_Position = uProjection * vec4(vertex, 0.0, 1.0);
v_texCoord = texCoord;
}
Let's create the projection matrix, pass it to our program, and redraw on canvas resize:
// Size the canvas backing store to its CSS size so the drawing buffer
// matches the displayed area.
canvas.width = canvas.clientWidth;
canvas.height = canvas.clientHeight;
// Build a column-major mat4 that scales x by height/width, cancelling
// the canvas stretch so the quad keeps its aspect ratio in clip space.
const makeProjection = () =>
  new Float32Array([
    Math.fround(canvas.height / canvas.width), 0, 0, 0,
    0, 1, 0, 0,
    0, 0, 1, 0,
    0, 0, 0, 1,
  ]);
let projection = makeProjection();
// Look the location up once; it is fixed for a linked program.
const uProjection = gl.getUniformLocation(program, "uProjection");
// Fixed: uniform* calls write to the CURRENTLY BOUND program, so the
// program must be in use first — the original set the uniform without
// gl.useProgram, which generates INVALID_OPERATION before the first render.
gl.useProgram(program);
gl.uniformMatrix4fv(uProjection, false, projection);
window.addEventListener('resize', () => {
  canvas.width = canvas.clientWidth;
  canvas.height = canvas.clientHeight;
  projection = makeProjection();
  gl.useProgram(program);
  gl.uniformMatrix4fv(uProjection, false, projection);
  render();
});
This is looking good but we can still do better. Let's use the device pixel ratio which will account for actual device pixels rather than css pixels. We can just multiply the canvas width and height by this ratio, like so:
// Account for high-DPI displays: size the drawing buffer in physical
// device pixels rather than CSS pixels.
const DPR = window.devicePixelRatio || 1;
canvas.width = canvas.clientWidth * DPR;
canvas.height = canvas.clientHeight * DPR;
// Note: update the resize handler as well:
window.addEventListener('resize', () => {
canvas.width = canvas.clientWidth * DPR;
canvas.height = canvas.clientHeight * DPR;
// ... rest of function
});
DPR vs No DPR
![]()
Here is the final code:
index.html
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>WebGL Image Viewer</title>
<!-- Full-viewport canvas; display:block removes the inline-element gap below it. -->
<style>
body {
margin: 0;
}
canvas {
display: block;
width: 100vw;
height: 100vh;
}
</style>
</head>
<body>
<canvas id="glcanvas"></canvas>
<!-- type="module" enables the ES-module imports used by script.js -->
<script type="module" src="./script.js"></script>
</body>
</html>
program.js
// Final shader pair: the vertex shader applies a projection matrix and
// forwards per-vertex texture coordinates; the fragment shader samples
// the bound image. `.trim()` keeps `#version` as the first line.
const vs = `
#version 300 es
layout(location = 0) in vec2 vertex;
layout(location = 1) in vec2 texCoord;
uniform mat4 uProjection;
out vec2 v_texCoord;
void main() {
gl_Position = uProjection * vec4(vertex, 0.0, 1.0);
v_texCoord = texCoord;
}
`.trim();
const fs = `
#version 300 es
precision mediump float;
in vec2 v_texCoord;
uniform sampler2D uImage;
out vec4 outColor;
void main() {
outColor = texture(uImage, v_texCoord);
}
`.trim();
// Canvas element and WebGL 2 context shared with script.js.
const canvas = document.getElementById("glcanvas");
const gl = canvas.getContext("webgl2");
/**
 * Compile a shader of the given type from source.
 *
 * Fixed: the original returned the shader without checking compile
 * status, so a GLSL error surfaced only as a confusing link failure
 * later. Now throws with the driver's info log on compile failure.
 *
 * @param {WebGL2RenderingContext} gl - the rendering context
 * @param {number} type - gl.VERTEX_SHADER or gl.FRAGMENT_SHADER
 * @param {string} source - GLSL source text
 * @returns {WebGLShader} the compiled shader
 * @throws {Error} if compilation fails
 */
function createShader(gl, type, source) {
  const shader = gl.createShader(type);
  gl.shaderSource(shader, source);
  gl.compileShader(shader);
  if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) {
    const log = gl.getShaderInfoLog(shader);
    gl.deleteShader(shader); // don't leak the failed shader object
    throw new Error(log);
  }
  return shader;
}
/**
 * Build and link a GPU program from a compiled vertex and fragment shader.
 *
 * @param {WebGL2RenderingContext} gl - the rendering context
 * @param {WebGLShader} vs - compiled vertex shader
 * @param {WebGLShader} fs - compiled fragment shader
 * @returns {WebGLProgram} the linked program
 * @throws {Error} if linking fails
 */
function createProgram(gl, vs, fs) {
  const program = gl.createProgram();
  gl.attachShader(program, vs);
  gl.attachShader(program, fs);
  gl.linkProgram(program);
  if (gl.getProgramParameter(program, gl.LINK_STATUS)) {
    return program;
  }
  // Surface the driver's link log so the failure is actionable.
  throw new Error(gl.getProgramInfoLog(program));
}
// Compile both shaders, link them, and export the shared GL handles
// for use by script.js.
const program = createProgram(
gl,
createShader(gl, gl.VERTEX_SHADER, vs),
createShader(gl, gl.FRAGMENT_SHADER, fs)
);
export { gl, canvas, program }
script.js
import { gl, canvas, program } from './program.js';
const DPR = window.devicePixelRatio || 1;
canvas.width = canvas.clientWidth * DPR;
canvas.height = canvas.clientHeight * DPR;
const vao = gl.createVertexArray();
gl.bindVertexArray(vao);
const texCoords = new Float32Array([
0, 0,
1, 0,
0, 1,
1, 1,
]);
const tbo = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, tbo);
gl.bufferData(gl.ARRAY_BUFFER, texCoords, gl.STATIC_DRAW);
gl.vertexAttribPointer(1, 2, gl.FLOAT, false, 0, 0);
gl.enableVertexAttribArray(1);
let texture;
let img = new Image();
img.src = "webgl-image.png";
img.onload = () => {
const aspect = img.width / img.height;
const width = aspect;
const height = 1;
const vertices = new Float32Array([
0, 0,
width, 0,
0, height,
width, height,
]);
const vbo = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, vbo);
gl.bufferData(gl.ARRAY_BUFFER, vertices, gl.STATIC_DRAW);
gl.vertexAttribPointer(0, 2, gl.FLOAT, false, 0, 0);
gl.enableVertexAttribArray(0);
texture = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, texture);
gl.pixelStorei(gl.UNPACK_FLIP_Y_WEBGL, true);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, img);
// Texture Parameters
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
render();
};
const uImage = gl.getUniformLocation(program, "uImage");
const invAspect = Math.fround(canvas.height / canvas.width);
let projection = new Float32Array([
invAspect, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1,
]);
const uProjection = gl.getUniformLocation(program, "uProjection");
const render = () => {
gl.viewport(0, 0, canvas.width, canvas.height);
gl.clearColor(1, 1, 1, 1);
gl.clear(gl.COLOR_BUFFER_BIT);
gl.useProgram(program);
gl.bindVertexArray(vao);
gl.activeTexture(gl.TEXTURE0);
gl.bindTexture(gl.TEXTURE_2D, texture);
gl.uniform1i(uImage, 0);
gl.uniformMatrix4fv(uProjection, false, projection);
gl.drawArrays(gl.TRIANGLE_STRIP, 0, 4);
}
window.addEventListener('resize', () => {
canvas.width = canvas.clientWidth * DPR;
canvas.height = canvas.clientHeight * DPR;
const invAspect = Math.fround(canvas.height / canvas.width);
projection = new Float32Array([
invAspect, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1,
]);
const uProjection = gl.getUniformLocation(program, "uProjection");
gl.uniformMatrix4fv(uProjection, false, projection);
render();
});