diff --git a/README.md b/README.md
index 8e8f88b..2760f59 100644
--- a/README.md
+++ b/README.md
@@ -8,3 +8,34 @@ conda install --file requirements.txt -y
 pip install -e .
 
 pytest --cov-report=html --cov=relaxrender --ignore=tests/test_relaxrender.py tests
+
+
+
+Note:
+
+If you run into "module not found" errors, the following way of running the code may help; for details, see the documentation on Python package management.
+
+**One way to run on Windows:**
+
+Open cmd, cd to the project root, set the temporary environment variable PYTHONPATH to the current directory, and then run your file from the root directory.
+
+
+Example:
+
+```
+# cmd
+D: # drive letter
+cd <project root>
+set PYTHONPATH=./
+python <file path>
+```
+
+**One way to run files on Linux:**
+
+```
+cd <project root>
+export PYTHONPATH=./
+python3 <file path>
+```
+
+
diff --git a/features/README.md b/features/README.md
new file mode 100644
index 0000000..52b14a7
--- /dev/null
+++ b/features/README.md
@@ -0,0 +1,7 @@
+Each team creates a new folder for its own feature and develops inside that folder; a readme and a test file are required.
+
+Example: teapot ---|--- README.md
+
+                   |--- render.py
+
+                   |--- test.py
\ No newline at end of file
diff --git a/features/__init__.py b/features/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/features/bumping-map/README.md b/features/bumping-map/README.md
new file mode 100644
index 0000000..b58ed6c
--- /dev/null
+++ b/features/bumping-map/README.md
@@ -0,0 +1,41 @@
+test_render.py is the test file, render.py implements the bump-mapping function, water2.png is the normal-map texture, and output.jpg is the output.
+
+Another offline renderer.
+
+
+conda env create --file environment.yml
+
+conda install --file requirements.txt -y
+
+pip install -e .
+
+pytest --cov-report=html --cov=relaxrender --ignore=tests/test_relaxrender.py tests
+
+
+
+Note:
+
+If you run into "module not found" errors, the following way of running the code may help; for details, see the documentation on Python package management.
+
+**One way to run on Windows:**
+
+Open cmd, cd to the project root, set the temporary environment variable PYTHONPATH to the current directory, and then run your file from the root directory.
+
+Example:
+
+```
+# cmd
+D: # drive letter
+cd <project root>
+set PYTHONPATH=./
+python <file path>
+```
+
+**One way to run files on Linux:**
+
+```
+cd <project root>
+export PYTHONPATH=./
+python3 <file path>
+```
+
diff --git a/features/bumping-map/render.py b/features/bumping-map/render.py
new file mode 100644
index 0000000..b803b09
--- /dev/null
+++ b/features/bumping-map/render.py
@@ -0,0 +1,201 @@
+from functools import reduce
+import numpy as np
+import time
+import numbers
+import imageio
+import random
+import sys
+import math
+
+def extract(cond, x):
+    if isinstance(x, numbers.Number):
+        return x
+    else:
+        return np.extract(cond, x)
+class vec3():
+    def __init__(self, x, y, z):
+        (self.x, self.y, self.z) = (x, y, z)
+    def __mul__(self, other):
+        return vec3(self.x * other, self.y * other, self.z * other)
+    def mul_xy(self,other):  # scale only the x and y components
+        self.x=self.x * other
+        self.y=self.y * other
+    def __add__(self, other):
+        return vec3(self.x + other.x, self.y + other.y, self.z + other.z)
+    def __sub__(self, other):
+        return vec3(self.x - other.x, self.y - other.y, self.z - other.z)
+    def dot(self, other):
+        return (self.x * other.x) + (self.y * other.y) + (self.z * other.z)
+    def getZ(self,length=0):  # recompute z so the vector stays unit length
+        ones=np.ones(length)
+        xy=self.x*self.x+self.y*self.y
+        self.z=np.sqrt(ones-xy)
+    def __abs__(self):
+        return self.dot(self)
+    def norm(self):
+        mag = np.sqrt(abs(self))
+        return self * (1.0 / np.where(mag == 0, 1, mag))
+    def components(self):
+        return (self.x, self.y, self.z)
+    def extract(self, cond):
+        return vec3(extract(cond, self.x),
+                    extract(cond, self.y),
+                    extract(cond, self.z))
+    def place(self, cond):
+        r = vec3(np.zeros(cond.shape), np.zeros(cond.shape), np.zeros(cond.shape))
+        np.place(r.x, cond, self.x)
+        np.place(r.y, cond, self.y)
+        np.place(r.z, cond, self.z)
+        return r
+    def yanchang(self,nummber,yushu):  # tile the components nummber times, then append yushu extra entries
+        x2 = self.x[:yushu]
+        y2 = self.y[:yushu]
+        z2 = self.z[:yushu]
+        self.x=np.tile(self.x,nummber)
+        self.y = np.tile(self.y, nummber)
+        self.z = np.tile(self.z, 
nummber) + self.x = np.r_[self.x, x2] + self.y = np.r_[self.y, y2] + self.z = np.r_[self.z, z2] + def copy(self): + x=self.x.copy() + y=self.y.copy() + z=self.z.copy() + ve=vec3(x,y,z) + return ve + + + +rgb = vec3 +tangent=vec3 +(w, h) = (400, 300) # Screen size +L = vec3(0, 0.35, -1.) # Point light position +E = vec3(0., 0.35, -10) # Eye position +FARAWAY = 1.0e39 # an implausibly huge distance +(low,high)=(0.8,1) +# O is the ray origin, D is the normalized ray direction. +# scene is a list of Sphere objects (see +# below) +# bounce is the number of the bounce, starting at zero for camera rays. +def raytrace(O, D, scene,tangent, bounce = 0): + distances = [s.intersect(O, D) for s in scene] + nearest = reduce(np.minimum, distances) + color = rgb(0, 0, 0) + for (s, d) in zip(scene, distances): + hit = (nearest != FARAWAY) & (d == nearest) + if np.any(hit): + dc = extract(hit, d) + Oc = O.extract(hit) + Dc = D.extract(hit) + cc = s.light(Oc, Dc, dc, scene, bounce,tangent) + color += cc.place(hit) + return color +class Sphere: + def __init__(self, center, r, diffuse, mirror = 0): + self.c = center + self.r = r + self.diffuse = diffuse + self.mirror = mirror + def intersect(self, O, D): + b = 2 * D.dot(O - self.c) + c = abs(self.c) + abs(O) - 2 * self.c.dot(O) - (self.r * self.r) + disc = (b ** 2) - (4 * c) + sq = np.sqrt(np.maximum(0, disc)) + h0 = (-b - sq) / 2 + h1 = (-b + sq) / 2 + h = np.where((h0 > 0) & (h0 < h1), h0, h1) + pred = (disc > 0) & (h > 0) + return np.where(pred, h, FARAWAY) + def diffusecolor(self, M): + return self.diffuse #h = np.where((h0 > 0) & (h0 < h1), h0, h1) + + + def light(self, O, D, d, scene, bounce,tangent): + M = (O + D * d) # intersection point + N = (M - self.c) * (1. / self.r) # normal + toL = (L - M).norm() # direction to light + toO = (E - M).norm() # direction to ray origin + nudged = M + N * .0001 # M nudged to avoid itself + length = len(toL.x) + Bumpscale = 0.5 + nummber = length // tangent.x.size + yushu = length % tangent.x.size + tangent.yanchang(nummber, yushu) + tangent.mul_xy(Bumpscale) + tangent.getZ(tangent.x.size) + + # Shadow: find if the point is shadowed or not. + # This amounts to finding out if M can see the light + light_distances = [s.intersect(nudged, toL) for s in scene] + light_nearest = reduce(np.minimum, light_distances) + seelight = light_distances[scene.index(self)] == light_nearest + # Ambient + color = rgb(0.05, 0.05, 0.05) + # Lambert shading (diffuse) + lv = np.maximum(0,tangent.dot(toL)*-1) + color += self.diffusecolor(M) * lv * seelight + # Reflection + if bounce < 2: + rayD = (D - N * 2 * D.dot(N)).norm() + tangent2=tangent.copy() + color += raytrace(nudged, rayD, scene,tangent2, bounce + 1) * self.mirror + + # Blinn-Phong shading (specular) + tangent.y = tangent.y * -1 + tangent.x = tangent.x * -1 + phong = np.maximum(0,tangent.dot((toL + toO).norm())*-1) + color += rgb(1, 1, 1) * np.power(np.clip(0,phong, 1),50) * seelight + return color +def building(): + rgb = vec3 + tangent = vec3 + (w, h) = (400, 300) # Screen size + L = vec3(0, 0.35, -1.) 
# Point light position
+    E = vec3(0., 0.35, -10)     # Eye position
+    FARAWAY = 1.0e39            # an implausibly huge distance
+    (low, high) = (0.8, 1)
+    img = imageio.imread('water2.png')
+    x = img.take([0], axis=2)
+    x = np.array(x).reshape(x.size)
+    y = img.take([1], axis=2)
+    y = np.array(y).reshape(y.size)
+    z = img.take([2], axis=2)
+    z = np.array(z).reshape(z.size)
+    length = z.size
+    i = 0
+    ping = np.ones(length)
+    while i < length:
+        ping[i] = x[i] ** 2 + y[i] ** 2 + z[i] ** 2
+        i += 1
+    ping = np.sqrt(ping)
+    x = np.true_divide(x, ping)
+    y = np.true_divide(y, ping)
+    z = np.true_divide(z, ping)
+    ones = np.ones(length)
+    x = x * 2 - ones
+    y = y * 2 - ones
+    z = z * 2 - ones
+    tangent = vec3(x, y, z)
+
+    scene = [
+        Sphere(vec3(0, .1, 0.5), .6, rgb(.221, .169, .105)),
+    ]
+    # r is the image aspect ratio
+    r = float(w) / h
+    # Screen coordinates: x0, y0, x1, y1.
+    S = (-1., 1. / r + .25, 1., -1. / r + .25)
+    x2 = np.tile(np.linspace(S[0], S[2], w), h)
+    y2 = np.repeat(np.linspace(S[1], S[3], h), w)
+    t0 = time.time()
+    Q = vec3(x2, y2, 0)
+    color = raytrace(E, (Q - E).norm(), scene, tangent, 0)
+    print("Took", time.time() - t0)
+    l = []
+    for c in color.components():
+        l.append((255 * np.clip(c, 0, 1).reshape((h, w))).astype(np.uint8))
+    # assemble the (h, w, 3) image from the three color channels
+    output = np.array(
+        list(list([l[0][i][j], l[1][i][j], l[2][i][j]] for j in range(len(l[0][0]))) for i in range(len(l[0]))))
+    imageio.imwrite(r"output.jpg", output)
+    return 1
+
diff --git a/features/bumping-map/test_render.py b/features/bumping-map/test_render.py
new file mode 100644
index 0000000..313c5d0
--- /dev/null
+++ b/features/bumping-map/test_render.py
@@ -0,0 +1,11 @@
+import unittest
+import render as bp
+class TestBumpingMap(unittest.TestCase):
+
+    def test_bumping_map(self):
+        d=bp.building()
+        self.assertEqual(d,1)
+
+
+
+
diff --git a/features/bumping-map/water2.png b/features/bumping-map/water2.png
new file mode 100644
index 0000000..8c6f4a0
Binary files /dev/null and b/features/bumping-map/water2.png differ
diff --git a/features/teapot/.test.sh.swp b/features/teapot/.test.sh.swp
new file mode 100644
index 0000000..1aa9cfe
Binary files /dev/null and b/features/teapot/.test.sh.swp differ
diff --git a/features/teapot/README.md b/features/teapot/README.md
new file mode 100644
index 0000000..4733b4d
--- /dev/null
+++ b/features/teapot/README.md
@@ -0,0 +1,2 @@
+# Utah teapot
+> Render a Utah teapot, animate it, and save the rendered result.
diff --git a/features/teapot/main.py b/features/teapot/main.py
new file mode 100644
index 0000000..e69de29
diff --git a/features/teapot/prepare.sh b/features/teapot/prepare.sh
new file mode 100644
index 0000000..72c1e5c
--- /dev/null
+++ b/features/teapot/prepare.sh
@@ -0,0 +1,3 @@
+sudo pip install pyopengl
+sudo apt-get install freeglut3-dev
+sudo apt-get install freeglut3
diff --git a/features/teapot/requirements.txt b/features/teapot/requirements.txt
new file mode 100644
index 0000000..1a4c1ef
--- /dev/null
+++ b/features/teapot/requirements.txt
@@ -0,0 +1,2 @@
+pyopengl
+pillow
diff --git a/features/teapot/teapot.png b/features/teapot/teapot.png
new file mode 100644
index 0000000..2a4120c
Binary files /dev/null and b/features/teapot/teapot.png differ
diff --git a/features/teapot/teapot.py b/features/teapot/teapot.py
new file mode 100644
index 0000000..6f1c67c
--- /dev/null
+++ b/features/teapot/teapot.py
@@ -0,0 +1,91 @@
+import sys
+from OpenGL.GL import *
+from OpenGL.GLU import *
+from OpenGL.GLUT import *
+import time
+
+from PIL import Image
+from 
PIL import ImageOps + +offset = 0 +count=20 + +class Teapot: + def display(self): + global offset + glEnable(GL_LIGHTING) + glEnable(GL_LIGHT0) + glEnable(GL_DEPTH_TEST) + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) + glRotatef(1, 0, 0, 1) + glTranslatef(offset,0,0) + glPushMatrix() + gluLookAt(0, 6, 8, 0, 0, 0, 0, 1, 0) + glutSolidTeapot(2.5) + glPopMatrix() + glFlush() + + + def reshape(self,w, h): + glViewport(0, 0, w, h) + glMatrixMode(GL_PROJECTION) + glLoadIdentity() + gluPerspective(45.0, 1.0 * w / h, 0.1, 100.0) + glMatrixMode(GL_MODELVIEW) + glLoadIdentity() + + +# def keyboard(self,key, x, y): +# if key == chr(32): sys.exit(0) + + def idle(self): + global offset + global count + global window_id + time.sleep(0.1) + count=count-0.1 + if (offset < -0.3): + offset = 0 + elif (offset > 0.3): + offset = 0.3-offset + elif (offset < 0): + offset = offset - 0.01 + elif (offset <= 0.3 ): + offset=offset+0.01 + + self.display() + + def init(self): + global window_id + glutInit(sys.argv) + glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB | GLUT_DEPTH) + glutInitWindowSize(500, 400) + window_id = glutCreateWindow('teapot') + glutReshapeFunc(self.reshape) +# glutKeyboardFunc(self.keyboard) + glutDisplayFunc(self.display) + glutIdleFunc(self.idle) + + + def main(self): + global count + self.init() + while (count > 0): + self.idle() + glutMainLoopEvent() + + + def save(self): + self.init() + glutMainLoopEvent() + time.sleep(1) + glPixelStorei(GL_PACK_ALIGNMENT, 1) + data = glReadPixels(0, 0, 500, 400, GL_RGBA, GL_UNSIGNED_BYTE) + image = Image.frombytes("RGBA", (500, 400), data) + image = ImageOps.flip(image) + image.save("teapot.png", "PNG") + + +#if __name__ == "__main__": +# t=Teapot() +# t.main() diff --git a/features/teapot/test.sh b/features/teapot/test.sh new file mode 100644 index 0000000..b430e2b --- /dev/null +++ b/features/teapot/test.sh @@ -0,0 +1 @@ +pytest --cov=./ diff --git a/features/teapot/test_teapot.py b/features/teapot/test_teapot.py new file mode 100644 index 0000000..1eae92a --- /dev/null +++ b/features/teapot/test_teapot.py @@ -0,0 +1,16 @@ +import unittest +from features.teapot.teapot import Teapot + +class TestTeapot(unittest.TestCase): + + def test_main(self): + t=Teapot() + t.main() + assert True + + + def test_teapot(self): + t=Teapot() + t.save() + assert True + diff --git a/relaxrender/points.py b/relaxrender/points.py index e3fbe31..0e012da 100644 --- a/relaxrender/points.py +++ b/relaxrender/points.py @@ -47,10 +47,10 @@ class Vector: data_width = Point3D.data_width*2 + 1 def __init__(self, p_start, p_end): - # p_start and p_end is point3D. - # for line, p_start and p_end are two points on the line. - # for ray, p_start is the starting point, p_end is some point on the ray. - # for segment, p_start is one end point, p_end is the other end point. + # p_start and p_end is point3D. + # for line, p_start and p_end are two points on the line. + # for ray, p_start is the starting point, p_end is some point on the ray. + # for segment, p_start is one end point, p_end is the other end point. 
         self.start = p_start
         self.end = p_end
         self.mode = 'real'
diff --git a/relaxrender/render.py b/relaxrender/render.py
new file mode 100644
index 0000000..162f977
--- /dev/null
+++ b/relaxrender/render.py
@@ -0,0 +1,219 @@
+from functools import reduce
+import numpy as np
+import time
+import numbers
+import imageio
+import random
+import sys
+import math
+
+def extract(cond, x):
+    if isinstance(x, numbers.Number):
+        return x
+    else:
+        return np.extract(cond, x)
+
+class vec3():
+    def __init__(self, x, y, z):
+        (self.x, self.y, self.z) = (x, y, z)
+    def __mul__(self, other):
+        return vec3(self.x * other, self.y * other, self.z * other)
+    def mul_xy(self,other):  # scale only the x and y components
+        self.x=self.x * other
+        self.y=self.y * other
+    def __add__(self, other):
+        return vec3(self.x + other.x, self.y + other.y, self.z + other.z)
+    def __sub__(self, other):
+        return vec3(self.x - other.x, self.y - other.y, self.z - other.z)
+    def dot(self, other):
+        return (self.x * other.x) + (self.y * other.y) + (self.z * other.z)
+    def getZ(self,length=0):  # recompute z so the vector stays unit length
+        ones=np.ones(length)
+        xy=self.x*self.x+self.y*self.y
+        self.z=np.sqrt(ones-xy)
+    def __abs__(self):
+        return self.dot(self)
+    def norm(self):
+        mag = np.sqrt(abs(self))
+        return self * (1.0 / np.where(mag == 0, 1, mag))
+    def components(self):
+        return (self.x, self.y, self.z)
+    def extract(self, cond):
+        return vec3(extract(cond, self.x),
+                    extract(cond, self.y),
+                    extract(cond, self.z))
+    def place(self, cond):
+        r = vec3(np.zeros(cond.shape), np.zeros(cond.shape), np.zeros(cond.shape))
+        np.place(r.x, cond, self.x)
+        np.place(r.y, cond, self.y)
+        np.place(r.z, cond, self.z)
+        return r
+    def jiequ(self,length):  # truncate the components to the given length
+        self.x=self.x[:length]
+        self.y = self.y[:length]
+        self.z = self.z[:length]
+    def yanchang(self,nummber,yushu):  # tile the components nummber times, then append yushu extra entries
+        x2 = self.x[:yushu]
+        y2 = self.y[:yushu]
+        z2 = self.z[:yushu]
+        self.x=np.tile(self.x,nummber)
+        self.y = np.tile(self.y, nummber)
+        self.z = np.tile(self.z, nummber)
+        self.x = np.r_[self.x, x2]
+        self.y = np.r_[self.y, y2]
+        self.z = np.r_[self.z, z2]
+    def copy(self):
+        x=self.x.copy()
+        y=self.y.copy()
+        z=self.z.copy()
+        ve=vec3(x,y,z)
+        return ve
+
+rgb = vec3
+tangent=vec3
+(w, h) = (400, 300)         # Screen size
+L = vec3(0, 0.35, -1.)      
# Point light position +E = vec3(0., 0.35, -10) # Eye position +FARAWAY = 1.0e39 # an implausibly huge distance +(low,high)=(0.8,1) + + +def raytrace(O, D, scene,tangent, bounce = 0): + # O is the ray origin, D is the normalized ray direction + # scene is a list of Sphere objects (see + # below) + # bounce is the number of the bounce, starting at zero for camera rays + distances = [s.intersect(O, D) for s in scene] + nearest = reduce(np.minimum, distances) + color = rgb(0, 0, 0) + for (s, d) in zip(scene, distances): + hit = (nearest != FARAWAY) & (d == nearest) + if np.any(hit): + dc = extract(hit, d) + Oc = O.extract(hit) + Dc = D.extract(hit) + cc = s.light(Oc, Dc, dc, scene, bounce,tangent) + color += cc.place(hit) + return color + +class Sphere: + def __init__(self, center, r, diffuse, mirror = 0): + self.c = center + self.r = r + self.diffuse = diffuse + self.mirror = mirror + + def intersect(self, O, D): + b = 2 * D.dot(O - self.c) + c = abs(self.c) + abs(O) - 2 * self.c.dot(O) - (self.r * self.r) + disc = (b ** 2) - (4 * c) + sq = np.sqrt(np.maximum(0, disc)) + h0 = (-b - sq) / 2 + h1 = (-b + sq) / 2 + h = np.where((h0 > 0) & (h0 < h1), h0, h1) + pred = (disc > 0) & (h > 0) + return np.where(pred, h, FARAWAY) + + def diffusecolor(self, M): + return self.diffuse + + def light(self, O, D, d, scene, bounce,tangent): + M = (O + D * d) # intersection point + N = (M - self.c) * (1. / self.r) # normal + toL = (L - M).norm() # direction to light + toO = (E - M).norm() # direction to ray origin + nudged = M + N * .0001 # M nudged to avoid itself + length=len(toL.x) + Bumpscale=0.5 + if tangent.x.size>length: + tangent.jiequ(length) + else: + print("ss") + nummber=length/tangent.x.size + yushu=length%tangent.x.size + tangent.yanchang(nummber,yushu) + tangent.mul_xy(Bumpscale) + tangent.getZ(tangent.x.size) + if self.r>1: + print("xxx") + tangent=N.copy() + # Shadow: find if the point is shadowed or not. + # This amounts to finding out if M can see the light + light_distances = [s.intersect(nudged, toL) for s in scene] + light_nearest = reduce(np.minimum, light_distances) + seelight = light_distances[scene.index(self)] == light_nearest + # Ambient + color = rgb(0.05, 0.05, 0.05) + # Lambert shading (diffuse) + lv = np.maximum(0,tangent.dot(toL)*-1) + color += self.diffusecolor(M) * lv * seelight + + # Reflection + if bounce < 2: + rayD = (D - N * 2 * D.dot(N)).norm() + tangent2=tangent.copy() + color += raytrace(nudged, rayD, scene,tangent2, bounce + 1) * self.mirror + + # Blinn-Phong shading (specular) + phong = np.maximum(0,tangent.dot((toL + toO).norm())*-1) + color += rgb(1, 1, 1) * np.power(np.clip(0,phong, 1),50) * seelight + return color + +class CheckeredSphere(Sphere): + def diffusecolor(self, M): + checker = ((M.x * 2).astype(int) % 2) == ((M.z * 2).astype(int) % 2) + return self.diffuse * checker + +def building(): + rgb = vec3 + tangent = vec3 + (w, h) = (400, 300) # Screen size + L = vec3(0, 0.35, -1.) 
# Point light position
+    E = vec3(0., 0.35, -10)     # Eye position
+    FARAWAY = 1.0e39            # an implausibly huge distance
+    (low, high) = (0.8, 1)
+    img = imageio.imread('water2.png')
+    x = img.take([0], axis=2)
+    x = np.array(x).reshape(x.size)
+    y = img.take([1], axis=2)
+    y = np.array(y).reshape(y.size)
+    z = img.take([2], axis=2)
+    z = np.array(z).reshape(z.size)
+    length = z.size
+    i = 0
+    ping = np.ones(length)
+    while i < length:
+        ping[i] = x[i] ** 2 + y[i] ** 2 + z[i] ** 2
+        i += 1
+    ping = np.sqrt(ping)
+    x = np.true_divide(x, ping)
+    y = np.true_divide(y, ping)
+    z = np.true_divide(z, ping)
+    ones = np.ones(length)
+    x = x * 2 - ones
+    y = y * 2 - ones
+    z = z * 2 - ones
+    tangent = vec3(x, y, z)
+
+    scene = [
+        # Sphere(vec3(.75, .1, 1.), .6, rgb(0, 0, 1)),
+        Sphere(vec3(0, .1, 0.5), .6, rgb(.221, .169, .105)),
+        # CheckeredSphere(vec3(0,-99999.5, 0), 99999, rgb(.75, .75, .75), 0.25),
+    ]
+
+    r = float(w) / h
+    # Screen coordinates: x0, y0, x1, y1.
+    S = (-1., 1. / r + .25, 1., -1. / r + .25)
+    x2 = np.tile(np.linspace(S[0], S[2], w), h)
+    y2 = np.repeat(np.linspace(S[1], S[3], h), w)
+    t0 = time.time()
+    Q = vec3(x2, y2, 0)
+    color = raytrace(E, (Q - E).norm(), scene, tangent, 0)
+    print("Took", time.time() - t0)
+    l = []
+    for c in color.components():
+        l.append((255 * np.clip(c, 0, 1).reshape((h, w))).astype(np.uint8))
+    # assemble the (h, w, 3) image from the three color channels
+    output = np.array(
+        list(list([l[0][i][j], l[1][i][j], l[2][i][j]] for j in range(len(l[0][0]))) for i in range(len(l[0]))))
+    imageio.imwrite(r"output.png", output)
diff --git a/test.sh b/test.sh
new file mode 100755
index 0000000..34ffd83
--- /dev/null
+++ b/test.sh
@@ -0,0 +1,2 @@
+export PYTHONPATH=./ # use the project root as PYTHONPATH
+pytest --cov-report=html --cov=relaxrender --ignore=tests/test_relaxrender.py tests
\ No newline at end of file
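
Side note for reviewers: the core idea both `render.py` files add is perturbing the shading normal with a normal-map texture — scale the texture's x/y components by a bump factor (`vec3.mul_xy`), recompute z so the vector stays unit length (`vec3.getZ`), then feed the perturbed normal into the Lambert and Blinn-Phong terms. Below is a minimal, self-contained sketch of that decode step, separate from the diff itself. The names `decode_normal_map` and `bump_scale` are illustrative only, and the sketch uses the conventional decode order (remap RGB to [-1, 1], then perturb), whereas `building()` in this diff normalizes the raw RGB values first; it is meant to clarify the technique, not to replace the repository code.

```python
# Illustrative sketch only -- not part of this diff. Assumes only numpy.
import numpy as np

def decode_normal_map(img, bump_scale=0.5):
    """Turn an (H, W, 3) uint8 normal-map image into unit tangent-space normals.

    x/y are scaled by bump_scale (mirroring vec3.mul_xy) and z is recomputed so
    every vector keeps unit length (mirroring vec3.getZ).
    """
    n = img.reshape(-1, 3).astype(np.float64) / 255.0   # [0, 255] -> [0, 1]
    n = n * 2.0 - 1.0                                    # [0, 1]   -> [-1, 1]
    n[:, :2] *= bump_scale                               # damp the x/y perturbation
    xy = np.clip(n[:, 0] ** 2 + n[:, 1] ** 2, 0.0, 1.0)
    n[:, 2] = np.sqrt(1.0 - xy)                          # rebuild z, keep |n| = 1
    return n

def lambert(normals, light_dir):
    """Diffuse term: clamped cosine between each perturbed normal and the light."""
    light_dir = light_dir / np.linalg.norm(light_dir)
    return np.maximum(0.0, normals @ light_dir)

if __name__ == "__main__":
    # Tiny random texture instead of water2.png, just to show the shapes involved.
    rng = np.random.default_rng(0)
    tex = rng.integers(0, 256, size=(4, 4, 3), dtype=np.uint8)
    print(lambert(decode_normal_map(tex), np.array([0.0, 0.35, -1.0])))
```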