Commit 806bc88

formatting and linting improvements
1 parent 7c78d2f commit 806bc88


59 files changed (1,278 additions, 1,129 deletions)
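The commit message does not name the tooling, but the changes below (double quotes, spaces around binary operators, trailing commas, wrapped call arguments) match what an automated formatter such as black produces, and the removed unused imports plus the added # noqa marker are typical flake8 fixes. A minimal sketch of reproducing one such rewrite with black's Python API, assuming black is installed; the input string is illustrative and exact output may vary slightly between black versions:

import black

# One pattern seen throughout this commit: single quotes rewritten to
# black's canonical double quotes (expressions inside the f-string are
# left untouched, matching the diffs below).
old_src = "print(f'Time {(t1-t0)/N:.3f}sec/image, shape {imgshape}')\n"

# black.format_str applies the same rules as running `black` on a file.
new_src = black.format_str(old_src, mode=black.Mode())
print(new_src)  # print(f"Time {(t1-t0)/N:.3f}sec/image, shape {imgshape}")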

benchmarks/benchmark.py (30 additions, 24 deletions)

@@ -11,59 +11,65 @@
 INSTANCES = 4
 WORKER_INSTANCES = 4
 NUM_ITEMS = 512
-EXAMPLES_DIR = Path(__file__).parent/'..'/'examples'/'datagen'
-
+EXAMPLES_DIR = Path(__file__).parent / ".." / "examples" / "datagen"
+
+
 def main():
     parser = argparse.ArgumentParser()
-    parser.add_argument('scene', help='Blender scene name to run', default='cube')
+    parser.add_argument("scene", help="Blender scene name to run", default="cube")
     args = parser.parse_args()

     launch_args = dict(
-        scene=EXAMPLES_DIR/f'{args.scene}.blend',
-        script=EXAMPLES_DIR/f'{args.scene}.blend.py',
+        scene=EXAMPLES_DIR / f"{args.scene}.blend",
+        script=EXAMPLES_DIR / f"{args.scene}.blend.py",
         num_instances=INSTANCES,
-        named_sockets=['DATA']
+        named_sockets=["DATA"],
     )

-    with btt.BlenderLauncher(**launch_args) as bl:
-        ds = btt.RemoteIterableDataset(bl.launch_info.addresses['DATA'])
+    with btt.BlenderLauncher(**launch_args) as bl:
+        ds = btt.RemoteIterableDataset(bl.launch_info.addresses["DATA"])
         ds.stream_length(NUM_ITEMS)
-        dl = data.DataLoader(ds, batch_size=BATCH, num_workers=WORKER_INSTANCES, shuffle=False)
+        dl = data.DataLoader(
+            ds, batch_size=BATCH, num_workers=WORKER_INSTANCES, shuffle=False
+        )

         # Wait to avoid timing startup times of Blender
         time.sleep(5)
-
+
         t0 = None
         tlast = None
         imgshape = None
-
+
         elapsed = []
         n = 0
         for item in dl:
-            n += len(item['image'])
-            if t0 is None: # 1st is warmup
+            n += len(item["image"])
+            if t0 is None:  # 1st is warmup
                 t0 = time.time()
                 tlast = t0
-                imgshape = item['image'].shape
-            elif n % (50*BATCH) == 0:
+                imgshape = item["image"].shape
+            elif n % (50 * BATCH) == 0:
                 t = time.time()
                 elapsed.append(t - tlast)
                 tlast = t
-                print('.', end='')
+                print(".", end="")
         assert n == NUM_ITEMS

-        t1 = time.time()
+        t1 = time.time()
         N = NUM_ITEMS - BATCH
-        B = NUM_ITEMS//BATCH - 1
-        print(f'Time {(t1-t0)/N:.3f}sec/image, {(t1-t0)/B:.3f}sec/batch, shape {imgshape}')
+        B = NUM_ITEMS // BATCH - 1
+        print(
+            f"Time {(t1-t0)/N:.3f}sec/image, {(t1-t0)/B:.3f}sec/batch, shape {imgshape}"
+        )

         fig, _ = plt.subplots()
         plt.plot(np.arange(len(elapsed)), elapsed)
-        plt.title('Receive times between 50 consecutive batches')
-        save_path = EXAMPLES_DIR / 'tmp' / 'batches_elapsed.png'
+        plt.title("Receive times between 50 consecutive batches")
+        save_path = EXAMPLES_DIR / "tmp" / "batches_elapsed.png"
         fig.savefig(str(save_path))
         plt.close(fig)
-        print(f'Figure saved to {save_path}')
+        print(f"Figure saved to {save_path}")
+

-if __name__ == '__main__':
-    main()
+if __name__ == "__main__":
+    main()

examples/compositor_normals_depth/compositor_normals_depth.blend.py (7 additions, 11 deletions)

@@ -1,6 +1,4 @@
-
 import blendtorch.btb as btb
-import numpy as np
 import bpy

 SHAPE = (30, 30)
@@ -22,10 +20,7 @@ def post_frame(render, pub, animation):
         # After frame
         if anim.frameid == 1:
             imgs = render.render()
-            pub.publish(
-                normals=imgs['normals'],
-                depth=imgs['depth']
-            )
+            pub.publish(normals=imgs["normals"], depth=imgs["depth"])

     # Parse script arguments passed via blendtorch launcher
     btargs, _ = btb.parse_blendtorch_args()
@@ -40,14 +35,14 @@ def post_frame(render, pub, animation):
     meshes = scene.prepare(NSHAPES, sshape_res=SHAPE)

     # Data source
-    pub = btb.DataPublisher(btargs.btsockets['DATA'], btargs.btid)
+    pub = btb.DataPublisher(btargs.btsockets["DATA"], btargs.btid)

     # Setup default image rendering
     cam = btb.Camera()
     render = btb.CompositeRenderer(
         [
-            btb.CompositeSelection('normals', 'Out1', 'Normals', 'RGB'),
-            btb.CompositeSelection('depth', 'Out1', 'Depth', 'V'),
+            btb.CompositeSelection("normals", "Out1", "Normals", "RGB"),
+            btb.CompositeSelection("depth", "Out1", "Depth", "V"),
         ],
         btid=btargs.btid,
         camera=cam,
@@ -57,8 +52,9 @@ def post_frame(render, pub, animation):
     anim = btb.AnimationController()
     anim.pre_animation.add(pre_anim, meshes)
     anim.post_frame.add(post_frame, render, pub, anim)
-    anim.play(frame_range=(0, 1), num_episodes=-1,
-              use_offline_render=False, use_physics=True)
+    anim.play(
+        frame_range=(0, 1), num_episodes=-1, use_offline_render=False, use_physics=True
+    )


 main()

examples/compositor_normals_depth/generate.py (8 additions, 10 deletions)

@@ -3,35 +3,33 @@
 import blendtorch.btt as btt
 import matplotlib.pyplot as plt
 import numpy as np
-import torch
 from torch.utils import data


 def main():
     # Define how we want to launch Blender
     launch_args = dict(
-        scene=Path(__file__).parent/'compositor_normals_depth.blend',
-        script=Path(__file__).parent/'compositor_normals_depth.blend.py',
+        scene=Path(__file__).parent / "compositor_normals_depth.blend",
+        script=Path(__file__).parent / "compositor_normals_depth.blend.py",
         num_instances=1,
-        named_sockets=['DATA'],
+        named_sockets=["DATA"],
     )

     # Launch Blender
     with btt.BlenderLauncher(**launch_args) as bl:
         # Create remote dataset and limit max length to 16 elements.
-        addr = bl.launch_info.addresses['DATA']
+        addr = bl.launch_info.addresses["DATA"]
         ds = btt.RemoteIterableDataset(addr, max_items=4)
         dl = data.DataLoader(ds, batch_size=4, num_workers=0)

         for item in dl:
-            normals = item['normals']
+            normals = item["normals"]
             # Note, normals are color-coded (0..1), to convert back to original
             # range (-1..1) use
             # true_normals = (normals - 0.5) * \
             #     torch.tensor([2., 2., -2.]).view(1, 1, 1, -1)
-            depth = item['depth']
-            print('Received', normals.shape, depth.shape,
-                  depth.dtype, np.ptp(depth))
+            depth = item["depth"]
+            print("Received", normals.shape, depth.shape, depth.dtype, np.ptp(depth))

         fig, axs = plt.subplots(2, 2)
         axs = np.asarray(axs).reshape(-1)
@@ -44,5 +42,5 @@ def main():
         plt.show()


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
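The comment kept in generate.py gives the conversion from color-coded normals in the 0..1 range back to the original -1..1 range. A standalone sketch of that conversion; only the formula comes from the source, the tensor shape and values are placeholders:

import torch

# Placeholder batch in the channels-last layout implied by the comment: (B, H, W, 3).
normals = torch.rand(4, 240, 320, 3)  # stands in for item["normals"]

# Formula from the code comment: rescale 0..1 to -1..1 and negate the Z component.
true_normals = (normals - 0.5) * torch.tensor([2.0, 2.0, -2.0]).view(1, 1, 1, -1)

print(true_normals.min().item(), true_normals.max().item())  # roughly -1 and 1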
(file path not shown)

@@ -1,5 +1,4 @@
 import bpy
-import bmesh
 import numpy as np
 import blendtorch.btb as btb
 import supershape as sshape
@@ -8,7 +7,7 @@


 def create_sshape_mesh(shape, material=None, fake_user=False):
-    new_obj = sshape.make_bpy_mesh(shape, name='sshape', coll=False, weld=True)
+    new_obj = sshape.make_bpy_mesh(shape, name="sshape", coll=False, weld=True)
     new_obj.data.use_fake_user = fake_user
     new_obj.use_fake_user = fake_user
     if material is not None:
@@ -17,17 +16,13 @@ def create_sshape_mesh(shape, material=None, fake_user=False):
     return new_obj


-def prepare(
-        n_sshapes,
-        sshape_res=(100, 100),
-        collection='Generated',
-        fake_user=False):
+def prepare(n_sshapes, sshape_res=(100, 100), collection="Generated", fake_user=False):
     coll = SCN.collection.children[collection]

     # The following material renders camera-space normals
-    mat = btb.materials.create_normal_material('normals')
+    mat = btb.materials.create_normal_material("normals")

-    plane = bpy.data.objects['Plane']
+    plane = bpy.data.objects["Plane"]
     plane.active_material = mat

     sshapes = [
@@ -41,11 +36,11 @@ def prepare(
         SCN.rigidbody_world.collection.objects.link(s)
         # Rigid body settings
         s.rigid_body.enabled = True
-        s.rigid_body.collision_shape = 'BOX'
+        s.rigid_body.collision_shape = "BOX"
         s.rigid_body.friction = 0.7
         s.rigid_body.linear_damping = 0.3
         s.rigid_body.angular_damping = 0.4
-        s.rigid_body.type = 'ACTIVE'
+        s.rigid_body.type = "ACTIVE"

     return sshapes

@@ -54,11 +49,10 @@ def update_mesh(mesh, sshape_res=(100, 100)):
     params = np.random.uniform(
         low=[1.00, 1, 1, 6.0, 6.0, 6.0],
         high=[4.00, 1, 1, 10.0, 10.0, 10.0],
-        size=(2, 6)
+        size=(2, 6),
     )
     scale = np.abs(np.random.normal(0.05, 0.05, size=3))
     x, y, z = sshape.supercoords(params, shape=sshape_res)
-    sshape.update_bpy_mesh(x*scale[0], y*scale[1], z*scale[2], mesh)
-    mesh.location = np.random.uniform(
-        low=[-0.5, -0.5, 1], high=[0.5, 0.5, 3], size=(3))
+    sshape.update_bpy_mesh(x * scale[0], y * scale[1], z * scale[2], mesh)
+    mesh.location = np.random.uniform(low=[-0.5, -0.5, 1], high=[0.5, 0.5, 3], size=(3))
     mesh.rotation_euler = np.random.uniform(low=-np.pi, high=np.pi, size=(3))

examples/control/cartpole.py (11 additions, 8 deletions)

@@ -16,24 +16,27 @@

 KAPPA = 30

+
 def control(obs):
     # Simple P controller defining the error as xpole-xcart
     xcart, xpole, _ = obs
-    return (xpole-xcart)*KAPPA
+    return (xpole - xcart) * KAPPA
+

 def main():
-    # Create the environment. The environment is registered in
-    # `cartpole_gym/__init__.py`. Set `real_time=True` when you
-    # want to keep the simulation running until the agent response.
-    env = gym.make('blendtorch-cartpole-v0', real_time=False)
-
-    obs = env.reset()
+    # Create the environment. The environment is registered in
+    # `cartpole_gym/__init__.py`. Set `real_time=True` when you
+    # want to keep the simulation running until the agent response.
+    env = gym.make("blendtorch-cartpole-v0", real_time=False)
+
+    obs = env.reset()
     while True:
         obs, reward, done, info = env.step(control(obs))
         env.render()
         if done:
             obs = env.reset()
     env.done()

-if __name__ == '__main__':
+
+if __name__ == "__main__":
     main()
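The reformatted control() above is a proportional controller on the offset between the pole and cart positions. A quick standalone check with made-up observation values (the tuple layout mirrors the script; the numbers are illustrative):

KAPPA = 30


def control(obs):
    # Same rule as in cartpole.py: actuation proportional to xpole - xcart.
    xcart, xpole, _ = obs
    return (xpole - xcart) * KAPPA


# Pole 0.25 units to the right of the cart -> positive push of 7.5 toward it.
print(control((0.0, 0.25, 0.0)))  # 7.5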
(file path not shown) (1 addition, 4 deletions)

@@ -1,6 +1,3 @@
 from gym.envs.registration import register

-register(
-    id='blendtorch-cartpole-v0',
-    entry_point='cartpole_gym.envs:CartpoleEnv'
-)
+register(id="blendtorch-cartpole-v0", entry_point="cartpole_gym.envs:CartpoleEnv")
(file path not shown)

@@ -1 +1 @@
-from .cartpole_env import CartpoleEnv
+from .cartpole_env import CartpoleEnv  # noqa

examples/control/cartpole_gym/envs/cartpole.blend.py (1 deletion)

@@ -1,5 +1,4 @@
 import bpy
-from mathutils import Matrix
 import numpy as np

 from blendtorch import btb

examples/control/cartpole_gym/envs/cartpole_env.py (7 additions, 6 deletions)

@@ -3,16 +3,17 @@
 from gym import spaces
 from blendtorch import btt

+
 class CartpoleEnv(btt.env.OpenAIRemoteEnv):
     def __init__(self, render_every=10, real_time=False):

-        super().__init__(version='0.0.1')
+        super().__init__(version="0.0.1")
         self.launch(
-            scene=Path(__file__).parent/'cartpole.blend',
-            script=Path(__file__).parent/'cartpole.blend.py',
+            scene=Path(__file__).parent / "cartpole.blend",
+            script=Path(__file__).parent / "cartpole.blend.py",
             real_time=real_time,
-            render_every=10,
+            render_every=10,
         )
-
+
         self.action_space = spaces.Box(np.float32(-100), np.float32(100), shape=(1,))
-        self.observation_space = spaces.Box(np.float32(-10), np.float32(10), shape=(1,))
+        self.observation_space = spaces.Box(np.float32(-10), np.float32(10), shape=(1,))

examples/datagen/cube.blend.py (11 additions, 11 deletions)

@@ -3,6 +3,7 @@

 import blendtorch.btb as btb

+
 def main():
     # Parse script arguments passed via blendtorch launcher
     btargs, remainder = btb.parse_blendtorch_args()
@@ -12,29 +13,28 @@ def main():

     def pre_frame():
         # Randomize cube rotation
-        cube.rotation_euler = np.random.uniform(0,np.pi,size=3)
-
+        cube.rotation_euler = np.random.uniform(0, np.pi, size=3)
+
     def post_frame(off, pub, anim, cam):
         # Called every after Blender finished processing a frame.
         # Will be sent to one of the remote dataset listener connected.
         pub.publish(
-            image=off.render(),
-            xy=cam.object_to_pixel(cube),
-            frameid=anim.frameid
+            image=off.render(), xy=cam.object_to_pixel(cube), frameid=anim.frameid
         )

     # Data source
-    pub = btb.DataPublisher(btargs.btsockets['DATA'], btargs.btid)
+    pub = btb.DataPublisher(btargs.btsockets["DATA"], btargs.btid)

     # Setup default image rendering
     cam = btb.Camera()
-    off = btb.OffScreenRenderer(camera=cam, mode='rgb')
-    off.set_render_style(shading='RENDERED', overlays=False)
+    off = btb.OffScreenRenderer(camera=cam, mode="rgb")
+    off.set_render_style(shading="RENDERED", overlays=False)

     # Setup the animation and run endlessly
     anim = btb.AnimationController()
     anim.pre_frame.add(pre_frame)
-    anim.post_frame.add(post_frame, off, pub, anim, cam)
-    anim.play(frame_range=(0,100), num_episodes=-1)
+    anim.post_frame.add(post_frame, off, pub, anim, cam)
+    anim.play(frame_range=(0, 100), num_episodes=-1)
+

-main()
+main()
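cube.blend.py publishes image, xy, and frameid over the DATA socket every frame. A minimal consumer sketch on the PyTorch side, modeled on benchmark.py and generate.py above; the scene/script paths and item counts are assumptions for illustration:

from pathlib import Path

import blendtorch.btt as btt
from torch.utils import data

# Assumed location of cube.blend / cube.blend.py relative to this script.
EXAMPLES_DIR = Path(__file__).parent

launch_args = dict(
    scene=EXAMPLES_DIR / "cube.blend",
    script=EXAMPLES_DIR / "cube.blend.py",
    num_instances=1,
    named_sockets=["DATA"],
)

with btt.BlenderLauncher(**launch_args) as bl:
    # Read a few published items; the keys match what cube.blend.py sends.
    ds = btt.RemoteIterableDataset(bl.launch_info.addresses["DATA"], max_items=8)
    dl = data.DataLoader(ds, batch_size=4, num_workers=0)
    for item in dl:
        print(item["image"].shape, item["xy"].shape, item["frameid"])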
