Create a new Python environment¶
mamba create -n fractal-env python=3.11 -y
# initialize the shell integration (first run only)
mamba init
# close and reopen the terminal, then activate the environment
mamba activate fractal-env
mamba install numpy matplotlib numba ipykernel pytorch torchvision torchaudio cudatoolkit=11.8 -c pytorch -c nvidia
# register the environment as a Jupyter kernel
python -m ipykernel install --user --name fractal-env --display-name "Fractal Env"
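A quick, optional sanity check that the GPU stack is actually visible from the new environment; if either line prints False, the CUDA-accelerated examples further down will fail with the driver errors shown in their output.
import torch
from numba import cuda

# Both should print True on a machine with a working CUDA driver
print(torch.cuda.is_available())
print(cuda.is_available())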
To get started with generating fractals and displaying them using Three.js in a browser, we can break down the task into two main parts:
- Generating Fractals in Python: We'll use scientific Python libraries such as NumPy and Matplotlib to create fractal patterns.
- Displaying Fractals with Three.js: We'll convert the generated data into a format that Three.js can use to render the fractals in a browser.
Let's start with the Python part. We'll write code to generate examples of self-similar and self-affine fractals in 1D, 2D, and 3D.
1D Fractals¶
Example: Cantor Set
import matplotlib.pyplot as plt
def cantor_set(ax, x, y, length, depth):
    if depth == 0:
        # Draw the segment that survives at the final recursion level
        ax.plot([x, x + length], [y, y], color='black', lw=2)
    else:
        # Keep the left and right thirds; drop the middle third
        length /= 3
        cantor_set(ax, x, y, length, depth - 1)
        cantor_set(ax, x + 2 * length, y, length, depth - 1)

fig, ax = plt.subplots(figsize=(10, 2))
ax.set_title('Cantor Set')
ax.set_axis_off()
cantor_set(ax, 0, 0, 27, 5)
plt.show()
2D Fractals¶
Example: Sierpinski Triangle
import matplotlib.pyplot as plt
import numpy as np
def sierpinski_triangle(ax, vertices, depth):
    if depth == 0:
        triangle = plt.Polygon(vertices, edgecolor='black')
        ax.add_patch(triangle)
    else:
        # Midpoints of the three edges define the three corner sub-triangles
        midpoints = [(vertices[i] + vertices[(i + 1) % 3]) / 2 for i in range(3)]
        sierpinski_triangle(ax, [vertices[0], midpoints[0], midpoints[2]], depth - 1)
        sierpinski_triangle(ax, [vertices[1], midpoints[1], midpoints[0]], depth - 1)
        sierpinski_triangle(ax, [vertices[2], midpoints[2], midpoints[1]], depth - 1)

fig, ax = plt.subplots()
ax.set_title('Sierpinski Triangle')
ax.set_axis_off()
ax.set_aspect('equal')
vertices = np.array([[0, 0], [1, 0], [0.5, np.sqrt(3) / 2]])
sierpinski_triangle(ax, vertices, 5)
plt.show()
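As an aside, the same triangle can also be generated point by point with the "chaos game", the random-iteration (IFS) approach used later for the fern and tree. A minimal sketch; the 100,000-point count is an arbitrary choice:
import numpy as np
import matplotlib.pyplot as plt

def sierpinski_chaos_game(n_points):
    # Repeatedly jump halfway toward a randomly chosen vertex of the triangle
    corners = np.array([[0, 0], [1, 0], [0.5, np.sqrt(3) / 2]])
    points = np.zeros((n_points, 2))
    p = np.array([0.5, 0.25])  # arbitrary starting point
    for i in range(n_points):
        p = (p + corners[np.random.randint(3)]) / 2
        points[i] = p
    return points

points = sierpinski_chaos_game(100000)
plt.figure(figsize=(6, 6))
plt.scatter(points[:, 0], points[:, 1], s=0.1, color='black')
plt.title('Sierpinski Triangle (Chaos Game)')
plt.axis('off')
plt.show()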
3D Fractals¶
Example: Menger Sponge
import numpy as np
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import matplotlib.pyplot as plt
def menger_sponge(ax, x, y, z, size, depth):
    if depth == 0:
        # Eight corners of the cube at (x, y, z)
        p = [[x + dx * size, y + dy * size, z + dz * size]
             for dx, dy, dz in [(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0),
                                (0, 0, 1), (1, 0, 1), (1, 1, 1), (0, 1, 1)]]
        # Six faces of the cube
        faces = [[p[0], p[1], p[2], p[3]], [p[4], p[5], p[6], p[7]],
                 [p[0], p[1], p[5], p[4]], [p[2], p[3], p[7], p[6]],
                 [p[1], p[2], p[6], p[5]], [p[0], p[3], p[7], p[4]]]
        ax.add_collection3d(Poly3DCollection(faces, facecolors='blue', linewidths=0.5, edgecolors='black'))
    else:
        size /= 3
        for dx in [0, 1, 2]:
            for dy in [0, 1, 2]:
                for dz in [0, 1, 2]:
                    # Skip the centre cube of each face and the cube at the very centre
                    if (dx == 1 and dy == 1) or (dx == 1 and dz == 1) or (dy == 1 and dz == 1):
                        continue
                    menger_sponge(ax, x + dx * size, y + dy * size, z + dz * size, size, depth - 1)

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_title('Menger Sponge')
menger_sponge(ax, 0, 0, 0, 27, 3)
# add_collection3d does not autoscale, so set the axis limits explicitly
ax.set_xlim(0, 27)
ax.set_ylim(0, 27)
ax.set_zlim(0, 27)
plt.show()
Converting to Three.js¶
We can convert the fractal data into a format that Three.js can render. For instance, we'll export the vertex positions and faces for the 3D Menger Sponge.
Here's a simple example of how you might export data for use in Three.js:
import json

def generate_menger_sponge_data(x, y, z, size, depth):
    vertices = []
    faces = []
    # Define a function to recursively add vertices and faces
    # ...
    # Return vertices and faces as lists
    return vertices, faces

vertices, faces = generate_menger_sponge_data(0, 0, 0, 27, 3)
with open('menger_sponge.json', 'w') as f:
    json.dump({'vertices': vertices, 'faces': faces}, f)
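One way to fill in that skeleton is sketched below; the cube-corner ordering and the two-triangles-per-face split are choices made here for the sake of the example, not anything required by Three.js. Dropping this definition in place of the stub above produces a usable menger_sponge.json.
def generate_menger_sponge_data(x, y, z, size, depth, vertices=None, faces=None):
    # Recursively collect cube corners and triangulated faces as flat lists
    if vertices is None:
        vertices, faces = [], []
    if depth == 0:
        base = len(vertices)
        corners = [(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0),
                   (0, 0, 1), (1, 0, 1), (1, 1, 1), (0, 1, 1)]
        for cx, cy, cz in corners:
            vertices.append([x + cx * size, y + cy * size, z + cz * size])
        quads = [(0, 1, 2, 3), (4, 5, 6, 7), (0, 1, 5, 4),
                 (2, 3, 7, 6), (1, 2, 6, 5), (0, 3, 7, 4)]
        for a, b, c, d in quads:
            # Each square face becomes two triangles of vertex indices
            faces.append([base + a, base + b, base + c])
            faces.append([base + a, base + c, base + d])
    else:
        size /= 3
        for dx in range(3):
            for dy in range(3):
                for dz in range(3):
                    if (dx == 1 and dy == 1) or (dx == 1 and dz == 1) or (dy == 1 and dz == 1):
                        continue
                    generate_menger_sponge_data(x + dx * size, y + dy * size, z + dz * size,
                                                size, depth - 1, vertices, faces)
    return vertices, faces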
Displaying with Three.js¶
In your HTML file, you would load this JSON data and use Three.js to render it:
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Menger Sponge</title>
<style>
body { margin: 0; }
canvas { display: block; }
</style>
</head>
<body>
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r128/three.min.js"></script>
<script>
let scene = new THREE.Scene();
let camera = new THREE.PerspectiveCamera(75, window.innerWidth / window.innerHeight, 0.1, 1000);
let renderer = new THREE.WebGLRenderer();
renderer.setSize(window.innerWidth, window.innerHeight);
document.body.appendChild(renderer.domElement);
fetch('menger_sponge.json')
.then(response => response.json())
.then(data => {
// THREE.Geometry was removed in r125, so build an indexed BufferGeometry instead
let geometry = new THREE.BufferGeometry();
geometry.setAttribute('position', new THREE.BufferAttribute(new Float32Array(data.vertices.flat()), 3));
geometry.setIndex(data.faces.flat());
geometry.center();  // center the sponge on the origin so the rotation stays in view
let material = new THREE.MeshBasicMaterial({ color: 0x00ff00, wireframe: true });
let mesh = new THREE.Mesh(geometry, material);
scene.add(mesh);
camera.position.z = 50;
let animate = function () {
requestAnimationFrame(animate);
mesh.rotation.x += 0.01;
mesh.rotation.y += 0.01;
renderer.render(scene, camera);
};
animate();
});
</script>
</body>
</html>
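One practical note: most browsers will not let fetch read menger_sponge.json from a page opened via file://, so serve the folder over HTTP while testing, e.g. by running python -m http.server 8000 from the directory that contains the HTML and JSON files.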
With these examples, you should be able to generate fractal data in Python and display it with Three.js. Feel free to modify and expand them to suit your specific needs.
1D Self-Affine Fractal: Fractional Brownian Motion¶
Fractional Brownian motion (fBm) is a generalization of Brownian motion and can be used to create self-affine fractals.
import numpy as np
import matplotlib.pyplot as plt
def fractional_brownian_motion(H, length, n):
    # Note: scaling ordinary Brownian motion by t**H is only a crude, self-affine
    # approximation of fBm; exact constructions build the correlated increments directly.
    dt = length / n
    t = np.linspace(0, length, n)
    dB = np.random.normal(0, np.sqrt(dt), n)
    B = np.cumsum(dB)
    fBm = t ** H * B
    return t, fBm
H = 0.7 # Hurst exponent
length = 1.0
n = 1000
t, fBm = fractional_brownian_motion(H, length, n)
plt.figure(figsize=(10, 4))
plt.plot(t, fBm, label=f'H={H}')
plt.title('Fractional Brownian Motion')
plt.xlabel('Time')
plt.ylabel('fBm')
plt.legend()
plt.show()
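A closer approximation of fBm (still a sketch, with illustrative parameter choices) can be obtained by spectral synthesis: filter white noise so that its power spectrum falls off as 1/f^(2H+1).
import numpy as np
import matplotlib.pyplot as plt

def fbm_spectral(H, n, length=1.0, seed=None):
    # Spectral synthesis: shape white noise so its amplitude spectrum goes as f^-(H + 1/2)
    rng = np.random.default_rng(seed)
    spectrum = np.fft.rfft(rng.standard_normal(n))
    freqs = np.fft.rfftfreq(n, d=length / n)
    amplitude = np.zeros_like(freqs)
    amplitude[1:] = freqs[1:] ** (-(H + 0.5))  # leave the DC component at zero
    path = np.fft.irfft(spectrum * amplitude, n)
    path -= path[0]  # start the trace at zero
    return np.linspace(0, length, n), path

t, fBm_s = fbm_spectral(H=0.7, n=1000)
plt.figure(figsize=(10, 4))
plt.plot(t, fBm_s, label='H=0.7 (spectral synthesis)')
plt.title('Fractional Brownian Motion (Spectral Synthesis)')
plt.legend()
plt.show()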
2D Self-Affine Fractal: Perlin Noise¶
Perlin noise is smooth gradient noise; summing several octaves of it (shown after the single-octave example below) produces self-affine textures.
import numpy as np
import matplotlib.pyplot as plt
def generate_perlin_noise_2d(shape, res):
    def f(t):
        # Smoothstep-style fade curve used for interpolation
        return 6 * t**5 - 15 * t**4 + 10 * t**3

    delta = (res[0] / shape[0], res[1] / shape[1])
    d = (shape[0] // res[0], shape[1] // res[1])
    # One sample per output pixel, expressed as fractional coordinates inside each lattice cell
    grid = np.mgrid[0:res[0]:delta[0], 0:res[1]:delta[1]]
    grid = grid.transpose(1, 2, 0) % 1
    angles = 2 * np.pi * np.random.rand(res[0] + 1, res[1] + 1)
    gradients = np.dstack((np.cos(angles), np.sin(angles)))
    g00 = gradients[:-1, :-1]
    g10 = gradients[1:, :-1]
    g01 = gradients[:-1, 1:]
    g11 = gradients[1:, 1:]
    n00 = np.sum(grid * g00.repeat(d[0], 0).repeat(d[1], 1), axis=2)
    n10 = np.sum(np.dstack((grid[:, :, 0] - 1, grid[:, :, 1])) * g10.repeat(d[0], 0).repeat(d[1], 1), axis=2)
    n01 = np.sum(np.dstack((grid[:, :, 0], grid[:, :, 1] - 1)) * g01.repeat(d[0], 0).repeat(d[1], 1), axis=2)
    n11 = np.sum((grid - 1) * g11.repeat(d[0], 0).repeat(d[1], 1), axis=2)
    t = f(grid)
    n0 = n00 * (1 - t[:, :, 0]) + t[:, :, 0] * n10
    n1 = n01 * (1 - t[:, :, 0]) + t[:, :, 0] * n11
    return np.sqrt(2) * ((1 - t[:, :, 1]) * n0 + t[:, :, 1] * n1)
shape = (512, 512)
res = (8, 8)
noise = generate_perlin_noise_2d(shape, res)
plt.figure(figsize=(10, 10))
plt.imshow(noise, cmap='gray')
plt.title('2D Perlin Noise')
plt.axis('off')
plt.show()
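A single octave of Perlin noise is smooth rather than fractal; a self-affine texture comes from summing octaves at doubling frequencies and decaying amplitudes. A short sketch built on the generator above; the octave count and persistence are illustrative choices, and the shape must stay divisible by the finest lattice resolution.
def generate_fractal_noise_2d(shape, res, octaves=5, persistence=0.5):
    # Sum octaves of Perlin noise: each octave doubles the lattice resolution
    # and scales the amplitude by `persistence`
    noise = np.zeros(shape)
    frequency = 1
    amplitude = 1.0
    for _ in range(octaves):
        noise += amplitude * generate_perlin_noise_2d(shape, (frequency * res[0], frequency * res[1]))
        frequency *= 2
        amplitude *= persistence
    return noise

fractal = generate_fractal_noise_2d((512, 512), (8, 8))
plt.figure(figsize=(10, 10))
plt.imshow(fractal, cmap='gray')
plt.title('2D Fractal (Multi-Octave Perlin) Noise')
plt.axis('off')
plt.show()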
3D Self-Affine Fractal: 3D Fractional Brownian Motion¶
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def fractional_brownian_motion_3d(H, length, n):
    # Same crude t**H approximation as the 1D case, applied to three independent components
    dt = length / n
    t = np.linspace(0, length, n)
    dB = np.random.normal(0, np.sqrt(dt), (n, 3))
    B = np.cumsum(dB, axis=0)
    fBm = t[:, None] ** H * B
    return t, fBm
H = 0.7 # Hurst exponent
length = 1.0
n = 1000
t, fBm = fractional_brownian_motion_3d(H, length, n)
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
ax.plot(fBm[:, 0], fBm[:, 1], fBm[:, 2], label=f'H={H}')
ax.set_title('3D Fractional Brownian Motion')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.legend()
plt.show()
These examples provide a starting point for generating self-affine fractals in 1D, 2D, and 3D. You can expand and modify them to match the requirements and details of the manuscript.
Self-Affine Branching Patterns: Fern and Tree¶
The following Python code creates self-affine branching patterns resembling a fern and a tree with Matplotlib. The patterns are generated using Iterated Function Systems (IFS).
import matplotlib.pyplot as plt
import numpy as np
def fern(n):
    # Barnsley fern IFS: pick one of four affine maps at random each step.
    # Both coordinates must be updated from the previous point.
    x, y = [0], [0]
    for _ in range(n):
        r = np.random.random()
        x_old, y_old = x[-1], y[-1]
        if r < 0.01:
            x_new = 0
            y_new = 0.16 * y_old
        elif r < 0.86:
            x_new = 0.85 * x_old + 0.04 * y_old
            y_new = -0.04 * x_old + 0.85 * y_old + 1.6
        elif r < 0.93:
            x_new = 0.2 * x_old - 0.26 * y_old
            y_new = 0.23 * x_old + 0.22 * y_old + 1.6
        else:
            x_new = -0.15 * x_old + 0.28 * y_old
            y_new = 0.26 * x_old + 0.24 * y_old + 0.44
        x.append(x_new)
        y.append(y_new)
    return x, y
x, y = fern(100000)
plt.figure(figsize=(10, 10))
plt.scatter(x, y, s=0.1, color='green')
plt.title('Self-Affine Fern Pattern')
plt.axis('off')
plt.show()
import matplotlib.pyplot as plt
import numpy as np
def tree(n):
    # Tree-like IFS: as with the fern, update both coordinates from the previous point
    x, y = [0], [0]
    for _ in range(n):
        r = np.random.random()
        x_old, y_old = x[-1], y[-1]
        if r < 0.05:
            x_new = 0
            y_new = 0.5 * y_old
        elif r < 0.45:
            x_new = 0.42 * x_old - 0.42 * y_old
            y_new = 0.42 * x_old + 0.42 * y_old + 0.4
        else:
            x_new = 0.42 * x_old + 0.42 * y_old
            y_new = -0.42 * x_old + 0.42 * y_old + 0.4
        x.append(x_new)
        y.append(y_new)
    return x, y
x, y = tree(100000)
plt.figure(figsize=(10, 10))
plt.scatter(x, y, s=0.1, color='brown')
plt.title('Self-Affine Tree Pattern')
plt.axis('off')
plt.show()
import matplotlib.pyplot as plt
import numpy as np
def draw_branch(x, y, angle, length, thickness, ax):
    if length > 0.1:
        x_new = x + length * np.cos(angle)
        y_new = y + length * np.sin(angle)
        ax.plot([x, x_new], [y, y_new], color='brown', lw=thickness)
        new_length = length * 0.7
        new_thickness = thickness * 0.7
        draw_branch(x_new, y_new, angle + np.pi / 6, new_length, new_thickness, ax)
        draw_branch(x_new, y_new, angle - np.pi / 6, new_length, new_thickness, ax)
fig, ax = plt.subplots(figsize=(10, 10))
ax.set_axis_off()
draw_branch(0, 0, np.pi / 2, 10, 2, ax)
plt.title('2D Fractal Tree with Varying Thickness')
plt.show()
import matplotlib.pyplot as plt
import numpy as np
def draw_branch(x, y, angle, length, thickness, splits, ax):
    if length > 0.1:
        x_new = x + length * np.cos(angle)
        y_new = y + length * np.sin(angle)
        ax.plot([x, x_new], [y, y_new], color='brown', lw=thickness)
        # Adjust new_length and new_thickness factors to vary branch length and thickness
        new_length = length * 0.7
        new_thickness = thickness * 0.7
        for i in range(splits):
            # Vary the angle of splits, for example between -np.pi/6 and np.pi/6
            new_angle = angle + np.pi * (i / (splits - 1) - 0.5) / 3
            draw_branch(x_new, y_new, new_angle, new_length, new_thickness, splits, ax)
fig, ax = plt.subplots(figsize=(10, 10))
ax.set_axis_off()
# Initial parameters: origin, initial angle, initial length, initial thickness, number of splits
draw_branch(0, 0, np.pi / 2, 10, 2, 4, ax)
plt.title('2D Fractal Tree with Varying Branches')
plt.show()
To leverage CUDA and parallel GPU threads for faster calculations when generating the fractal tree, we'll use the numba library, which provides CUDA support in Python. Here's the updated code using numba for the parallel computation:
Prerequisites: numba and numpy must be installed; both are included in the fractal-env environment created above.
import math

import numpy as np
import matplotlib.pyplot as plt
from numba import cuda

@cuda.jit
def draw_branch(x, y, angle, length, thickness, splits, d_x, d_y, d_thickness):
    # Each thread takes one parent branch and computes the endpoints of its child
    # branches (one generation of splits). NumPy functions are not available inside
    # CUDA kernels, so math.cos/math.sin/math.pi are used instead.
    idx = cuda.grid(1)
    if idx < x.size:
        x_tip = x[idx] + length * math.cos(angle[idx])
        y_tip = y[idx] + length * math.sin(angle[idx])
        new_length = length * 0.7
        new_thickness = thickness[idx] * 0.7
        for i in range(splits):
            # Spread the children between -pi/6 and +pi/6 around the parent direction
            new_angle = angle[idx] + math.pi * (i / (splits - 1) - 0.5) / 3
            d_x[idx, i] = x_tip + new_length * math.cos(new_angle)
            d_y[idx, i] = y_tip + new_length * math.sin(new_angle)
            d_thickness[idx, i] = new_thickness

def generate_tree(num_branches, splits):
    x = np.zeros(num_branches, dtype=np.float32)
    y = np.zeros(num_branches, dtype=np.float32)
    angle = np.ones(num_branches, dtype=np.float32) * (np.pi / 2)
    thickness = np.ones(num_branches, dtype=np.float32) * 2
    d_x = np.zeros((num_branches, splits), dtype=np.float32)
    d_y = np.zeros((num_branches, splits), dtype=np.float32)
    d_thickness = np.zeros((num_branches, splits), dtype=np.float32)
    threads_per_block = 128
    blocks_per_grid = (num_branches + (threads_per_block - 1)) // threads_per_block
    draw_branch[blocks_per_grid, threads_per_block](x, y, angle, 10.0, thickness, splits, d_x, d_y, d_thickness)
    return d_x.flatten(), d_y.flatten(), d_thickness.flatten()

num_branches = 1024
splits = 4
x, y, thickness = generate_tree(num_branches, splits)

# All parent branches start at the origin pointing straight up, so their tips coincide;
# draw the trunk once and then a segment from the tip to every child endpoint.
tip_x, tip_y = 0.0, 10.0
plt.figure(figsize=(10, 10))
plt.plot([0, tip_x], [0, tip_y], color='brown', lw=2)
for i in range(len(x)):
    plt.plot([tip_x, x[i]], [tip_y, y[i]], color='brown', lw=thickness[i])
plt.title('2D Fractal Tree with Varying Thickness (CUDA Accelerated)')
plt.axis('off')
plt.show()
/opt/conda/lib/python3.10/site-packages/numba/cuda/dispatcher.py:536: NumbaPerformanceWarning: Grid size 8 will likely result in GPU under-utilization due to low occupancy.
  warn(NumbaPerformanceWarning(msg))
CudaSupportError: Error at driver init:
CUDA driver library cannot be found. If you are sure that a CUDA driver is installed, try setting environment variable NUMBA_CUDA_DRIVER with the file path of the CUDA driver shared library.
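The error above comes from the CUDA driver itself being unavailable, not from the kernel code. If a driver is installed in a non-standard location, point the NUMBA_CUDA_DRIVER environment variable at the driver shared library as the message suggests; on a machine with no GPU at all, the kernels can still be exercised (slowly) by setting NUMBA_ENABLE_CUDASIM=1 before importing numba, which runs them on Numba's built-in CUDA simulator.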
import numpy as np
import matplotlib.pyplot as plt
from numba import cuda
from numba.cuda.random import create_xoroshiro128p_states, xoroshiro128p_uniform_float32

@cuda.jit
def generate_fern(rng_states, iterations, x, y):
    # Each thread iterates its own copy of the Barnsley fern IFS. Python/NumPy random
    # generators are not usable inside CUDA kernels, so per-thread xoroshiro states are used.
    idx = cuda.grid(1)
    if idx < x.shape[0]:
        for i in range(iterations):
            r = xoroshiro128p_uniform_float32(rng_states, idx)
            if r < 0.01:
                x[idx, i + 1] = 0
                y[idx, i + 1] = 0.16 * y[idx, i]
            elif r < 0.86:
                x[idx, i + 1] = 0.85 * x[idx, i] + 0.04 * y[idx, i]
                y[idx, i + 1] = -0.04 * x[idx, i] + 0.85 * y[idx, i] + 1.6
            elif r < 0.93:
                x[idx, i + 1] = 0.2 * x[idx, i] - 0.26 * y[idx, i]
                y[idx, i + 1] = 0.23 * x[idx, i] + 0.22 * y[idx, i] + 1.6
            else:
                x[idx, i + 1] = -0.15 * x[idx, i] + 0.28 * y[idx, i]
                y[idx, i + 1] = 0.26 * x[idx, i] + 0.24 * y[idx, i] + 0.44

def plot_fern():
    n = 1024
    iterations = 10000
    x = np.zeros((n, iterations + 1), dtype=np.float32)
    y = np.zeros((n, iterations + 1), dtype=np.float32)
    threads_per_block = 128
    blocks_per_grid = (n + (threads_per_block - 1)) // threads_per_block
    rng_states = create_xoroshiro128p_states(n, seed=1)
    generate_fern[blocks_per_grid, threads_per_block](rng_states, iterations, x, y)
    x_flat = x.flatten()
    y_flat = y.flatten()
    plt.figure(figsize=(10, 10))
    plt.scatter(x_flat, y_flat, s=0.1, color='green')
    plt.title('CUDA Accelerated 2D Fractal Fern Pattern')
    plt.axis('off')
    plt.show()

plot_fern()
CudaSupportError: Error at driver init:
CUDA driver library cannot be found. If you are sure that a CUDA driver is installed, try setting environment variable NUMBA_CUDA_DRIVER with the file path of the CUDA driver shared library.