Datalore: "libdevice not found at ./libdevice.10.bc" InternalError when trying to fit an image classification model

I am getting the error below when trying to fit my model in a Datalore notebook. I am using the GPU S machine.

Traceback (most recent call last):
  at cell 6, line 24
  at /opt/python/envs/default/lib/python3.8/site-packages/keras/utils/traceback_utils.py, line 70, in error_handler(*args, **kwargs)
  at /opt/python/envs/default/lib/python3.8/site-packages/tensorflow/python/eager/execute.py, line 52, in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
InternalError: Graph execution error:

Detected at node 'StatefulPartitionedCall_61' defined at (most recent call last):
  File "/opt/python/lib/python3.8/runpy.py", line 194, in _run_module_as_main
    return _run_code(code, main_globals, None,
  File "/opt/python/lib/python3.8/runpy.py", line 87, in _run_code
    exec(code, run_globals)
  File "/opt/python/envs/default/lib/python3.8/site-packages/ipykernel_launcher.py", line 16, in <module>
    app.launch_new_instance()
  File "/opt/python/envs/default/lib/python3.8/site-packages/traitlets/config/application.py", line 1043, in launch_instance
    app.start()
  File "/opt/python/envs/default/lib/python3.8/site-packages/ipykernel/kernelapp.py", line 612, in start
    self.io_loop.start()
  File "/opt/python/envs/default/lib/python3.8/site-packages/tornado/platform/asyncio.py", line 195, in start
    self.asyncio_loop.run_forever()
  File "/opt/python/lib/python3.8/asyncio/base_events.py", line 570, in run_forever
    self._run_once()
  File "/opt/python/lib/python3.8/asyncio/base_events.py", line 1859, in _run_once
    handle._run()
  File "/opt/python/lib/python3.8/asyncio/events.py", line 81, in _run
    self._context.run(self._callback, *self._args)
  File "/opt/python/envs/default/lib/python3.8/site-packages/tornado/ioloop.py", line 685, in <lambda>
    lambda f: self._run_callback(functools.partial(callback, future))
  File "/opt/python/envs/default/lib/python3.8/site-packages/tornado/ioloop.py", line 738, in _run_callback
    ret = callback()
  File "/opt/python/envs/default/lib/python3.8/site-packages/tornado/gen.py", line 825, in inner
    self.ctx_run(self.run)
  File "/opt/python/envs/default/lib/python3.8/site-packages/tornado/gen.py", line 786, in run
    yielded = self.gen.send(value)
  File "/opt/python/envs/default/lib/python3.8/site-packages/ipykernel/kernelbase.py", line 358, in process_one
    yield gen.maybe_future(dispatch(*args))
  File "/opt/python/envs/default/lib/python3.8/site-packages/tornado/gen.py", line 234, in wrapper
    yielded = ctx_run(next, result)
  File "/opt/python/envs/default/lib/python3.8/site-packages/ipykernel/kernelbase.py", line 261, in dispatch_shell
    yield gen.maybe_future(handler(stream, idents, msg))
  File "/opt/python/envs/default/lib/python3.8/site-packages/tornado/gen.py", line 234, in wrapper
    yielded = ctx_run(next, result)
  File "/opt/python/envs/default/lib/python3.8/site-packages/ipykernel/kernelbase.py", line 536, in execute_request
    self.do_execute(
  File "/opt/python/envs/default/lib/python3.8/site-packages/tornado/gen.py", line 234, in wrapper
    yielded = ctx_run(next, result)
  File "/opt/python/envs/default/lib/python3.8/site-packages/ipykernel/ipkernel.py", line 302, in do_execute
    res = shell.run_cell(code, store_history=store_history, silent=silent)
  File "/opt/python/envs/default/lib/python3.8/site-packages/ipykernel/zmqshell.py", line 539, in run_cell
    return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
  File "/opt/python/envs/default/lib/python3.8/site-packages/IPython/core/interactiveshell.py", line 2914, in run_cell
    result = self._run_cell(
  File "/opt/python/envs/default/lib/python3.8/site-packages/IPython/core/interactiveshell.py", line 2960, in _run_cell
    return runner(coro)
  File "/opt/python/envs/default/lib/python3.8/site-packages/IPython/core/async_helpers.py", line 78, in _pseudo_sync_runner
    coro.send(None)
  File "/opt/python/envs/default/lib/python3.8/site-packages/IPython/core/interactiveshell.py", line 3185, in run_cell_async
    has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
  File "/opt/python/envs/default/lib/python3.8/site-packages/IPython/core/interactiveshell.py", line 3377, in run_ast_nodes
    if (await self.run_code(code, result, async_=asy)):
  File "/opt/python/envs/default/lib/python3.8/site-packages/IPython/core/interactiveshell.py", line 3457, in run_code
    exec(code_obj, self.user_global_ns, self.user_ns)
  File "<ipython-input-6-e46c72bc7443>", line 24, in <module>
    hist = model.fit(train_images, y[:train_size], epochs=500, batch_size=32, callbacks=[checkpointLowLoss, learning_rate_scheduler], validation_data=(val_images, val_y))
  File "/opt/python/envs/default/lib/python3.8/site-packages/keras/utils/traceback_utils.py", line 65, in error_handler
    return fn(*args, **kwargs)
  File "/opt/python/envs/default/lib/python3.8/site-packages/keras/engine/training.py", line 1650, in fit
    tmp_logs = self.train_function(iterator)
  File "/opt/python/envs/default/lib/python3.8/site-packages/keras/engine/training.py", line 1249, in train_function
    return step_function(self, iterator)
  File "/opt/python/envs/default/lib/python3.8/site-packages/keras/engine/training.py", line 1233, in step_function
    outputs = model.distribute_strategy.run(run_step, args=(data,))
  File "/opt/python/envs/default/lib/python3.8/site-packages/keras/engine/training.py", line 1222, in run_step
    outputs = model.train_step(data)
  File "/opt/python/envs/default/lib/python3.8/site-packages/keras/engine/training.py", line 1027, in train_step
    self.optimizer.minimize(loss, self.trainable_variables, tape=tape)
  File "/opt/python/envs/default/lib/python3.8/site-packages/keras/optimizers/optimizer_experimental/optimizer.py", line 527, in minimize
    self.apply_gradients(grads_and_vars)
  File "/opt/python/envs/default/lib/python3.8/site-packages/keras/optimizers/optimizer_experimental/optimizer.py", line 1140, in apply_gradients
    return super().apply_gradients(grads_and_vars, name=name)
  File "/opt/python/envs/default/lib/python3.8/site-packages/keras/optimizers/optimizer_experimental/optimizer.py", line 634, in apply_gradients
    iteration = self._internal_apply_gradients(grads_and_vars)
  File "/opt/python/envs/default/lib/python3.8/site-packages/keras/optimizers/optimizer_experimental/optimizer.py", line 1166, in _internal_apply_gradients
    return tf.__internal__.distribute.interim.maybe_merge_call(
  File "/opt/python/envs/default/lib/python3.8/site-packages/keras/optimizers/optimizer_experimental/optimizer.py", line 1216, in _distributed_apply_gradients_fn
    distribution.extended.update(
  File "/opt/python/envs/default/lib/python3.8/site-packages/keras/optimizers/optimizer_experimental/optimizer.py", line 1211, in apply_grad_to_update_var
    return self._update_step_xla(grad, var, id(self._var_key(var)))
Node: 'StatefulPartitionedCall_61'
libdevice not found at ./libdevice.10.bc
  [[{{node StatefulPartitionedCall_61}}]] [Op:__inference_train_function_7552]

This is the code:

# train model
hist = model.fit(train_images, y[:train_size], epochs=500, batch_size=32, callbacks=[checkpointLowLoss, learning_rate_scheduler], validation_data=(val_images, val_y))
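
(For context, checkpointLowLoss and learning_rate_scheduler are ordinary Keras callbacks defined earlier in the notebook. The sketch below shows roughly how they are set up; the file name, monitored metric, and schedule values are illustrative placeholders, not my exact settings.)

from keras.callbacks import ModelCheckpoint, LearningRateScheduler

# keep only the weights with the lowest validation loss seen so far
checkpointLowLoss = ModelCheckpoint('best_model.h5',   # placeholder path
                                    monitor='val_loss',
                                    save_best_only=True)

# illustrative schedule: hold the learning rate for 10 epochs, then decay it
def lr_schedule(epoch, lr):
    return lr if epoch < 10 else lr * 0.95

learning_rate_scheduler = LearningRateScheduler(lr_schedule)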

These are the imported libraries:

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
import tensorflow as tf
from sklearn.utils import shuffle
import keras

from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, BatchNormalization, Dropout, RandomRotation, RandomZoom, GlobalAveragePooling2D, AveragePooling2D
from keras.layers import SeparableConv2D

From what I've found online, this error appears when TensorFlow cannot actually use the GPU; the key part seems to be "libdevice not found at ./libdevice.10.bc", which suggests the CUDA toolkit files that XLA needs are not where TensorFlow expects them on this machine.
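
Based on what I've read, the usual suggestions are either to point XLA at the directory that contains libdevice.10.bc via the XLA_FLAGS environment variable, or to fall back to the legacy Keras optimizer so the XLA-compiled optimizer update step is skipped. I have not verified either of these on Datalore, and the CUDA path below is only an illustrative guess:

import os

# Assumption: the CUDA toolkit (with its nvvm/libdevice folder) lives under
# /usr/local/cuda; adjust to wherever libdevice.10.bc actually is
# (e.g. locate it with: find / -name 'libdevice.10.bc' 2>/dev/null).
# Set this before TensorFlow initializes the GPU, i.e. restart the kernel first.
os.environ['XLA_FLAGS'] = '--xla_gpu_cuda_data_dir=/usr/local/cuda'

import tensorflow as tf

# Alternative: recompile the existing model with the legacy optimizer, whose
# update step is not jit-compiled with XLA, so libdevice is never needed.
model.compile(optimizer=tf.keras.optimizers.legacy.Adam(),
              loss='categorical_crossentropy',   # placeholder, keep your own loss
              metrics=['accuracy'])              # placeholder, keep your own metrics

Would either of these be the right way to fix this on the GPU S machine, or is there a Datalore-specific setting I am missing?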