Describe the issue:
Dear all,
I have just made a fresh install of PyMC v5.3.1 with PyTensor v2.11.3 in a new conda environment. When sampling a model I get the attached error message, which says that "Faddeeva.cc" cannot be found. In an earlier version this file was indeed present in the corresponding directory, whereas in the new version it is missing. Simply copying the file to the appropriate location fixes the issue (a rough sketch of what I did is below).
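For reference, here is a sketch of the workaround, assuming the directory PyTensor reads from is pytensor/scalar/c_code (the paths are illustrative; I copied the file from an older environment that still ships it):

# Rough sketch of the workaround (assumption: Erfcx.c_support_code looks for
# Faddeeva.cc under pytensor/scalar/c_code; the source path below is illustrative).
import os
import shutil
import pytensor.scalar as ps

# Directory where PyTensor expects Faddeeva.cc.
target_dir = os.path.join(os.path.dirname(ps.__file__), "c_code")
os.makedirs(target_dir, exist_ok=True)

# Copy Faddeeva.cc from a working (older) install into the new environment.
old_copy = "/path/to/old_env/site-packages/pytensor/scalar/c_code/Faddeeva.cc"
shutil.copy(old_copy, os.path.join(target_dir, "Faddeeva.cc"))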
Cheers,
Dominique Eckert
Reproducible code example:
/opt/anaconda3/envs/pymc/lib/python3.11/site-packages/pytensor/scalar/c_code/Faddeeva.cc not found
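The path above is simply where the missing file is expected. A minimal sketch that should trigger the same failure (assuming any graph that uses erfcx and is compiled with the default C backend goes through Erfcx.c_support_code, which reads Faddeeva.cc):

# Minimal sketch (assumption: compiling erfcx with the default C backend
# calls Erfcx.c_support_code, which tries to read Faddeeva.cc).
import pytensor
import pytensor.tensor as pt

x = pt.dscalar("x")
y = pt.erfcx(x)

# Compilation should raise the FileNotFoundError shown below
# when Faddeeva.cc is missing from pytensor/scalar/c_code.
f = pytensor.function([x], y)
print(f(1.0))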
Error message:
File ~/opt/anaconda3/envs/pymc/lib/python3.11/site-packages/pytensor/compile/function/types.py:1756, in orig_function(inputs, outputs, mode, accept_inplace, name, profile, on_unused_input, output_keys, fgraph)
1744 m = Maker(
1745 inputs,
1746 outputs,
(...)
1753 fgraph=fgraph,
1754 )
1755 with config.change_flags(compute_test_value="off"):
-> 1756 fn = m.create(defaults)
1757 finally:
1758 t2 = time.perf_counter()
File ~/opt/anaconda3/envs/pymc/lib/python3.11/site-packages/pytensor/compile/function/types.py:1649, in FunctionMaker.create(self, input_storage, storage_map)
1646 start_import_time = pytensor.link.c.cmodule.import_time
1648 with config.change_flags(traceback__limit=config.traceback__compile_limit):
-> 1649 _fn, _i, _o = self.linker.make_thunk(
1650 input_storage=input_storage_lists, storage_map=storage_map
1651 )
1653 end_linker = time.perf_counter()
1655 linker_time = end_linker - start_linker
File ~/opt/anaconda3/envs/pymc/lib/python3.11/site-packages/pytensor/link/basic.py:254, in LocalLinker.make_thunk(self, input_storage, output_storage, storage_map, **kwargs)
247 def make_thunk(
248 self,
249 input_storage: Optional["InputStorageType"] = None,
(...)
252 **kwargs,
253 ) -> Tuple["BasicThunkType", "InputStorageType", "OutputStorageType"]:
--> 254 return self.make_all(
255 input_storage=input_storage,
256 output_storage=output_storage,
257 storage_map=storage_map,
258 )[:3]
File ~/opt/anaconda3/envs/pymc/lib/python3.11/site-packages/pytensor/link/vm.py:1252, in VMLinker.make_all(self, profiler, input_storage, output_storage, storage_map)
1250 thunks[-1].lazy = False
1251 except Exception:
-> 1252 raise_with_op(fgraph, node)
1254 t1 = time.perf_counter()
1256 if self.profile:
File ~/opt/anaconda3/envs/pymc/lib/python3.11/site-packages/pytensor/link/utils.py:535, in raise_with_op(fgraph, node, thunk, exc_info, storage_map)
530 warnings.warn(
531 f"{exc_type} error does not allow us to add an extra error message"
532 )
533 # Some exception need extra parameter in inputs. So forget the
534 # extra long error message in that case.
--> 535 raise exc_value.with_traceback(exc_trace)
File ~/opt/anaconda3/envs/pymc/lib/python3.11/site-packages/pytensor/link/vm.py:1243, in VMLinker.make_all(self, profiler, input_storage, output_storage, storage_map)
1238 thunk_start = time.perf_counter()
1239 # no-recycling is done at each VM.__call__ So there is
1240 # no need to cause duplicate c code by passing
1241 # no_recycling here.
1242 thunks.append(
-> 1243 node.op.make_thunk(node, storage_map, compute_map, [], impl=impl)
1244 )
1245 linker_make_thunk_time[node] = time.perf_counter() - thunk_start
1246 if not hasattr(thunks[-1], "lazy"):
1247 # We don't want all ops maker to think about lazy Ops.
1248 # So if they didn't specify that its lazy or not, it isn't.
1249 # If this member isn't present, it will crash later.
File ~/opt/anaconda3/envs/pymc/lib/python3.11/site-packages/pytensor/link/c/op.py:131, in COp.make_thunk(self, node, storage_map, compute_map, no_recycling, impl)
127 self.prepare_node(
128 node, storage_map=storage_map, compute_map=compute_map, impl="c"
129 )
130 try:
--> 131 return self.make_c_thunk(node, storage_map, compute_map, no_recycling)
132 except (NotImplementedError, MethodNotDefined):
133 # We requested the c code, so don't catch the error.
134 if impl == "c":
File ~/opt/anaconda3/envs/pymc/lib/python3.11/site-packages/pytensor/link/c/op.py:96, in COp.make_c_thunk(self, node, storage_map, compute_map, no_recycling)
94 print(f"Disabling C code for {self} due to unsupported float16")
95 raise NotImplementedError("float16")
---> 96 outputs = cl.make_thunk(
97 input_storage=node_input_storage, output_storage=node_output_storage
98 )
99 thunk, node_input_filters, node_output_filters = outputs
101 @is_cthunk_wrapper_type
102 def rval():
File ~/opt/anaconda3/envs/pymc/lib/python3.11/site-packages/pytensor/link/c/basic.py:1200, in CLinker.make_thunk(self, input_storage, output_storage, storage_map, cache, **kwargs)
1165 """Compile this linker's `self.fgraph` and return a function that performs the computations.
1166
1167 The return values can be used as follows:
(...)
1197
1198 """
1199 init_tasks, tasks = self.get_init_tasks()
-> 1200 cthunk, module, in_storage, out_storage, error_storage = self.__compile__(
1201 input_storage, output_storage, storage_map, cache
1202 )
1204 res = _CThunk(cthunk, init_tasks, tasks, error_storage, module)
1205 res.nodes = self.node_order
File ~/opt/anaconda3/envs/pymc/lib/python3.11/site-packages/pytensor/link/c/basic.py:1120, in CLinker.__compile__(self, input_storage, output_storage, storage_map, cache)
1118 input_storage = tuple(input_storage)
1119 output_storage = tuple(output_storage)
-> 1120 thunk, module = self.cthunk_factory(
1121 error_storage,
1122 input_storage,
1123 output_storage,
1124 storage_map,
1125 cache,
1126 )
1127 return (
1128 thunk,
1129 module,
(...)
1138 error_storage,
1139 )
File ~/opt/anaconda3/envs/pymc/lib/python3.11/site-packages/pytensor/link/c/basic.py:1644, in CLinker.cthunk_factory(self, error_storage, in_storage, out_storage, storage_map, cache)
1642 if cache is None:
1643 cache = get_module_cache()
-> 1644 module = cache.module_from_key(key=key, lnk=self)
1646 vars = self.inputs + self.outputs + self.orphans
1647 # List of indices that should be ignored when passing the arguments
1648 # (basically, everything that the previous call to uniq eliminated)
File ~/opt/anaconda3/envs/pymc/lib/python3.11/site-packages/pytensor/link/c/cmodule.py:1203, in ModuleCache.module_from_key(self, key, lnk)
1200 if module is not None:
1201 return module
-> 1203 src_code = lnk.get_src_code()
1204 # Is the source code already in the cache?
1205 module_hash = get_module_hash(src_code, key)
File ~/opt/anaconda3/envs/pymc/lib/python3.11/site-packages/pytensor/link/c/basic.py:1523, in CLinker.get_src_code(self)
1522 def get_src_code(self):
-> 1523 mod = self.get_dynamic_module()
1524 return mod.code()
File ~/opt/anaconda3/envs/pymc/lib/python3.11/site-packages/pytensor/link/c/basic.py:1594, in CLinker.get_dynamic_module(self)
1580 static = """
1581 static int {struct_name}_executor({struct_name} *self) {{
1582 return self->run();
(...)
1590 struct_name=self.struct_name
1591 )
1593 # We add all the support code, compile args, headers and libs we need.
-> 1594 for support_code in self.support_code() + self.c_support_code_apply:
1595 mod.add_support_code(support_code)
1596 mod.add_support_code(self.struct_code)
File ~/opt/anaconda3/envs/pymc/lib/python3.11/site-packages/pytensor/link/c/basic.py:927, in CLinker.support_code(self)
925 # generic support code
926 for x in [y.type for y in self.variables] + [y.op for y in self.node_order]:
--> 927 support_code = x.c_support_code()
928 if isinstance(support_code, list):
929 ret.extend(support_code)
File ~/opt/anaconda3/envs/pymc/lib/python3.11/site-packages/pytensor/tensor/elemwise.py:1181, in Elemwise.c_support_code(self, **kwargs)
1180 def c_support_code(self, **kwargs):
-> 1181 return self.scalar_op.c_support_code(**kwargs)
File ~/opt/anaconda3/envs/pymc/lib/python3.11/site-packages/pytensor/scalar/basic.py:4339, in Composite.c_support_code(self, **kwargs)
4337 def c_support_code(self, **kwargs):
4338 # Remove duplicate code blocks by using a `set`
-> 4339 rval = {
4340 subnode.op.c_support_code(**kwargs).strip()
4341 for subnode in self.fgraph.toposort()
4342 }
4343 return "\n".join(sorted(rval))
File ~/opt/anaconda3/envs/pymc/lib/python3.11/site-packages/pytensor/scalar/basic.py:4340, in <setcomp>(.0)
4337 def c_support_code(self, **kwargs):
4338 # Remove duplicate code blocks by using a `set`
4339 rval = {
-> 4340 subnode.op.c_support_code(**kwargs).strip()
4341 for subnode in self.fgraph.toposort()
4342 }
4343 return "\n".join(sorted(rval))
File ~/opt/anaconda3/envs/pymc/lib/python3.11/site-packages/pytensor/scalar/math.py:153, in Erfcx.c_support_code(self, **kwargs)
151 def c_support_code(self, **kwargs):
152 # Using Faddeeva.cc source file from: http://ab-initio.mit.edu/wiki/index.php/Faddeeva_Package
--> 153 with open(
154 os.path.join(os.path.dirname(__file__), "c_code", "Faddeeva.cc")
155 ) as f:
156 raw = f.read()
157 return raw
FileNotFoundError: [Errno 2] No such file or directory: '/Users/deckert/opt/anaconda3/envs/pymc/lib/python3.11/site-packages/pytensor/scalar/c_code/Faddeeva.cc'
Apply node that caused the error: Elemwise{Composite}(cdelta_interval__, TensorConstant{2.0}, TensorConstant{2.2512917986064953}, TensorConstant{-inf}, TensorConstant{1.8702642513208303}, TensorConstant{0.5}, TensorConstant{1.4142135865763297}, TensorConstant{1.8878150731936125}, TensorConstant{-0.5}, TensorConstant{0.3333333333333333}, TensorConstant{-4.0}, TensorConstant{1.0}, TensorConstant{10.0}, TensorConstant{1.9544109736938513}, TensorConstant{7.78110478588066}, TensorConstant{0.001}, TensorConstant{-2000.0}, TensorConstant{300.0}, rdelta_interval__, TensorConstant{4000.0}, TensorConstant{8.216088098632316}, TensorConstant{2.0})
Toposort index: 0
Inputs types: [TensorType(float64, ()), TensorType(float64, ()), TensorType(float64, ()), TensorType(float32, ()), TensorType(float64, ()), TensorType(float64, ()), TensorType(float64, ()), TensorType(float64, ()), TensorType(float64, ()), TensorType(float64, ()), TensorType(float64, ()), TensorType(float64, ()), TensorType(float64, ()), TensorType(float64, ()), TensorType(float64, ()), TensorType(float64, ()), TensorType(float64, ()), TensorType(float64, ()), TensorType(float64, ()), TensorType(float64, ()), TensorType(float64, ()), TensorType(float64, ())]
HINT: Use a linker other than the C linker to print the inputs' shapes and strides.
HINT: Re-running with most PyTensor optimizations disabled could provide a back-trace showing when this node was created. This can be done by setting the PyTensor flag 'optimizer=fast_compile'. If that does not work, PyTensor optimizations can be disabled with 'optimizer=None'.
HINT: Use the PyTensor flag `exception_verbosity=high` for a debug print-out and storage map footprint of this Apply node.
PyTensor version information:
2.11.3
Context for the issue:
No response