Hello! I am learning ODDT, so I am trying to reproduce Snippet_1 and Snippet_2, but I got two errors.
The first occurred in Snippet_1. It seems to be caused by the pipeline.write() function.
Error in the Snippet_1.

TypeError Traceback (most recent call last)
File :12
File ~/miniconda3/envs/oddt/lib/python3.9/site-packages/oddt/virtualscreening.py:406, in virtualscreening.write(self, fmt, filename, csv_filename, **kwargs)
404 else:
405 kwargs['opt'] = {'c': None}
--> 406 output_mol_file = oddt.toolkit.Outputfile(fmt,
407 filename,
408 overwrite=True,
409 **kwargs)
410 if csv_filename:
411 f = open(csv_filename, 'w')
TypeError: oddt.toolkits.ob.Outputfile() got multiple values for keyword argument 'overwrite'
Error in the Snippet_2.
In [7] (Build training and testing descriptors), I do not know whether this counts as an error; it is shown below.

Error in [8] (Validate all regressor models): this issue resulted in the loss of the fourth figure.
Random Forest
Internal CV [R^2]: Train 0.9269 Test 0.5795
CV R^2_mean: 0.4754 R_std: 0.0321
Multiple Linear Regression
Internal CV [R^2]: Train 0.4109 Test 0.3603
CV R^2_mean: 0.3545 R_std: 0.0175
Partial Least Squares
Internal CV [R^2]: Train 0.3406 Test 0.3291
CV R^2_mean: 0.3176 R_std: 0.0324
Neural Network
Internal CV [R^2]: Train 0.2359 Test 0.1799
Empty Traceback (most recent call last)
File ~/miniconda3/envs/oddt/lib/python3.9/site-packages/joblib/parallel.py:862, in Parallel.dispatch_one_batch(self, iterator)
861 try:
--> 862 tasks = self._ready_batches.get(block=False)
863 except queue.Empty:
864 # slice the iterator n_jobs * batchsize items at a time. If the
865 # slice returns less than that, then the current batchsize puts
(...)
868 # accordingly to distribute evenly the last items between all
869 # workers.
File ~/miniconda3/envs/oddt/lib/python3.9/queue.py:168, in Queue.get(self, block, timeout)
167 if not self._qsize():
--> 168 raise Empty
169 elif timeout is None:
Empty:
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
Cell In [8], line 24
22 cv_descs = np.vstack((descriptors_train, descriptors_test))
23 cv_act = list(pdbbind_act_train.values()) + list(pdbbind_act_test.values())
---> 24 cv = cross_validate(m, cv_descs, cv_act , n=3)
25 print('CV R^2_mean: %.4f R_std: %.4f' % (cv.mean(), np.std(cv)))
27 # Train Set
File ~/miniconda3/envs/oddt/lib/python3.9/site-packages/oddt/scoring/init.py:56, in cross_validate(model, cv_set, cv_target, n, shuffle, n_jobs)
54 else:
55 cv = n
---> 56 return cross_val_score(model, cv_set, cv_target, cv=cv, n_jobs=n_jobs)
File ~/miniconda3/envs/oddt/lib/python3.9/site-packages/sklearn/model_selection/_validation.py:509, in cross_val_score(estimator, X, y, groups, scoring, cv, n_jobs, verbose, fit_params, pre_dispatch, error_score)
506 # To ensure multimetric format is not supported
507 scorer = check_scoring(estimator, scoring=scoring)
--> 509 cv_results = cross_validate(
510 estimator=estimator,
511 X=X,
512 y=y,
513 groups=groups,
514 scoring={"score": scorer},
515 cv=cv,
516 n_jobs=n_jobs,
517 verbose=verbose,
518 fit_params=fit_params,
519 pre_dispatch=pre_dispatch,
520 error_score=error_score,
521 )
522 return cv_results["test_score"]
File ~/miniconda3/envs/oddt/lib/python3.9/site-packages/sklearn/model_selection/_validation.py:267, in cross_validate(estimator, X, y, groups, scoring, cv, n_jobs, verbose, fit_params, pre_dispatch, return_train_score, return_estimator, error_score)
264 # We clone the estimator to make sure that all the folds are
265 # independent, and that it is pickle-able.
266 parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)
--> 267 results = parallel(
268 delayed(_fit_and_score)(
269 clone(estimator),
270 X,
271 y,
272 scorers,
273 train,
274 test,
275 verbose,
276 None,
277 fit_params,
278 return_train_score=return_train_score,
279 return_times=True,
280 return_estimator=return_estimator,
281 error_score=error_score,
282 )
283 for train, test in cv.split(X, y, groups)
284 )
286 _warn_about_fit_failures(results, error_score)
288 # For callabe scoring, the return type is only know after calling. If the
289 # return type is a dictionary, the error scores can now be inserted with
290 # the correct key.
File ~/miniconda3/envs/oddt/lib/python3.9/site-packages/joblib/parallel.py:1085, in Parallel.call(self, iterable)
1076 try:
1077 # Only set self._iterating to True if at least a batch
1078 # was dispatched. In particular this covers the edge
(...)
1082 # was very quick and its callback already dispatched all the
1083 # remaining jobs.
1084 self._iterating = False
-> 1085 if self.dispatch_one_batch(iterator):
1086 self._iterating = self._original_iterator is not None
1088 while self.dispatch_one_batch(iterator):
File ~/miniconda3/envs/oddt/lib/python3.9/site-packages/joblib/parallel.py:873, in Parallel.dispatch_one_batch(self, iterator)
870 n_jobs = self._cached_effective_n_jobs
871 big_batch_size = batch_size * n_jobs
--> 873 islice = list(itertools.islice(iterator, big_batch_size))
874 if len(islice) == 0:
875 return False
File ~/miniconda3/envs/oddt/lib/python3.9/site-packages/sklearn/model_selection/_validation.py:269, in (.0)
264 # We clone the estimator to make sure that all the folds are
265 # independent, and that it is pickle-able.
266 parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)
267 results = parallel(
268 delayed(_fit_and_score)(
--> 269 clone(estimator),
270 X,
271 y,
272 scorers,
273 train,
274 test,
275 verbose,
276 None,
277 fit_params,
278 return_train_score=return_train_score,
279 return_times=True,
280 return_estimator=return_estimator,
281 error_score=error_score,
282 )
283 for train, test in cv.split(X, y, groups)
284 )
286 _warn_about_fit_failures(results, error_score)
288 # For callabe scoring, the return type is only know after calling. If the
289 # return type is a dictionary, the error scores can now be inserted with
290 # the correct key.
File ~/miniconda3/envs/oddt/lib/python3.9/site-packages/sklearn/base.py:87, in clone(estimator, safe)
85 for name, param in new_object_params.items():
86 new_object_params[name] = clone(param, safe=False)
---> 87 new_object = klass(**new_object_params)
88 params_set = new_object.get_params(deep=False)
90 # quick sanity check of the parameters of the clone
File ~/miniconda3/envs/oddt/lib/python3.9/site-packages/oddt/scoring/models/regressors.py:55, in neuralnetwork.init(self, *args, **kwargs)
54 def init(self, *args, **kwargs):
---> 55 super(neuralnetwork, self).init(*args, **kwargs)
File ~/miniconda3/envs/oddt/lib/python3.9/site-packages/oddt/scoring/models/regressors.py:28, in OddtRegressor.init(self, *args, **kwargs)
26 if self._model is None:
27 raise ValueError('Model not specified!')
---> 28 model = self._model(*args, **local_kwargs)
30 self.pipeline = Pipeline([('empty_dims_remover', VarianceThreshold()),
31 ('scaler', StandardScaler()),
32 ('model', model)]).set_params(**kwargs)
TypeError: init() got an unexpected keyword argument 'memory'

Sorry for so many details. Thank you in advance!
Hello! I am learning ODDT, so I am trying to reproduce Snippet_1 and Snippet_2, but I got two errors.
The first occurred in Snippet_1. It seems to be caused by the pipeline.write() function.
Error in the Snippet_1.

TypeError Traceback (most recent call last)
File :12
File ~/miniconda3/envs/oddt/lib/python3.9/site-packages/oddt/virtualscreening.py:406, in virtualscreening.write(self, fmt, filename, csv_filename, **kwargs)
404 else:
405 kwargs['opt'] = {'c': None}
--> 406 output_mol_file = oddt.toolkit.Outputfile(fmt,
407 filename,
408 overwrite=True,
409 **kwargs)
410 if csv_filename:
411 f = open(csv_filename, 'w')
TypeError: oddt.toolkits.ob.Outputfile() got multiple values for keyword argument 'overwrite'
Error in the Snippet_2.
In [7] (Build training and testing descriptors), I do not know whether this counts as an error; it is shown below.

Error in [8] (Validate all regressor models): this issue resulted in the loss of the fourth figure.
Random Forest
Internal CV [R^2]: Train 0.9269 Test 0.5795
CV R^2_mean: 0.4754 R_std: 0.0321
Multiple Linear Regression
Internal CV [R^2]: Train 0.4109 Test 0.3603
CV R^2_mean: 0.3545 R_std: 0.0175
Partial Least Squares
Internal CV [R^2]: Train 0.3406 Test 0.3291
CV R^2_mean: 0.3176 R_std: 0.0324
Neural Network
Internal CV [R^2]: Train 0.2359 Test 0.1799
Empty Traceback (most recent call last)
File ~/miniconda3/envs/oddt/lib/python3.9/site-packages/joblib/parallel.py:862, in Parallel.dispatch_one_batch(self, iterator)
861 try:
--> 862 tasks = self._ready_batches.get(block=False)
863 except queue.Empty:
864 # slice the iterator n_jobs * batchsize items at a time. If the
865 # slice returns less than that, then the current batchsize puts
(...)
868 # accordingly to distribute evenly the last items between all
869 # workers.
File ~/miniconda3/envs/oddt/lib/python3.9/queue.py:168, in Queue.get(self, block, timeout)
167 if not self._qsize():
--> 168 raise Empty
169 elif timeout is None:
Empty:
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
Cell In [8], line 24
22 cv_descs = np.vstack((descriptors_train, descriptors_test))
23 cv_act = list(pdbbind_act_train.values()) + list(pdbbind_act_test.values())
---> 24 cv = cross_validate(m, cv_descs, cv_act , n=3)
25 print('CV R^2_mean: %.4f R_std: %.4f' % (cv.mean(), np.std(cv)))
27 # Train Set
File ~/miniconda3/envs/oddt/lib/python3.9/site-packages/oddt/scoring/init.py:56, in cross_validate(model, cv_set, cv_target, n, shuffle, n_jobs)
54 else:
55 cv = n
---> 56 return cross_val_score(model, cv_set, cv_target, cv=cv, n_jobs=n_jobs)
File ~/miniconda3/envs/oddt/lib/python3.9/site-packages/sklearn/model_selection/_validation.py:509, in cross_val_score(estimator, X, y, groups, scoring, cv, n_jobs, verbose, fit_params, pre_dispatch, error_score)
506 # To ensure multimetric format is not supported
507 scorer = check_scoring(estimator, scoring=scoring)
--> 509 cv_results = cross_validate(
510 estimator=estimator,
511 X=X,
512 y=y,
513 groups=groups,
514 scoring={"score": scorer},
515 cv=cv,
516 n_jobs=n_jobs,
517 verbose=verbose,
518 fit_params=fit_params,
519 pre_dispatch=pre_dispatch,
520 error_score=error_score,
521 )
522 return cv_results["test_score"]
File ~/miniconda3/envs/oddt/lib/python3.9/site-packages/sklearn/model_selection/_validation.py:267, in cross_validate(estimator, X, y, groups, scoring, cv, n_jobs, verbose, fit_params, pre_dispatch, return_train_score, return_estimator, error_score)
264 # We clone the estimator to make sure that all the folds are
265 # independent, and that it is pickle-able.
266 parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)
--> 267 results = parallel(
268 delayed(_fit_and_score)(
269 clone(estimator),
270 X,
271 y,
272 scorers,
273 train,
274 test,
275 verbose,
276 None,
277 fit_params,
278 return_train_score=return_train_score,
279 return_times=True,
280 return_estimator=return_estimator,
281 error_score=error_score,
282 )
283 for train, test in cv.split(X, y, groups)
284 )
286 _warn_about_fit_failures(results, error_score)
288 # For callabe scoring, the return type is only know after calling. If the
289 # return type is a dictionary, the error scores can now be inserted with
290 # the correct key.
File ~/miniconda3/envs/oddt/lib/python3.9/site-packages/joblib/parallel.py:1085, in Parallel.call(self, iterable)
1076 try:
1077 # Only set self._iterating to True if at least a batch
1078 # was dispatched. In particular this covers the edge
(...)
1082 # was very quick and its callback already dispatched all the
1083 # remaining jobs.
1084 self._iterating = False
-> 1085 if self.dispatch_one_batch(iterator):
1086 self._iterating = self._original_iterator is not None
1088 while self.dispatch_one_batch(iterator):
File ~/miniconda3/envs/oddt/lib/python3.9/site-packages/joblib/parallel.py:873, in Parallel.dispatch_one_batch(self, iterator)
870 n_jobs = self._cached_effective_n_jobs
871 big_batch_size = batch_size * n_jobs
--> 873 islice = list(itertools.islice(iterator, big_batch_size))
874 if len(islice) == 0:
875 return False
File ~/miniconda3/envs/oddt/lib/python3.9/site-packages/sklearn/model_selection/_validation.py:269, in (.0)
264 # We clone the estimator to make sure that all the folds are
265 # independent, and that it is pickle-able.
266 parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)
267 results = parallel(
268 delayed(_fit_and_score)(
--> 269 clone(estimator),
270 X,
271 y,
272 scorers,
273 train,
274 test,
275 verbose,
276 None,
277 fit_params,
278 return_train_score=return_train_score,
279 return_times=True,
280 return_estimator=return_estimator,
281 error_score=error_score,
282 )
283 for train, test in cv.split(X, y, groups)
284 )
286 _warn_about_fit_failures(results, error_score)
288 # For callabe scoring, the return type is only know after calling. If the
289 # return type is a dictionary, the error scores can now be inserted with
290 # the correct key.
File ~/miniconda3/envs/oddt/lib/python3.9/site-packages/sklearn/base.py:87, in clone(estimator, safe)
85 for name, param in new_object_params.items():
86 new_object_params[name] = clone(param, safe=False)
---> 87 new_object = klass(**new_object_params)
88 params_set = new_object.get_params(deep=False)
90 # quick sanity check of the parameters of the clone
File ~/miniconda3/envs/oddt/lib/python3.9/site-packages/oddt/scoring/models/regressors.py:55, in neuralnetwork.init(self, *args, **kwargs)
54 def init(self, *args, **kwargs):
---> 55 super(neuralnetwork, self).init(*args, **kwargs)
File ~/miniconda3/envs/oddt/lib/python3.9/site-packages/oddt/scoring/models/regressors.py:28, in OddtRegressor.init(self, *args, **kwargs)
26 if self._model is None:
27 raise ValueError('Model not specified!')
---> 28 model = self._model(*args, **local_kwargs)
30 self.pipeline = Pipeline([('empty_dims_remover', VarianceThreshold()),
31 ('scaler', StandardScaler()),
32 ('model', model)]).set_params(**kwargs)
TypeError: init() got an unexpected keyword argument 'memory'

Sorry for so many details. Thank you in advance!