Hello, I hope someone can help me with the following error message.

I am currently running GSEAPY using JupyterLab 3.0.14 on Windows:


Below is the error message

MemoryError                               Traceback (most recent call last)
<ipython-input> in <module>
----> 1 pre_res = gp.prerank(rnk=C_rank, gene_sets="BTM_for_GSEA_20131008_eoz_rev.gmt",
2 permutation_num=100, # reduce number to speed up testing
3 outdir="test/prerank_S4_vs_WT_BTM", seed=6)

D:\User\Local\Documents\Bioinformatics\Jupyterlab\lib\site-packages\gseapy\gsea.py in prerank(rnk, gene_sets, outdir, pheno_pos, pheno_neg, min_size, max_size, permutation_num, weighted_score_type, ascending, processes, figsize, format, graph_num, no_plot, seed, verbose)
1050 min_size, max_size, permutation_num, weighted_score_type,
1051 ascending, processes, figsize, format, graph_num, no_plot, seed, verbose)
-> 1052 pre.run()
1053 return pre
1054

D:\User\Local\Documents\Bioinformatics\Jupyterlab\lib\site-packages\gseapy\gsea.py in run(self)
506 self._logger.info("Start to run GSEA...Might take a while..................")
507 # compute ES, NES, pval, FDR, RES
--> 508 gsea_results, hit_ind,rank_ES, subsets = gsea_compute(data=dat2, n=self.permutation_num, gmt=gmt,
509 weighted_score_type=self.weighted_score_type,
510 permutation_type="gene_set", method=None,

D:\User\Local\Documents\Bioinformatics\Jupyterlab\lib\site-packages\gseapy\algorithm.py in gsea_compute(data, gmt, n, weighted_score_type, permutation_type, method, pheno_pos, pheno_neg, classes, ascending, processes, seed, single, scale)
509 # pool_esnu.join()
510
--> 511 temp_esnu = Parallel(n_jobs=processes)(delayed(enrichment_score)(
512 gl, cor_vec, gmt.get(subset), w, n,
513 rs, single, scale)

D:\User\Local\Documents\Bioinformatics\Jupyterlab\lib\site-packages\joblib\parallel.py in __call__(self, iterable)
1042 self._iterating = self._original_iterator is not None
1043
-> 1044 while self.dispatch_one_batch(iterator):
1045 pass
1046

D:\User\Local\Documents\Bioinformatics\Jupyterlab\lib\site-packages\joblib\parallel.py in dispatch_one_batch(self, iterator)
857 return False
858 else:
--> 859 self._dispatch(tasks)
860 return True
861

D:\User\Local\Documents\Bioinformatics\Jupyterlab\lib\site-packages\joblib\parallel.py in _dispatch(self, batch)
775 with self._lock:
776 job_idx = len(self._jobs)
--> 777 job = self._backend.apply_async(batch, callback=cb)
778 # A job can complete so quickly than its callback is
779 # called before we get here, causing self._jobs to

D:\User\Local\Documents\Bioinformatics\Jupyterlab\lib\site-packages\joblib\_parallel_backends.py in apply_async(self, func, callback)
206 def apply_async(self, func, callback=None):
207 """Schedule a func to be run"""
--> 208 result = ImmediateResult(func)
209 if callback:
210 callback(result)

D:\User\Local\Documents\Bioinformatics\Jupyterlab\lib\site-packages\joblib\_parallel_backends.py in __init__(self, batch)
570 # Don't delay the application, to avoid keeping the input
571 # arguments in memory
--> 572 self.results = batch()
573
574 def get(self):

D:\User\Local\Documents\Bioinformatics\Jupyterlab\lib\site-packages\joblib\parallel.py in __call__(self)
260 # change the default number of processes to -1
261 with parallel_backend(self._backend, n_jobs=self._n_jobs):
--> 262 return [func(*args, **kwargs)
263 for func, args, kwargs in self.items]
264

D:\User\Local\Documents\Bioinformatics\Jupyterlab\lib\site-packages\joblib\parallel.py in <listcomp>(.0)
260 # change the default number of processes to -1
261 with parallel_backend(self._backend, n_jobs=self._n_jobs):
--> 262 return [func(*args, **kwargs)
263 for func, args, kwargs in self.items]
264

D:\User\Local\Documents\Bioinformatics\Jupyterlab\lib\site-packages\gseapy\algorithm.py in enrichment_score(gene_list, correl_vector, gene_set, weighted_score_type, nperm, rs, single, scale)
74 norm_no_tag = 1.0/Nmiss
75
---> 76 RES = np.cumsum(tag_indicator * correl_vector * norm_tag - no_tag_indicator * norm_no_tag, axis=axis)
77
78 if scale: RES = RES / N

MemoryError: Unable to allocate 22.7 MiB for an array with shape (101, 29396) and data type float64

Thank you very much

Sincerely,



Source link