Traceback (most recent call last):
  File "D:\Pycharm\PyCharm Community Edition 2021.3.2\plugins\python-ce\helpers\pydev\pydevd.py", line 1483, in _exec
    pydev_imports.execfile(file, globals, locals)  # execute the script
  File "D:\Pycharm\PyCharm Community Edition 2021.3.2\plugins\python-ce\helpers\pydev\_pydev_imps\_pydev_execfile.py", line 18, in execfile
    exec(compile(contents+"\n", file, 'exec'), glob, loc)
  File "E:/huyi/MNAD/Evaluate.py", line 184, in <module>
    main()
  File "E:/huyi/MNAD/Evaluate.py", line 142, in main
    outputs, feas, updated_feas, m_items_test, softmax_score_query, softmax_score_memory, _, _, _, compactness_loss = model.forward(imgs[:, 0:3 * 4], m_items_test, False)
  File "E:\huyi\MNAD\model\final_future_prediction_with_memory_spatial_sumonly_weight_ranking_top1.py", line 150, in forward
    updated_fea, keys, softmax_score_query, softmax_score_memory, query, top1_keys, keys_ind, compactness_loss = self.memory(fea, keys, train)
  File "D:\Anaconda\envs\MNAD\lib\site-packages\torch\nn\modules\module.py", line 493, in __call__
    result = self.forward(*input, **kwargs)
  File "E:\huyi\MNAD\model\memory_final_spatial_sumonly_weight_ranking_top1.py", line 148, in forward
    compactness_loss, query_re, top1_keys, keys_ind = self.gather_loss(query, keys, train)
  File "E:\huyi\MNAD\model\memory_final_spatial_sumonly_weight_ranking_top1.py", line 215, in gather_loss
    softmax_score_query, softmax_score_memory = self.get_score(keys, query)
  File "E:\huyi\MNAD\model\memory_final_spatial_sumonly_weight_ranking_top1.py", line 120, in get_score
    score = torch.matmul(query, torch.t(mem))  # b X h X w X m
RuntimeError: cublas runtime error : the GPU program failed to execute at C:/w/1/s/tmp_conda_3.6_035809/conda/conda-bld/pytorch_1556683229598/work/aten/src/THC/THCBlas.cu:259
My environment is Windows with an RTX A5000 GPU, Python 3.6.2, and PyTorch 1.1.0.
Could you tell me how I can solve this?
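A possible cause (an assumption, not confirmed by the traceback alone): the A5000 is an Ampere card with compute capability sm_86, while PyTorch 1.1.0 ships with CUDA 9/10 kernels that do not include sm_86, so cuBLAS calls like the `torch.matmul` in `get_score` can fail at runtime. Below is a minimal diagnostic sketch, independent of the MNAD code, to check whether the installed PyTorch build can run a simple matmul on this GPU at all:

```python
# Diagnostic sketch (not part of the MNAD repo).
# Assumption being tested: the cuBLAS failure comes from an unsupported
# compute capability (A5000 = sm_86) on an old PyTorch/CUDA build.
import torch

print("PyTorch:", torch.__version__)                  # e.g. 1.1.0
print("CUDA build:", torch.version.cuda)              # CUDA version PyTorch was compiled against
print("GPU:", torch.cuda.get_device_name(0))
print("Compute capability:", torch.cuda.get_device_capability(0))  # A5000 reports (8, 6)

# Reproduce the failing operation on tiny tensors; if this also raises a
# cuBLAS error, the problem is the environment, not the MNAD model code.
a = torch.randn(4, 8, device="cuda")
b = torch.randn(4, 8, device="cuda")
print(torch.matmul(a, torch.t(b)).shape)               # expected: torch.Size([4, 4])
```

If the compute capability prints as (8, 6) but `torch.version.cuda` is 9.x or 10.x, the usual fix is to install a newer PyTorch build compiled against CUDA 11 or later, which added sm_86 support.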