From c39869fbfc5534238d84536ae6537f7ad22af590 Mon Sep 17 00:00:00 2001 From: dhruvtriv3di <105567548+dhruvtriv3di@users.noreply.github.com> Date: Wed, 26 Jul 2023 11:15:59 -0700 Subject: [PATCH] Add files via upload --- Scripts/__pycache__/chemistry.cpython-38.pyc | Bin 0 -> 1985 bytes .../__pycache__/getENSDFdata.cpython-38.pyc | Bin 0 -> 3342 bytes Scripts/__pycache__/parseENSDF.cpython-38.pyc | Bin 0 -> 2696 bytes Scripts/chemistry.py | 2 +- Scripts/chemistry.py.bak | 33 ++ Scripts/getAbundance.py | 6 +- Scripts/getAbundance.py.bak | 24 + Scripts/getENSDFdata.py | 28 +- Scripts/getENSDFdata.py.bak | 116 ++++ Scripts/getNaturalIsotopes.py | 4 +- Scripts/getNaturalIsotopes.py.bak | 25 + Scripts/neucbot.py | 541 ++++++++++++++++++ Scripts/neucbot.py.bak | 541 ++++++++++++++++++ Scripts/parseENSDF.py | 2 +- Scripts/parseENSDF.py.bak | 152 +++++ 15 files changed, 1453 insertions(+), 21 deletions(-) create mode 100644 Scripts/__pycache__/chemistry.cpython-38.pyc create mode 100644 Scripts/__pycache__/getENSDFdata.cpython-38.pyc create mode 100644 Scripts/__pycache__/parseENSDF.cpython-38.pyc create mode 100644 Scripts/chemistry.py.bak create mode 100644 Scripts/getAbundance.py.bak create mode 100644 Scripts/getENSDFdata.py.bak create mode 100644 Scripts/getNaturalIsotopes.py.bak create mode 100644 Scripts/neucbot.py create mode 100644 Scripts/neucbot.py.bak create mode 100644 Scripts/parseENSDF.py.bak diff --git a/Scripts/__pycache__/chemistry.cpython-38.pyc b/Scripts/__pycache__/chemistry.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33e672b235f684b6e1c266ddb7b4e1b6312116cd GIT binary patch literal 1985 zcmdtiNmCm~7zW_!5ylvUjd$?A+rkEUUol1tn-M~m#5Ojzh28;Vw7|>=0wJe3mBoit z<(z|k%wL*oE}`-da?1PSIOY6>3^mX6*ZuX^-BV&F+uK_$UVr}b9`Bni>vsiTUh517 zc>B%9vS7i$Mg&dRg=XwV3-+KDd(nn=?8AO^-~bNd5Dw!Aj^Y?P(S_qUfs;6e(>Q~( zIEVANfQz_<%eaE8h@ucVh*qH8gEdRRfwf=P91H9C>Ye2nY+#d58(-(G=+-vhg|%&qh-ebK1Ye=Gy<4=1 zJwh*dd#`8{?P8zUFFM2laZnr*hs6g+qA2D?NkF(_ zK`e?TQ5J8-vZ#ovsEN8*5v#%zz6eA^tdZziXhs}+#C7a+*|8IDXcD1Gh9(o5iO>u= zc7{WlqGOLQI(EF`*!gm3vZ2X4c9K!t3(XjtOvslUyTHLIh92{4jy=S=$vWq=bh6>t z4*Q0gI?VpK&nbK+m^;QhH_w>WW|_t?!*HBa3#^*s)ZCI|XTfRQMv~7_mdfj9UV*zF zb?h0f#;OUfkmj9aoK-2NXRA!*J;+kSj0>EW+Tg*t#2|AD9Ghidn&Tr2jEkX}=Bg|) z!IGm)$p&0q!`wlEN6#`n!GmR4BF?d#4$Qn9myYXQXvZuw$sMe)$*FO!lH{FO=98-o zvh)z+aqcL?GHFd`e~P8^TqeO1Bg`3R`4pcy9lqv7CL8P+s;I%r=`}TcQ7`H1@cB|( z-w6HAef|Fj)|U0be6+eOeo0IwYKJXu#jU!vzzNYS9l|FP1NP<(0sXm6qIU*$=#p-j$88 zY0(WFeLQ@9xh&)TME|o{_5ZusZU%uBm=E?x>synu<~qMIf!Q)$JixN;wQU*OGCag; z8w`(PXTqjTz2-(EVN*qGe#iM?)3Uqa^NeQG`=0U6VLtJrhFiI%!B3~xXUuOQmgz8U zrqy^qvz>`Xb_(t6b*JZx5_&Vvi2qQtVjGJ{%SmL$N% z0-Rlt5wbYZxg_P3a>yMx+GqU&|BSi%#7lkvPx*R4>Xk!)+Mb?QchB_K-LqXRx(vU{ zzyBeYij4h4op*m0I-lc<{|O?P;34z4ZjJNSJZ~+}(&M&g>o((Mber|EXpN9{9naw; z!whp>*K;|$&x9rHZ<(+I_r&z_A|tZjGOr*UkwaS)uE?V;1!Yks^yg-vX;##MGJI2;6V0@i(c|Ix!IzKjFeqUm>k20EYPiqRfQXm56XGNbXdCENWZ zZtMlg#^%%YFNB}?_5Mp~LslMNOuk~=<-_tD+v{3=iw=JABeUL|;1}pjFCgXX?1Ybu z)Q}>V5?V=v4YV=H`2sK-)>#Q+)a^>F7I^Z;TNM06gFeRQ99nc4%G#H~u0C_k zR7QP8@dL$=^uUzlAV|5lL)>?(*X`e(A;B4t4E_!qUVD3>IPqoD^y|07Uds>TyI?MM z)eK2{R3`XK5<~(n$fE1nev-&o63#zVnLU?Q#+=s2c!^tlIQu{PHfy$Wn$5`X2F<2& zo6T-d3_{`y&F10252rKQ8IsI@M-@3+(h((H8rh*;<#5N+BQ3hmiywKCHr&G({|7|y zk(rvpSYRn1vk6;Z30!l+7Z6AbY-A-yY6#Om0cOq!vf`py_- zzhu#MizV5PbKuH8XY@~U!l7_VaPdQCg#XV9gHCyQ)gm~ zOA~H=EIo5NDQu?}oE)`OP+GHxh4h+?rjwCX^-;>U`e(uIAGuut0E zpf^a)N*h~SkGEE<&mKJ5c)b1W%(`8_arexAzH;-k9mU;KirS(1=Wi%M*t?;>7uV*M z+w%ME#1GrUK$&qOmDTG9k)&8r8N_SYexb5*npjj812nBNaj$g{Byx$C*?rlL5@kq| zFnFOX8TbNA0$;ZFm95c*rXRKwWro&{Jus?wR`2x& 
zp{Pc^q`KRVMAZ-b`w-icRRnVNPW7*&k0Fd&`7;thDgBhH(<#*?#h(n#mHJ9;S>;;$ zK^GNFzVs}d)bm`b?_tmlqD20j*4l%9Kald5;N`D~Tq8mk?^(M+Amk0O%5H@{ye3G6 z{0)&Bk>Ao(#)p7N$Y0Y4`ASl1IL}~^+>@Szg114OvgkPSV_HxLQ5l@6>nHLiI<-sk z7EOFY6Wqf?&mT0`rj3VZ9(p*v=aBcC$ZB3rbCkZkOkcmrJO|@VSQDcz>or+a(kXL3 zBY#IaJ|*%8BA*6A{<0#@tmP`5tMoAO}-WZvQl}>qc zy1Hg>>HryD-;QyotJV64kHPA5IDnwDQ+>X+BQz*QLzTGWpx7@_A8t!XxEvt9|lME%6`Q@_hKc)h~; z;1qHfy(umTvZ$diW4R7)3Q&Tomy7qhBd8vPas&h6=2 a?JmiSDYq=b_UXR>$Is-Q1?P_AI{d%XR~4TC literal 0 HcmV?d00001 diff --git a/Scripts/__pycache__/parseENSDF.cpython-38.pyc b/Scripts/__pycache__/parseENSDF.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5dc0d9a389d0c9268f8a9cabc7fd80fa58b2eaef GIT binary patch literal 2696 zcmd5-&u<$=6rR~1_IkZ`(ok?iOG%*u45|$mB&t$1YD$GvEKn#1S5eq_Ht9C@+L@WO zinTr&P9Riq;G7&QapJ_EQq2_!VR7k+Lr;8f>@@ual{m4heKYgs&D)uI?|t9(N+o1? z{`~PfQL8fc8x0Ebj@`CvTd(a2*7k)X+;3RBAUxrtEsBCD zq76hKN@z<-D8g+=ewCCGcB?$*96ncBy*z<`*6O&_%$_;4KEBKN)J4viW;q-4k(;|K z>@Ht>cjRfFdt)y6Hj_&`QSQ;-zjYSb$VcBrTgb7KZC75jvpzJ%yl6F_TTKC)AP+|e`y8S5PR^G>9l9H$$V zVP6qdI~(QEzI(vb{TyDm9d#cz5o~DRRWWC6DtX1)M0tg5!KQO#wTzEkGT_H&>kdiXJ$K+eI*~`uADv_!w zZpz);&6WP03|kednI*fOjlMo?&-G%Z6WLtt$aJ7pa}Z0FTxqSYe9{=~nbKCGuf{#t z&>g935R3;r zickb7!F>m*{wvbgeQqImn0+uS1Zrn1HD9-dxY5|R+k zmV_kiksb*u4oQ_^__CycODY9vtEWu8h(v`1K$A&ADmhQ$7(_jg3)H8|p^6acR;uN3 zQc)er8i^Am7D>EHf--d~-bT-++j4hsGgcpB_@C14_$=K{*>qc=(NVg+2d^E%fy0N+ zM@P3<`vW>NtmQdhVQcxw0R}oVq&*GHa~>hBi#{Q(2W%pwy@>lS?Gdka&Z{n-FbZ5q8N}NbyvFjsXuoeFEYVSShk1T%bCCE-qLcC;$g! z1C~#OfO3j}*oNSDbfzHyxw-O$<^tCbvc!et%w9OhMKR$m5+ypqCt4JR8Uq15ah2d-B@0e=U_LXf*n>H&6QZk&6T7R@2RHoK{jG@=ru&F zhm}Ut7SFX*0mz5pw32Ab@H@8F(?9UbhrDEXSf85~D!2!nWn2EL+E(8sEl#*yqV6@=a6((3;r){sVV{fk7Q|)43HRzD?BeulPbVoH}dlt!p3%$PB?Isr~Y>GgTssk@J6EuQmPzz25{BQOnLsI|% literal 0 HcmV?d00001 diff --git a/Scripts/chemistry.py b/Scripts/chemistry.py index 4b3f529f..c9abb10a 100755 --- a/Scripts/chemistry.py +++ b/Scripts/chemistry.py @@ -27,7 +27,7 @@ def getZ(ele): return elements[ele.capitalize()] def getElement(z): - for ele in elements.keys(): + for ele in list(elements.keys()): if elements[ele] == z: return ele return "None" diff --git a/Scripts/chemistry.py.bak b/Scripts/chemistry.py.bak new file mode 100644 index 00000000..4b3f529f --- /dev/null +++ b/Scripts/chemistry.py.bak @@ -0,0 +1,33 @@ +elements = {'H' : 1, 'He' : 2, 'Li' : 3, 'Be' : 4, 'B' : 5, + 'C' : 6, 'N' : 7, 'O' : 8, 'F' : 9, 'Ne' : 10, + 'Na' : 11,'Mg' : 12,'Al' : 13,'Si' : 14,'P' : 15, + 'S' : 16,'Cl' : 17,'Ar' : 18,'K' : 19,'Ca' : 20, + 'Sc' : 21,'Ti' : 22,'V' : 23,'Cr' : 24,'Mn' : 25, + 'Fe' : 26,'Co' : 27,'Ni' : 28,'Cu' : 29,'Zn' : 30, + 'Ga' : 31,'Ge' : 32,'As' : 33,'Se' : 34,'Br' : 35, + 'Kr' : 36,'Rb' : 37,'Sr' : 38,'Y' : 39,'Zr' : 40, + 'Nb' : 41,'Mo' : 42,'Tc' : 43,'Ru' : 44,'Rh' : 45, + 'Pd' : 46,'Ag' : 47,'Cd' : 48,'In' : 49,'Sn' : 50, + 'Sb' : 51,'Te' : 52,'I' : 53,'Xe' : 54,'Cs' : 55, + 'Ba' : 56,'La' : 57,'Ce' : 58,'Pr' : 59,'Nd' : 60, + 'Pm' : 61,'Sm' : 62,'Eu' : 63,'Gd' : 64,'Tb' : 65, + 'Dy' : 66,'Ho' : 67,'Er' : 68,'Tm' : 69,'Yb' : 70, + 'Lu' : 71,'Hf' : 72,'Ta' : 73,'W' : 74,'Re' : 75, + 'Os' : 76,'Ir' : 77,'Pt' : 78,'Au' : 79,'Hg' : 80, + 'Tl' : 81,'Pb' : 82,'Bi' : 83,'Po' : 84,'At' : 85, + 'Rn' : 86,'Fr' : 87,'Ra' : 88,'Ac' : 89,'Th' : 90, + 'Pa' : 91,'U' : 92,'Np' : 93,'Pu' : 94,'Am' : 95, + 'Cm' : 96,'Bk' : 97,'Cf' : 98,'Es' : 99,'Fm' :100, + 'Md' :101,'No' :102,'Lr' :103,'Rf' :104,'Db' :105, + 'Sg' :106,'Bh' :107,'Hs' :108,'Mt' :109,'Ds' :110, + 'Rg' :111,'Cn' :112,'Uut':113,'Fl' :114,'Uup':115, + 'Lv' 
:116,'Uus':117,'Uuo':118} + +def getZ(ele): + return elements[ele.capitalize()] + +def getElement(z): + for ele in elements.keys(): + if elements[ele] == z: + return ele + return "None" diff --git a/Scripts/getAbundance.py b/Scripts/getAbundance.py index c65b2be3..054cdc68 100755 --- a/Scripts/getAbundance.py +++ b/Scripts/getAbundance.py @@ -6,17 +6,17 @@ def findAbundance(iso): dir = "./Data/" fname = dir + "abundances.dat" f = open(fname) - tokens = map(lambda line: re.split(" ", line), f.readlines()) + tokens = [re.split(" ", line) for line in f.readlines()] for words in tokens: for word in words: if word == iso: - return filter(None,words)[2] + return [_f for _f in words if _f][2] def main(argv): iso = sys.argv[1] abundance = findAbundance(iso) - print abundance + print(abundance) return abundance if __name__ == '__main__' : diff --git a/Scripts/getAbundance.py.bak b/Scripts/getAbundance.py.bak new file mode 100644 index 00000000..c65b2be3 --- /dev/null +++ b/Scripts/getAbundance.py.bak @@ -0,0 +1,24 @@ +#!/usr/bin/python +import sys +import re + +def findAbundance(iso): + dir = "./Data/" + fname = dir + "abundances.dat" + f = open(fname) + tokens = map(lambda line: re.split(" ", line), f.readlines()) + + for words in tokens: + for word in words: + if word == iso: + return filter(None,words)[2] + +def main(argv): + iso = sys.argv[1] + abundance = findAbundance(iso) + print abundance + return abundance + +if __name__ == '__main__' : + main(sys.argv) + diff --git a/Scripts/getENSDFdata.py b/Scripts/getENSDFdata.py index 885ea6bb..90344171 100755 --- a/Scripts/getENSDFdata.py +++ b/Scripts/getENSDFdata.py @@ -1,11 +1,11 @@ #!/usr/bin/python -import urllib2 +import urllib.request, urllib.error, urllib.parse import re import sys import chemistry import socket -from urllib2 import Request -from sgmllib import SGMLParser +from urllib.request import Request +from sgmllib3k import SGMLParser class URLLister(SGMLParser): def reset(self): @@ -25,10 +25,10 @@ def getPage(url, ele, A): req = Request(url) page = '' try: - page = urllib2.urlopen(req,timeout=3) + page = urllib.request.urlopen(req,timeout=3) except socket.timeout: - print "ERROR: TIMEOUT" - print url + print("ERROR: TIMEOUT") + print(url) text = re.sub('<.*>','',page.read()) text = text[re.search('[0-9]',text).start():] @@ -49,7 +49,7 @@ def getURL(ele, A): # nndc_url = 'https://www.nndc.bnl.gov/chart/decaysearchdirect.jsp?nuc='+str(A)+ele.upper()+'&unc=nds' nndc_url = 'https://www.nndc.bnl.gov/nudat2/decaysearchdirect.jsp?nuc='+str(A)+ele.upper()+'&unc=nds' - nndc_page = urllib2.urlopen(nndc_url,timeout=3) + nndc_page = urllib.request.urlopen(nndc_url,timeout=3) parser = URLLister() parser.feed(nndc_page.read()) parser.close() @@ -64,14 +64,14 @@ def getURL(ele, A): #url = 'https://www.nndc.bnl.gov/chart/' + url_end url = 'https://www.nndc.bnl.gov/nudat2/' + url_end - print 'Retrieving ENSDF data from:\t',url + print('Retrieving ENSDF data from:\t',url) req = Request(url) page = '' try: - page = urllib2.urlopen(req,timeout=3) - except (socket.timeout, urllib2.URLError): - print "ERROR: TIMEOUT" - print url + page = urllib.request.urlopen(req,timeout=3) + except (socket.timeout, urllib.error.URLError): + print("ERROR: TIMEOUT") + print(url) text = re.sub('<.*>','',page.read()) @@ -89,7 +89,7 @@ def getURL(ele, A): else : continue if len(text) < 30 : - print 'WARNING: Could not find alpha for ele = {}, A = {}'.format(ele,A) + print('WARNING: Could not find alpha for ele = {}, A = {}'.format(ele,A)) break # Check that this page is for a 
ground state decay @@ -104,7 +104,7 @@ def getURL(ele, A): def main(argv): if(len(argv) != 3): - print 'Usage: ./getENSDFdata.py [element] [A]' + print('Usage: ./getENSDFdata.py [element] [A]') return ele = argv[1] diff --git a/Scripts/getENSDFdata.py.bak b/Scripts/getENSDFdata.py.bak new file mode 100644 index 00000000..885ea6bb --- /dev/null +++ b/Scripts/getENSDFdata.py.bak @@ -0,0 +1,116 @@ +#!/usr/bin/python +import urllib2 +import re +import sys +import chemistry +import socket +from urllib2 import Request +from sgmllib import SGMLParser + +class URLLister(SGMLParser): + def reset(self): + SGMLParser.reset(self) + self.urls = [] + + def start_a(self, attrs): + href = [v for k, v in attrs if k=='href'] + if href: + self.urls.extend(href) + + +def getPage(url, ele, A): + dir = './Data/Decays/' + fname = dir + 'ensdf' + ele.capitalize() + str(A) + '.dat' + f = open(fname,'w') + req = Request(url) + page = '' + try: + page = urllib2.urlopen(req,timeout=3) + except socket.timeout: + print "ERROR: TIMEOUT" + print url + + text = re.sub('<.*>','',page.read()) + text = text[re.search('[0-9]',text).start():] + newtext = "" + for line in text.split('\n'): + if(len(line)>1): + newtext += line + '\n' + text = re.sub(r'^\t','',newtext,flags=re.MULTILINE) + #print(text) + f.write(text) + return page.read() + +def getURL(ele, A): + Z = chemistry.getZ(ele) + dau_A = A - 4 + dau_Z = Z-2 + dau_ele = chemistry.getElement(dau_Z) + + # nndc_url = 'https://www.nndc.bnl.gov/chart/decaysearchdirect.jsp?nuc='+str(A)+ele.upper()+'&unc=nds' + nndc_url = 'https://www.nndc.bnl.gov/nudat2/decaysearchdirect.jsp?nuc='+str(A)+ele.upper()+'&unc=nds' + nndc_page = urllib2.urlopen(nndc_url,timeout=3) + parser = URLLister() + parser.feed(nndc_page.read()) + parser.close() + nndc_page.close() + url_ends = [] + for a_url in parser.urls: + mod_url = re.sub(' ','%20',a_url) + if re.search('getdecaydataset',mod_url) and re.search('a%20decay',mod_url): + url_ends.append(mod_url) + + for url_end in url_ends: + #url = 'https://www.nndc.bnl.gov/chart/' + url_end + url = 'https://www.nndc.bnl.gov/nudat2/' + url_end + + print 'Retrieving ENSDF data from:\t',url + req = Request(url) + page = '' + try: + page = urllib2.urlopen(req,timeout=3) + except (socket.timeout, urllib2.URLError): + print "ERROR: TIMEOUT" + print url + + text = re.sub('<.*>','',page.read()) + + # Check that this page is for an alpha decay + is_adecay = re.search(" A DECAY",text) + if not is_adecay: + continue + adecay_pos = text.find("A DECAY") + if adecay_pos > 0 and adecay_pos < 30 : + break + + # Prune the page and check that it might have interesting content + if re.search('[0-9]',text) : + text = text[re.search('[0-9]',text).start():] + else : + continue + if len(text) < 30 : + print 'WARNING: Could not find alpha for ele = {}, A = {}'.format(ele,A) + break + + # Check that this page is for a ground state decay + level = 0 + for line in text.split('\n'): + if len(line) > 8 and line[6] == ' ' and line[7] == 'P': + level = line.split()[2] + if level == '0.0': + break + + return url + +def main(argv): + if(len(argv) != 3): + print 'Usage: ./getENSDFdata.py [element] [A]' + return + + ele = argv[1] + A = int(argv[2]) + url = getURL(ele,A) + getPage(url, ele, A) + +if __name__ == "__main__": + main(sys.argv) diff --git a/Scripts/getNaturalIsotopes.py b/Scripts/getNaturalIsotopes.py index 4dc9af76..2dd05294 100755 --- a/Scripts/getNaturalIsotopes.py +++ b/Scripts/getNaturalIsotopes.py @@ -6,7 +6,7 @@ def findIsotopes(ele): dir = "./Data/" fname = dir + 
"abundances.dat" f = open(fname) - tokens = map(lambda line: re.split(" ", line), f.readlines()) + tokens = [re.split(" ", line) for line in f.readlines()] isotopes = "" @@ -19,7 +19,7 @@ def findIsotopes(ele): def main(argv): ele = argv[1] - print findIsotopes(ele) + print(findIsotopes(ele)) if __name__ == '__main__': main(sys.argv) diff --git a/Scripts/getNaturalIsotopes.py.bak b/Scripts/getNaturalIsotopes.py.bak new file mode 100644 index 00000000..4dc9af76 --- /dev/null +++ b/Scripts/getNaturalIsotopes.py.bak @@ -0,0 +1,25 @@ +#!/usr/bin/python +import sys +import re + +def findIsotopes(ele): + dir = "./Data/" + fname = dir + "abundances.dat" + f = open(fname) + tokens = map(lambda line: re.split(" ", line), f.readlines()) + + isotopes = "" + + for words in tokens: + for word in words: + if re.sub('[0-9]','',word) == ele.capitalize(): + isotopes += re.sub('[A-Z a-z]','',word) + " " + + return isotopes + +def main(argv): + ele = argv[1] + print findIsotopes(ele) + +if __name__ == '__main__': + main(sys.argv) diff --git a/Scripts/neucbot.py b/Scripts/neucbot.py new file mode 100644 index 00000000..ea3f1370 --- /dev/null +++ b/Scripts/neucbot.py @@ -0,0 +1,541 @@ +#!/usr/bin/python + +import sys +import os +sys.path.insert(0, './Scripts/') +import re +import subprocess +import shutil +import math +import parseENSDF as ensdf +import getNaturalIsotopes as gni +import getAbundance as isoabund + +class constants: + N_A = 6.0221409e+23 + MeV_to_keV= 1.e3 + mb_to_cm2 = 1.e-27 + year_to_s = 31536000 + min_bin = 0 # keV + max_bin = 20000 # keV + delta_bin = 100 # keV + run_talys = False + run_alphas = True + print_alphas = False + download_data = False + download_version = 2 + force_recalculation = False + ofile = sys.stdout + +class material: + def __init__(self,e,a,f): + self.ele = str(e) + self.A = float(a) + self.frac = float(f) + +def isoDir(ele,A): + return './Data/Isotopes/'+ele.capitalize()+'/'+ele.capitalize()+str(int(A))+'/' + +def parseIsotope(iso): + ele = '' + A = 0 + for i in iso: + if i.isalpha(): + ele += i + if i.isdigit(): + A = A*10 + int(i) + return [ele,A] + +def generateAlphaFileName(ele,A): + outdir = './AlphaLists/' + fName = outdir + ele.capitalize() + str(A) + 'Alphas.dat' + return fName + +def generateAlphaList(ele, A): + print('generateAlphaList(',ele,A,')',file=constants.ofile) + ensdf.main(['parseENSDF',ele,A]) + +def loadAlphaList(fname): + f = open(fname) + tokens = [line.split() for line in f.readlines()] + alpha_list = [] + for words in tokens: + if words[0][0] == '#' or len(words) < 2: + continue + + alpha = [] + for word in words: + alpha.append(float(word)) + alpha_list.append(alpha) + f.close() + return alpha_list + +def getAlphaList(ele,A): + fname = generateAlphaFileName(ele,A) + return loadAlphaList(fname) + +def getAlphaListIfExists(ele,A): + fName = generateAlphaFileName(ele,A) + tries = 3 + while not os.path.isfile(fName): + if tries < 0: + print('Cannot generate alpha list for ele = ', ele, ' and A = ', A,file = constants.ofile) + return 0 + print('generating alpha file ', fName, file = constants.ofile) + generateAlphaList(ele,A) + tries -= 1 + return getAlphaList(ele,A) + +def loadChainAlphaList(fname): + f = open(fname) + tokens = [line.split() for line in f.readlines()] + alpha_list = [] + for line in tokens: + if len(line) < 2 or line[0][0] == '#': + continue + # Read isotope and its branching ratio from file + iso = line[0] + br = float(line[1]) + [ele,A] = parseIsotope(iso) + + # Now get the isotope's alpha list and add it to the chain's list + 
aList_forIso = getAlphaListIfExists(ele,A) + if constants.print_alphas: + print(iso, file = constants.ofile) + print('\t', aList_forIso, file = constants.ofile) + for [ene,intensity] in aList_forIso: + alpha_list.append([ene, intensity*br/100]) + return alpha_list + +def readTargetMaterial(fname): + f = open(fname) + mat_comp = [] + tokens = [line.split() for line in f.readlines()] + for line in tokens: + if len(line) < 3: + continue + if line[0][0] == '#': + continue + ele = line[0].lower() + A = int(line[1]) + frac = float(line[2]) + + if A == 0: + natIso_list = gni.findIsotopes(ele).split() + for A_i in natIso_list: + abund = float(isoabund.findAbundance(str(A_i)+ele.capitalize())) + mater = material(ele,A_i,frac*abund/100.) + mat_comp.append(mater) + else: + mater = material(ele,A,frac) + mat_comp.append(mater) + + # Normalize + norm = 0 + for mat in mat_comp: + norm += mat.frac + for mat in mat_comp: + mat.frac /= norm + + return mat_comp + +def calcStoppingPower(e_alpha_MeV,mat_comp): + # Stopping power as units of keV/(mg/cm^2) or MeV/(g/cm^2) + e_alpha = e_alpha_MeV + sp_total = 0 + # First, reduce the material to combine all isotopes with the same Z + mat_comp_reduced = {} + for mat in mat_comp: + if mat.ele in mat_comp_reduced: + mat_comp_reduced[mat.ele] += mat.frac + else: + mat_comp_reduced[mat.ele] = mat.frac + + # Then, for each element, get the stopping power at this alpha energy + for mat in mat_comp_reduced: + spDir = './Data/StoppingPowers/' + spFile = spDir + mat.lower() + '.dat' + spf = open(spFile) + + tokens = [line.split() for line in spf.readlines()] + first = True + sp_found = False + e_curr = 0 + e_last = 0 + sp_curr = 0 + sp_last = 0 + sp_alpha = 0 + for line in tokens: + if line[0][0] == '#': + continue + e_curr = float(line[0]) + if str(line[1]) == 'keV': + e_curr /= 1000 + elif str(line[1]) == 'MeV': + e_curr *= 1 + sp_curr = float(line[3])+float(line[2]) + + # Alpha energy is below the list. 
Use the lowest energy in the list + if e_curr > e_alpha and first: + first = False + sp_found = True + sp_alpha = sp_curr + break + # If this entry is above the alpha energy, the alpha is between this + # entry and the previous one + if e_curr > e_alpha: + first = False + sp_alpha = (sp_curr-sp_last)*(e_alpha-e_last)/(e_curr-e_last) + sp_last + sp_found = True + break + # Otherwise, keep looking for the entry + first = False + sp_last = sp_curr + e_last = e_curr + # if the alpha energy is too high for the list, use the highest energy on the list + if not sp_found: + sp_alpha = sp_last + sp_total += sp_alpha * mat_comp_reduced[mat]/100 + return sp_total + +def runTALYS(e_a, ele, A): + iso = str(ele)+str(int(A)) + inpdir = isoDir(ele,A) + 'TalysInputs/' + outdir = isoDir(ele,A) + 'TalysOut/' + nspecdir= isoDir(ele,A) + 'NSpectra/' + if not os.path.exists(inpdir): + os.makedirs(inpdir) + if not os.path.exists(outdir): + os.makedirs(outdir) + if not os.path.exists(nspecdir): + os.makedirs(nspecdir) + +# command = "\nprojectile a\nejectiles p n g\nelement "+ele+"\nmass "+str(int(A))+"\nenergy "+str(e_a)+"\npreequilibrium y\ngiantresonance y\nmultipreeq y\noutspectra y\noutlevels y\noutgamdis y\nfilespectrum n\nelwidth 0.2\n" + command = "\nprojectile a\nejectiles p n g\nelement "+ele+"\nmass "+str(int(A))+"\nenergy "+str(e_a)+"\npreequilibrium y\ngiantresonance y\nmultipreeq y\noutspectra y\noutlevels y\noutgamdis y\nfilespectrum n\nelwidth 0.2" + + inp_fname = inpdir+"inputE"+str(e_a) + inp_f = open(inp_fname,'w') + inp_f.write(command) + inp_f.close() + out_fname = outdir+"outputE"+str(e_a) + + bashcmd = 'talys < '+inp_fname+' > '+out_fname + print('Running TALYS:\t ', bashcmd, file = constants.ofile) + runscript_fname = "./runscript_temp.sh" + runscript_f = open(runscript_fname,"w") + runscript_f.write("#!/usr/bin/bash\n\n"+bashcmd) + runscript_f.close() + process = subprocess.call(bashcmd,shell=True) + # Move the output neutron spectrum to the appropriate directory + ls = os.listdir("./") + + moved_file = False + for f in ls: + if "nspec" in f: + if os.path.exists(nspecdir+f): + os.remove(nspecdir+f) + fname = nspecdir+'nspec{0:0>7.3f}.tot'.format(e_a) + shutil.move(f, fname) + moved_file = True + # If no neutron spectrum file is found, make a blank one + if not moved_file: + fname = nspecdir+'nspec{0:0>7.3f}.tot'.format(e_a) + blank_f = open(fname,'w') + blank_f.write("EMPTY") + blank_f.close() + + +def getMatTerm(mat,mat_comp): + # mat_comp structure: [ele,A,frac] + A = mat.A + conc = mat.frac/100. + mat_term = (constants.N_A * conc)/A + return mat_term + +def getIsotopeDifferentialNSpec(e_a, ele, A): + target = ele+str(int(A)) + path = isoDir(ele,A) + 'NSpectra/' + if not os.path.exists(path): + os.makedirs(path) + fname = path+'nspec{0:0>7.3f}.tot'.format(int(100*e_a)/100.) + outpath = isoDir(ele,A) + 'TalysOut' + if constants.force_recalculation: + print('Forcibily running TALYS for', int(100*e_a)/100., 'alpha on', target, file = constants.ofile) + print('Outpath', outpath, file = constants.ofile) + runTALYS(int(100*e_a)/100.,ele,A) + + # If the file does not exist, run TALYS + if not os.path.exists(fname): + if constants.run_talys: + while not os.path.exists(fname): + print('Running TALYS for', int(100*e_a)/100., 'alpha on', target, file = constants.ofile) + print('Outpath', outpath, file = constants.ofile) + runTALYS(int(100*e_a)/100.,ele,A) + ls = os.listdir(outpath) + else: + print("Warning, no (alpha,n) data found for E_a =", e_a,"MeV on target", target,"...skipping. 
Consider running with the -d or -t options", file = constants.ofile) + return {} + + # Load the file + # If no output was produced, skip this energy + if not os.path.exists(outpath): + return {} + + f = open(fname) + spec = {} + tokens = [line.split() for line in f.readlines()] + for line in tokens: + if len(line) < 1 or line[0] == 'EMPTY': + break + if line[0][0] == '#': + continue + # line[0] = E-out + # line[1] = Total + # line[2] = Direct + # line[3] = Pre-equil + # line[4] = Mult. preeq + # line[5] = Compound + # line[6] = Pre-eq ratio + # convert from mb/MeV to cm^2/MeV + energy = int(float(line[0])*constants.MeV_to_keV) + sigma = float(line[1])*constants.mb_to_cm2/constants.MeV_to_keV + spec[energy] = sigma + return spec + +def rebin(histo,step,minbin,maxbin): + nbins = (maxbin-minbin)/step + newhisto = {} + normhisto = {} + for i in sorted(histo): + index = sorted(histo).index(i) + # Get the spacing between points + delta = sorted(histo)[0] + if index > 0: + delta = sorted(histo)[index] - sorted(histo)[index-1] + # If the x value is too low, put it in the underflow bin (-1) + if i < minbin: + print('Underflow: ', i, ' (minbin = ', minbin, ')',file = constants.ofile) + if -1 in newhisto: + newhisto[-1] += histo[i]*delta + normhisto[-1] += delta + else: + newhisto[-1] = histo[i]*delta + normhisto[-1] = delta + # ...or the overflow bin if too high + if i > maxbin: + print('Overflow: ', histo[i], ' (maxbin = ', maxbin,')', file = constants.ofile) + overflowbin = int(nbins+10*step) + if overflowbin in newhisto: + newhisto[overflowbin] += histo[i]*delta + normhisto[overflowbin] += delta + else: + newhisto[overflowbin] = histo[i]*delta + normhisto[overflowbin] = delta + # Otherwise, calculate the bin + newbin = int(minbin+int((i-minbin)/step)*step) + if newbin in newhisto: + newhisto[newbin] += histo[i]*delta + normhisto[newbin] += delta + else: + newhisto[newbin] = histo[i]*delta + normhisto[newbin] = delta + + # Renormalize the new histogram + for i in newhisto: + if normhisto[i] > 0: + newhisto[i] /= normhisto[i] + + return newhisto + +def integrate(histo): + integral = 0 + for i in sorted(histo): + # Get the bin width + delta = sorted(histo)[0] + index = sorted(histo).index(i) + if index > 0: + delta = sorted(histo)[index] - sorted(histo)[index-1] + + integral += histo[i]*delta + return integral + +def readTotalNXsect(e_a,ele,A): + fname = isoDir(ele,A) + 'TalysOut/outputE' + str(int(100*e_a)/100.) + if not os.path.exists(fname): + print("Could not find file ", fname, file = constants.ofile) + return 0 + f = open(fname) + lines = [line.split() for line in f.readlines()] + xsect_line = 0 + for line in lines: + if line == ['2.','Binary','non-elastic','cross','sections','(non-exclusive)']: + break + else: + xsect_line += 1 + + xsect_line += 3 + if len(lines) < xsect_line: + return 0 + if lines[xsect_line][0] != 'neutron': + return 0 + sigma = float(lines[xsect_line][2]) + sigma *= constants.mb_to_cm2 + return sigma + +def condense_alpha_list(alpha_list,alpha_step_size): + alpha_ene_cdf = [] + max_alpha = max(alpha_list) + e_a_max = int(max_alpha[0]*100 + 0.5)/100. + alpha_ene_cdf.append([e_a_max,max_alpha[1]]) + e_a = e_a_max + while e_a > 0: + cum_int = 0 + for alpha in alpha_list: + this_e_a = int(alpha[0]*100+0.5)/100. 
+ if this_e_a >= e_a: + cum_int += alpha[1] + alpha_ene_cdf.append([e_a,cum_int]) + e_a -= alpha_step_size + return alpha_ene_cdf + +def run_alpha(alpha_list, mat_comp, e_alpha_step): + binsize = 0.1 # Bin size for output spectrum + + spec_tot = {} + xsects = {} + total_xsect = 0 + counter = 0 + alpha_ene_cdf = condense_alpha_list(alpha_list,e_alpha_step) + for [e_a, intensity] in alpha_ene_cdf: + counter += 1 + if counter % (int(len(alpha_ene_cdf)/100)) == 0: + sys.stdout.write('\r') + sys.stdout.write("[%-100s] %d%%" % ('='*int(counter*100/len(alpha_ene_cdf)), 100*counter/len(alpha_ene_cdf))) + sys.stdout.flush() + + stopping_power = 0 + if stopping_power == 0: + stopping_power = calcStoppingPower(e_a, mat_comp) + for mat in mat_comp: + mat_term = getMatTerm(mat,mat_comp) + # Get alpha n spectrum for this alpha and this target + spec_raw = getIsotopeDifferentialNSpec(e_a, mat.ele, mat.A) + spec = rebin(spec_raw,constants.delta_bin,constants.min_bin,constants.max_bin) + # Add this spectrum to the total spectrum + delta_ea = e_alpha_step + if e_a - e_alpha_step < 0: + delta_ea = e_a + prefactors = (intensity/100.)*mat_term*delta_ea/stopping_power + xsect = prefactors * readTotalNXsect(e_a,mat.ele,mat.A) + total_xsect += xsect + matname = str(mat.ele)+str(mat.A) + if matname in xsects: + xsects[matname] += xsect + else: + xsects[matname] = xsect + for e in spec: + val = prefactors * spec[e] + if e in spec_tot: + spec_tot[e] += val + else: + spec_tot[e] = val + + sys.stdout.write('\r') + sys.stdout.write("[%-100s] %d%%" % ('='*int((counter*100)/len(alpha_ene_cdf)), 100*(counter+1)/len(alpha_ene_cdf))) + sys.stdout.flush() + print('', file = sys.stdout) + # print out total spectrum + newspec = spec_tot + print('',file = constants.ofile) + print('# Total neutron yield = ', total_xsect, ' n/decay', file = constants.ofile) + for x in sorted(xsects): + print('\t',x,xsects[x], file = constants.ofile) + print('# Integral of spectrum = ', integrate(newspec), " n/decay", file = constants.ofile) + for e in sorted(newspec): + print(e, newspec[e], file = constants.ofile) + +def help_message(): + print('Usage: You must specify an alpha list or decay chain file and a target material file.\nYou may also specify a step size to for integrating the alphas as they slow down in MeV; the default value is 0.01 MeV\n\t-l [alpha list file name]\n\t-c [decay chain file name]\n\t-m [material composition file name]\n\t-s [alpha step size in MeV]\n\t-t (to run TALYS for reactions not in libraries)\n\t-d (download isotopic data for isotopes missing from database; default behavior is v2)\n\t\t-d v1 (use V1 database, TALYS-1.6)\n\t-d v2 (use V2 database, TALYS-1.95)\n\t-o [output file name]', file = sys.stdout) + +def main(): + alpha_list = [] + mat_comp = [] + alpha_step_size = 0.01 #MeV (default value) + # Load arguments + for arg in sys.argv: + if arg == '-l': + alphalist_file = sys.argv[sys.argv.index(arg)+1] + print('load alpha list', alphalist_file, file = sys.stdout) + alpha_list = loadAlphaList(alphalist_file) + if arg == '-c': + chain_file = sys.argv[sys.argv.index(arg)+1] + print('load alpha chain', chain_file, file = sys.stdout) + alpha_list = loadChainAlphaList(chain_file) + if arg == '-m': + mat_file = sys.argv[sys.argv.index(arg)+1] + print('load target material', mat_file, file = sys.stdout) + mat_comp = readTargetMaterial(mat_file) + if arg == '-s': + alpha_step_size = float(sys.argv[sys.argv.index(arg)+1]) + print('step size', alpha_step_size, file = sys.stdout) + if arg == '-h': + help_message() + return 
0 + if arg == '-t': + constants.run_talys = True + if arg == '-d': + constants.download_data = True + constants.download_version = 2 + version_choice = sys.argv[sys.argv.index(arg)+1] + if (not version_choice[0] == '-') and (version_choice[0].lower() == 'v'): + version_num = int(version_choice[1]) + constants.download_version = version_num + print('Downloading data from version',version_num) + if arg == '--print-alphas': + constants.print_alphas = True + if arg == '--print-alphas-only': + print('Only printing alphas', file = sys.stdout) + constants.print_alphas = True + constants.run_alphas = False + if arg == '--force-recalculation': + constants.force_recalculation = True + if arg == '-o': + ofile = str(sys.argv[sys.argv.index(arg)+1]) + print('Printing output to',ofile, file = sys.stdout) + constants.ofile = open(ofile,'w') + #sys.stdout = open(ofile,'w') + + if len(alpha_list) == 0 or len(mat_comp) == 0: + if len(alpha_list)==0: print('No alpha list or chain specified', file = sys.stdout) + if len(mat_comp)==0: print('No target material specified', file = sys.stdout) + print('', file = sys.stdout) + help_message() + return 0 + + if constants.print_alphas: + print('Alpha List: ', file = sys.stdout) + print(max(alpha_list), file = sys.stdout) + condense_alpha_list(alpha_list,alpha_step_size) + for alph in alpha_list: + print(alph[0],'&', alph[1],'\\\\', file = sys.stdout) + + if constants.download_data: + for mat in mat_comp: + ele = mat.ele + if not os.path.exists('./Data/Isotopes/'+ele.capitalize()): + if constants.download_version == 2: + print('\tDownloading (datset V2) data for',ele, file = sys.stdout) + bashcmd = './Scripts/download_element.sh ' + ele + process = subprocess.call(bashcmd,shell=True) + elif constants.download_version == 1: + print('\tDownloading (dataset V1) data for',ele, file = sys.stdout) + bashcmd = './Scripts/download_element_v1.sh ' + ele + process = subprocess.call(bashcmd,shell=True) + + if constants.run_alphas: + print('Running alphas:', file = sys.stdout) + run_alpha(alpha_list, mat_comp, alpha_step_size) + +if __name__ == '__main__': + main() + diff --git a/Scripts/neucbot.py.bak b/Scripts/neucbot.py.bak new file mode 100644 index 00000000..966ea15e --- /dev/null +++ b/Scripts/neucbot.py.bak @@ -0,0 +1,541 @@ +#!/usr/bin/python +from __future__ import print_function +import sys +import os +sys.path.insert(0, './Scripts/') +import re +import subprocess +import shutil +import math +import parseENSDF as ensdf +import getNaturalIsotopes as gni +import getAbundance as isoabund + +class constants: + N_A = 6.0221409e+23 + MeV_to_keV= 1.e3 + mb_to_cm2 = 1.e-27 + year_to_s = 31536000 + min_bin = 0 # keV + max_bin = 20000 # keV + delta_bin = 100 # keV + run_talys = False + run_alphas = True + print_alphas = False + download_data = False + download_version = 2 + force_recalculation = False + ofile = sys.stdout + +class material: + def __init__(self,e,a,f): + self.ele = str(e) + self.A = float(a) + self.frac = float(f) + +def isoDir(ele,A): + return './Data/Isotopes/'+ele.capitalize()+'/'+ele.capitalize()+str(int(A))+'/' + +def parseIsotope(iso): + ele = '' + A = 0 + for i in iso: + if i.isalpha(): + ele += i + if i.isdigit(): + A = A*10 + int(i) + return [ele,A] + +def generateAlphaFileName(ele,A): + outdir = './AlphaLists/' + fName = outdir + ele.capitalize() + str(A) + 'Alphas.dat' + return fName + +def generateAlphaList(ele, A): + print('generateAlphaList(',ele,A,')',file=constants.ofile) + ensdf.main(['parseENSDF',ele,A]) + +def loadAlphaList(fname): + f = 
open(fname) + tokens = map(lambda line: line.split(), f.readlines()) + alpha_list = [] + for words in tokens: + if words[0][0] == '#' or len(words) < 2: + continue + + alpha = [] + for word in words: + alpha.append(float(word)) + alpha_list.append(alpha) + f.close() + return alpha_list + +def getAlphaList(ele,A): + fname = generateAlphaFileName(ele,A) + return loadAlphaList(fname) + +def getAlphaListIfExists(ele,A): + fName = generateAlphaFileName(ele,A) + tries = 3 + while not os.path.isfile(fName): + if tries < 0: + print('Cannot generate alpha list for ele = ', ele, ' and A = ', A,file = constants.ofile) + return 0 + print('generating alpha file ', fName, file = constants.ofile) + generateAlphaList(ele,A) + tries -= 1 + return getAlphaList(ele,A) + +def loadChainAlphaList(fname): + f = open(fname) + tokens = map(lambda line: line.split(), f.readlines()) + alpha_list = [] + for line in tokens: + if len(line) < 2 or line[0][0] == '#': + continue + # Read isotope and its branching ratio from file + iso = line[0] + br = float(line[1]) + [ele,A] = parseIsotope(iso) + + # Now get the isotope's alpha list and add it to the chain's list + aList_forIso = getAlphaListIfExists(ele,A) + if constants.print_alphas: + print(iso, file = constants.ofile) + print('\t', aList_forIso, file = constants.ofile) + for [ene,intensity] in aList_forIso: + alpha_list.append([ene, intensity*br/100]) + return alpha_list + +def readTargetMaterial(fname): + f = open(fname) + mat_comp = [] + tokens = map(lambda line: line.split(), f.readlines()) + for line in tokens: + if len(line) < 3: + continue + if line[0][0] == '#': + continue + ele = line[0].lower() + A = int(line[1]) + frac = float(line[2]) + + if A == 0: + natIso_list = gni.findIsotopes(ele).split() + for A_i in natIso_list: + abund = float(isoabund.findAbundance(str(A_i)+ele.capitalize())) + mater = material(ele,A_i,frac*abund/100.) + mat_comp.append(mater) + else: + mater = material(ele,A,frac) + mat_comp.append(mater) + + # Normalize + norm = 0 + for mat in mat_comp: + norm += mat.frac + for mat in mat_comp: + mat.frac /= norm + + return mat_comp + +def calcStoppingPower(e_alpha_MeV,mat_comp): + # Stopping power as units of keV/(mg/cm^2) or MeV/(g/cm^2) + e_alpha = e_alpha_MeV + sp_total = 0 + # First, reduce the material to combine all isotopes with the same Z + mat_comp_reduced = {} + for mat in mat_comp: + if mat.ele in mat_comp_reduced: + mat_comp_reduced[mat.ele] += mat.frac + else: + mat_comp_reduced[mat.ele] = mat.frac + + # Then, for each element, get the stopping power at this alpha energy + for mat in mat_comp_reduced: + spDir = './Data/StoppingPowers/' + spFile = spDir + mat.lower() + '.dat' + spf = open(spFile) + + tokens = map(lambda line: line.split(), spf.readlines()) + first = True + sp_found = False + e_curr = 0 + e_last = 0 + sp_curr = 0 + sp_last = 0 + sp_alpha = 0 + for line in tokens: + if line[0][0] == '#': + continue + e_curr = float(line[0]) + if str(line[1]) == 'keV': + e_curr /= 1000 + elif str(line[1]) == 'MeV': + e_curr *= 1 + sp_curr = float(line[3])+float(line[2]) + + # Alpha energy is below the list. 
Use the lowest energy in the list + if e_curr > e_alpha and first: + first = False + sp_found = True + sp_alpha = sp_curr + break + # If this entry is above the alpha energy, the alpha is between this + # entry and the previous one + if e_curr > e_alpha: + first = False + sp_alpha = (sp_curr-sp_last)*(e_alpha-e_last)/(e_curr-e_last) + sp_last + sp_found = True + break + # Otherwise, keep looking for the entry + first = False + sp_last = sp_curr + e_last = e_curr + # if the alpha energy is too high for the list, use the highest energy on the list + if not sp_found: + sp_alpha = sp_last + sp_total += sp_alpha * mat_comp_reduced[mat]/100 + return sp_total + +def runTALYS(e_a, ele, A): + iso = str(ele)+str(int(A)) + inpdir = isoDir(ele,A) + 'TalysInputs/' + outdir = isoDir(ele,A) + 'TalysOut/' + nspecdir= isoDir(ele,A) + 'NSpectra/' + if not os.path.exists(inpdir): + os.makedirs(inpdir) + if not os.path.exists(outdir): + os.makedirs(outdir) + if not os.path.exists(nspecdir): + os.makedirs(nspecdir) + +# command = "\nprojectile a\nejectiles p n g\nelement "+ele+"\nmass "+str(int(A))+"\nenergy "+str(e_a)+"\npreequilibrium y\ngiantresonance y\nmultipreeq y\noutspectra y\noutlevels y\noutgamdis y\nfilespectrum n\nelwidth 0.2\n" + command = "\nprojectile a\nejectiles p n g\nelement "+ele+"\nmass "+str(int(A))+"\nenergy "+str(e_a)+"\npreequilibrium y\ngiantresonance y\nmultipreeq y\noutspectra y\noutlevels y\noutgamdis y\nfilespectrum n\nelwidth 0.2" + + inp_fname = inpdir+"inputE"+str(e_a) + inp_f = open(inp_fname,'w') + inp_f.write(command) + inp_f.close() + out_fname = outdir+"outputE"+str(e_a) + + bashcmd = 'talys < '+inp_fname+' > '+out_fname + print('Running TALYS:\t ', bashcmd, file = constants.ofile) + runscript_fname = "./runscript_temp.sh" + runscript_f = open(runscript_fname,"w") + runscript_f.write("#!/usr/bin/bash\n\n"+bashcmd) + runscript_f.close() + process = subprocess.call(bashcmd,shell=True) + # Move the output neutron spectrum to the appropriate directory + ls = os.listdir("./") + + moved_file = False + for f in ls: + if "nspec" in f: + if os.path.exists(nspecdir+f): + os.remove(nspecdir+f) + fname = nspecdir+'nspec{0:0>7.3f}.tot'.format(e_a) + shutil.move(f, fname) + moved_file = True + # If no neutron spectrum file is found, make a blank one + if not moved_file: + fname = nspecdir+'nspec{0:0>7.3f}.tot'.format(e_a) + blank_f = open(fname,'w') + blank_f.write("EMPTY") + blank_f.close() + + +def getMatTerm(mat,mat_comp): + # mat_comp structure: [ele,A,frac] + A = mat.A + conc = mat.frac/100. + mat_term = (constants.N_A * conc)/A + return mat_term + +def getIsotopeDifferentialNSpec(e_a, ele, A): + target = ele+str(int(A)) + path = isoDir(ele,A) + 'NSpectra/' + if not os.path.exists(path): + os.makedirs(path) + fname = path+'nspec{0:0>7.3f}.tot'.format(int(100*e_a)/100.) + outpath = isoDir(ele,A) + 'TalysOut' + if constants.force_recalculation: + print('Forcibily running TALYS for', int(100*e_a)/100., 'alpha on', target, file = constants.ofile) + print('Outpath', outpath, file = constants.ofile) + runTALYS(int(100*e_a)/100.,ele,A) + + # If the file does not exist, run TALYS + if not os.path.exists(fname): + if constants.run_talys: + while not os.path.exists(fname): + print('Running TALYS for', int(100*e_a)/100., 'alpha on', target, file = constants.ofile) + print('Outpath', outpath, file = constants.ofile) + runTALYS(int(100*e_a)/100.,ele,A) + ls = os.listdir(outpath) + else: + print("Warning, no (alpha,n) data found for E_a =", e_a,"MeV on target", target,"...skipping. 
Consider running with the -d or -t options", file = constants.ofile) + return {} + + # Load the file + # If no output was produced, skip this energy + if not os.path.exists(outpath): + return {} + + f = open(fname) + spec = {} + tokens = map(lambda line: line.split(), f.readlines()) + for line in tokens: + if len(line) < 1 or line[0] == 'EMPTY': + break + if line[0][0] == '#': + continue + # line[0] = E-out + # line[1] = Total + # line[2] = Direct + # line[3] = Pre-equil + # line[4] = Mult. preeq + # line[5] = Compound + # line[6] = Pre-eq ratio + # convert from mb/MeV to cm^2/MeV + energy = int(float(line[0])*constants.MeV_to_keV) + sigma = float(line[1])*constants.mb_to_cm2/constants.MeV_to_keV + spec[energy] = sigma + return spec + +def rebin(histo,step,minbin,maxbin): + nbins = (maxbin-minbin)/step + newhisto = {} + normhisto = {} + for i in sorted(histo): + index = sorted(histo).index(i) + # Get the spacing between points + delta = sorted(histo)[0] + if index > 0: + delta = sorted(histo)[index] - sorted(histo)[index-1] + # If the x value is too low, put it in the underflow bin (-1) + if i < minbin: + print('Underflow: ', i, ' (minbin = ', minbin, ')',file = constants.ofile) + if -1 in newhisto: + newhisto[-1] += histo[i]*delta + normhisto[-1] += delta + else: + newhisto[-1] = histo[i]*delta + normhisto[-1] = delta + # ...or the overflow bin if too high + if i > maxbin: + print('Overflow: ', histo[i], ' (maxbin = ', maxbin,')', file = constants.ofile) + overflowbin = int(nbins+10*step) + if overflowbin in newhisto: + newhisto[overflowbin] += histo[i]*delta + normhisto[overflowbin] += delta + else: + newhisto[overflowbin] = histo[i]*delta + normhisto[overflowbin] = delta + # Otherwise, calculate the bin + newbin = int(minbin+int((i-minbin)/step)*step) + if newbin in newhisto: + newhisto[newbin] += histo[i]*delta + normhisto[newbin] += delta + else: + newhisto[newbin] = histo[i]*delta + normhisto[newbin] = delta + + # Renormalize the new histogram + for i in newhisto: + if normhisto[i] > 0: + newhisto[i] /= normhisto[i] + + return newhisto + +def integrate(histo): + integral = 0 + for i in sorted(histo): + # Get the bin width + delta = sorted(histo)[0] + index = sorted(histo).index(i) + if index > 0: + delta = sorted(histo)[index] - sorted(histo)[index-1] + + integral += histo[i]*delta + return integral + +def readTotalNXsect(e_a,ele,A): + fname = isoDir(ele,A) + 'TalysOut/outputE' + str(int(100*e_a)/100.) + if not os.path.exists(fname): + print("Could not find file ", fname, file = constants.ofile) + return 0 + f = open(fname) + lines = map(lambda line: line.split(), f.readlines()) + xsect_line = 0 + for line in lines: + if line == ['2.','Binary','non-elastic','cross','sections','(non-exclusive)']: + break + else: + xsect_line += 1 + + xsect_line += 3 + if len(lines) < xsect_line: + return 0 + if lines[xsect_line][0] != 'neutron': + return 0 + sigma = float(lines[xsect_line][2]) + sigma *= constants.mb_to_cm2 + return sigma + +def condense_alpha_list(alpha_list,alpha_step_size): + alpha_ene_cdf = [] + max_alpha = max(alpha_list) + e_a_max = int(max_alpha[0]*100 + 0.5)/100. + alpha_ene_cdf.append([e_a_max,max_alpha[1]]) + e_a = e_a_max + while e_a > 0: + cum_int = 0 + for alpha in alpha_list: + this_e_a = int(alpha[0]*100+0.5)/100. 
+ if this_e_a >= e_a: + cum_int += alpha[1] + alpha_ene_cdf.append([e_a,cum_int]) + e_a -= alpha_step_size + return alpha_ene_cdf + +def run_alpha(alpha_list, mat_comp, e_alpha_step): + binsize = 0.1 # Bin size for output spectrum + + spec_tot = {} + xsects = {} + total_xsect = 0 + counter = 0 + alpha_ene_cdf = condense_alpha_list(alpha_list,e_alpha_step) + for [e_a, intensity] in alpha_ene_cdf: + counter += 1 + if counter % (int(len(alpha_ene_cdf)/100)) == 0: + sys.stdout.write('\r') + sys.stdout.write("[%-100s] %d%%" % ('='*int(counter*100/len(alpha_ene_cdf)), 100*counter/len(alpha_ene_cdf))) + sys.stdout.flush() + + stopping_power = 0 + if stopping_power == 0: + stopping_power = calcStoppingPower(e_a, mat_comp) + for mat in mat_comp: + mat_term = getMatTerm(mat,mat_comp) + # Get alpha n spectrum for this alpha and this target + spec_raw = getIsotopeDifferentialNSpec(e_a, mat.ele, mat.A) + spec = rebin(spec_raw,constants.delta_bin,constants.min_bin,constants.max_bin) + # Add this spectrum to the total spectrum + delta_ea = e_alpha_step + if e_a - e_alpha_step < 0: + delta_ea = e_a + prefactors = (intensity/100.)*mat_term*delta_ea/stopping_power + xsect = prefactors * readTotalNXsect(e_a,mat.ele,mat.A) + total_xsect += xsect + matname = str(mat.ele)+str(mat.A) + if matname in xsects: + xsects[matname] += xsect + else: + xsects[matname] = xsect + for e in spec: + val = prefactors * spec[e] + if e in spec_tot: + spec_tot[e] += val + else: + spec_tot[e] = val + + sys.stdout.write('\r') + sys.stdout.write("[%-100s] %d%%" % ('='*int((counter*100)/len(alpha_ene_cdf)), 100*(counter+1)/len(alpha_ene_cdf))) + sys.stdout.flush() + print('', file = sys.stdout) + # print out total spectrum + newspec = spec_tot + print('',file = constants.ofile) + print('# Total neutron yield = ', total_xsect, ' n/decay', file = constants.ofile) + for x in sorted(xsects): + print('\t',x,xsects[x], file = constants.ofile) + print('# Integral of spectrum = ', integrate(newspec), " n/decay", file = constants.ofile) + for e in sorted(newspec): + print(e, newspec[e], file = constants.ofile) + +def help_message(): + print('Usage: You must specify an alpha list or decay chain file and a target material file.\nYou may also specify a step size to for integrating the alphas as they slow down in MeV; the default value is 0.01 MeV\n\t-l [alpha list file name]\n\t-c [decay chain file name]\n\t-m [material composition file name]\n\t-s [alpha step size in MeV]\n\t-t (to run TALYS for reactions not in libraries)\n\t-d (download isotopic data for isotopes missing from database; default behavior is v2)\n\t\t-d v1 (use V1 database, TALYS-1.6)\n\t-d v2 (use V2 database, TALYS-1.95)\n\t-o [output file name]', file = sys.stdout) + +def main(): + alpha_list = [] + mat_comp = [] + alpha_step_size = 0.01 #MeV (default value) + # Load arguments + for arg in sys.argv: + if arg == '-l': + alphalist_file = sys.argv[sys.argv.index(arg)+1] + print('load alpha list', alphalist_file, file = sys.stdout) + alpha_list = loadAlphaList(alphalist_file) + if arg == '-c': + chain_file = sys.argv[sys.argv.index(arg)+1] + print('load alpha chain', chain_file, file = sys.stdout) + alpha_list = loadChainAlphaList(chain_file) + if arg == '-m': + mat_file = sys.argv[sys.argv.index(arg)+1] + print('load target material', mat_file, file = sys.stdout) + mat_comp = readTargetMaterial(mat_file) + if arg == '-s': + alpha_step_size = float(sys.argv[sys.argv.index(arg)+1]) + print('step size', alpha_step_size, file = sys.stdout) + if arg == '-h': + help_message() + return 
0 + if arg == '-t': + constants.run_talys = True + if arg == '-d': + constants.download_data = True + constants.download_version = 2 + version_choice = sys.argv[sys.argv.index(arg)+1] + if (not version_choice[0] == '-') and (version_choice[0].lower() == 'v'): + version_num = int(version_choice[1]) + constants.download_version = version_num + print('Downloading data from version',version_num) + if arg == '--print-alphas': + constants.print_alphas = True + if arg == '--print-alphas-only': + print('Only printing alphas', file = sys.stdout) + constants.print_alphas = True + constants.run_alphas = False + if arg == '--force-recalculation': + constants.force_recalculation = True + if arg == '-o': + ofile = str(sys.argv[sys.argv.index(arg)+1]) + print('Printing output to',ofile, file = sys.stdout) + constants.ofile = open(ofile,'w') + #sys.stdout = open(ofile,'w') + + if len(alpha_list) == 0 or len(mat_comp) == 0: + if len(alpha_list)==0: print('No alpha list or chain specified', file = sys.stdout) + if len(mat_comp)==0: print('No target material specified', file = sys.stdout) + print('', file = sys.stdout) + help_message() + return 0 + + if constants.print_alphas: + print('Alpha List: ', file = sys.stdout) + print(max(alpha_list), file = sys.stdout) + condense_alpha_list(alpha_list,alpha_step_size) + for alph in alpha_list: + print(alph[0],'&', alph[1],'\\\\', file = sys.stdout) + + if constants.download_data: + for mat in mat_comp: + ele = mat.ele + if not os.path.exists('./Data/Isotopes/'+ele.capitalize()): + if constants.download_version == 2: + print('\tDownloading (datset V2) data for',ele, file = sys.stdout) + bashcmd = './Scripts/download_element.sh ' + ele + process = subprocess.call(bashcmd,shell=True) + elif constants.download_version == 1: + print('\tDownloading (dataset V1) data for',ele, file = sys.stdout) + bashcmd = './Scripts/download_element_v1.sh ' + ele + process = subprocess.call(bashcmd,shell=True) + + if constants.run_alphas: + print('Running alphas:', file = sys.stdout) + run_alpha(alpha_list, mat_comp, alpha_step_size) + +if __name__ == '__main__': + main() + diff --git a/Scripts/parseENSDF.py b/Scripts/parseENSDF.py index fa4ad399..745b8935 100755 --- a/Scripts/parseENSDF.py +++ b/Scripts/parseENSDF.py @@ -123,7 +123,7 @@ def getIntensityScale(fname): def main(argv): if(len(argv) != 3): - print 'Usage: ./parseENSDF [element] [A]' + print('Usage: ./parseENSDF [element] [A]') return ele = argv[1] diff --git a/Scripts/parseENSDF.py.bak b/Scripts/parseENSDF.py.bak new file mode 100644 index 00000000..fa4ad399 --- /dev/null +++ b/Scripts/parseENSDF.py.bak @@ -0,0 +1,152 @@ +#!/usr/bin/python +import sys +import os.path +import re +import getENSDFdata + +intensityScale = 0 +def getGammas(fname, energies, probs): + f = open(fname) + for line in f: + if len(line) < 10: + continue + nucid = line[0:5] + blank0 = line[5] + if blank0 != ' ': + continue + comment = line[6] + if comment != ' ': + continue + rad = line[7] + if rad != 'G': + continue + blank1 = line[8] + if blank1 != ' ': + continue + + c = '' + if len(line) > 77: + c = line[77] + if c == 'Q': + continue + + ene = line[9:19] + if ene == ' ': + continue + + ene = float(ene) + dEne = line[19:21] + if not (dEne.isspace() or dEne == ''): + dEne = float(dEne) + + prob = line[21:29] + if (prob.isspace() or prob == ''): + continue + prob = float(prob)*intensityScale + + dri = line[29:31] + if re.match("A-z", dri): + dri = float(dri) + + energies.append(ene) + probs.append(prob) + +def getAlphas(fname, energies, probs): + f = 
open(fname) + for line in f: + if len(line) < 10: + continue + nucid = line[0:5] + blank0 = line[5] + if blank0 != ' ': + continue + comment = line[6] + if comment != ' ': + continue + rad = line[7] + if rad != 'A': + continue + blank1 = line[8] + if blank1 != ' ': + continue + + c = '' + if len(line) > 77: + c = line[77] + if c == 'Q': + continue + + ene = line[9:19] + if ene == ' ' or not ene[0].isdigit(): + continue + + ene = float(ene) + dEne = line[19:21] + if not (dEne.isspace() or dEne == '' or not dEne[0].isdigit()): + dEne = float(dEne) + + prob = line[21:29] + if (prob.isspace() or prob == ''): + continue + + prob = float(prob) + + dri = line[29:31] + if re.match("A-z", dri): + dri = float(dri) + + energies.append(ene) + probs.append(prob) + +def getIntensityScale(fname): + f = open(fname) + for line in f: + if len(line) < 10: + continue + nucid = line[0:5] + blank0 = line[5] + if blank0 != ' ': + continue + comment = line[6] + if comment != ' ': + continue + rad = line[7] + if rad != 'N': + continue + + scale = line[9:19] + if scale == ' ': + intensityScale = 1 + else : + intensityScale = float(scale) + return intensityScale + +def main(argv): + if(len(argv) != 3): + print 'Usage: ./parseENSDF [element] [A]' + return + + ele = argv[1] + A = int(argv[2]) + indir = './Data/Decays/' + finName = indir + 'ensdf' + ele.capitalize() + str(A) + '.dat' + + if(not os.path.isfile(finName)): + getENSDFdata.main(argv) + + energies = [] + probs = [] + intensityScale = getIntensityScale(finName) + getAlphas(finName,energies,probs) + + outdir = './AlphaLists/' + foutName = outdir + ele.capitalize() + str(A) + 'Alphas.dat' + fout = open(foutName,'w') + + for i in range(0,len(energies)): +# print 'E =',energies[i]/1000,'\tI =',probs[i] + fout.write(str(energies[i]/1000)+'\t'+str(probs[i])+'\n') +# fout.write('{}\t{}\n'.format(energies[i]/1000, probs[i])) + +if __name__ == "__main__": + main(sys.argv)
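
Note on the SGMLParser dependency: the Python 3 version of Scripts/getENSDFdata.py keeps the URLLister class by pulling SGMLParser from the third-party sgmllib3k backport (that package typically installs a module named sgmllib, so the import is usually written "from sgmllib import SGMLParser"). A dependency-free alternative is the standard-library html.parser; the sketch below is only an illustration of that alternative, assuming all the script needs is the list of href attributes, and the class name LinkLister is invented here, not part of the patch.

# Minimal sketch: collect href attributes with the standard library only.
# It mirrors what URLLister does, but is an assumed alternative, not the
# parser this patch actually uses.
from html.parser import HTMLParser

class LinkLister(HTMLParser):
    def __init__(self):
        super().__init__()
        self.urls = []

    def handle_starttag(self, tag, attrs):
        # Record every href on an <a> tag, like SGMLParser.start_a did.
        if tag == 'a':
            self.urls.extend(v for k, v in attrs if k == 'href' and v)

# Usage (hypothetical): feed decoded HTML, then read .urls
# parser = LinkLister()
# parser.feed(page.read().decode('utf-8', errors='replace'))
# print(parser.urls)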
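
Most hunks in getAbundance.py, getNaturalIsotopes.py and parseENSDF.py are mechanical Python 2 to 3 conversions: print statements become print() calls, urllib2 splits into urllib.request / urllib.error, and map/filter/lambda pipelines become list comprehensions. A small self-contained check of the two list-building rewrites, with made-up rows standing in for abundances.dat (the real file format is not shown in this patch):

import re

# Pattern 1: tokenize each line (replaces map(lambda line: re.split(" ", line), ...)).
lines = ["12C C 98.93\n", "13C C 1.07\n"]        # illustrative rows only
tokens = [re.split(" ", line) for line in lines]

# Pattern 2: drop empty strings before indexing (replaces filter(None, words)[2]).
words = ["13C", "", "C", "1.07\n"]
abundance = [w for w in words if w][2]           # same element filter(None, words)[2] returned in Python 2
print(tokens, abundance)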
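
The core of the new Scripts/neucbot.py is run_alpha(), which steps each alpha line down the stopping-power curve and weights the TALYS (alpha,n) cross section by the target isotope's number density. As I read the prefactors it builds from getMatTerm(), calcStoppingPower() and readTotalNXsect(), each energy step contributes (I/100) * N_A*(f/100)/A * dE/S(E) * sigma(E) neutrons per decay. The snippet below only restates that product with invented numbers; none of the values comes from the patch's data files.

# Hypothetical one-step evaluation of run_alpha()'s per-isotope weight.
# I  : cumulative alpha intensity at this energy, percent
# f  : normalized fraction from the material file (getMatTerm divides it by a further 100)
# A  : mass number of the target isotope
# dE : alpha step size in MeV; S : stopping power in MeV/(g/cm^2); sigma : (alpha,n) cross section in cm^2
N_A   = 6.0221409e23
I     = 100.0      # invented
f     = 0.5        # invented
A     = 13.0       # e.g. a 13C target (invented choice)
dE    = 0.01       # the script's default step size
S     = 450.0      # invented stand-in for ./Data/StoppingPowers/
sigma = 2.5e-26    # invented stand-in for the TALYS "Binary non-elastic" neutron entry

step_yield = (I / 100.0) * (N_A * (f / 100.0) / A) * (dE / S) * sigma
print(step_yield, "neutrons per decay from this step")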