Browse Source

added LF_scanner.pypulseq for seqgen

spacexerq 2 weeks ago
parent
commit
c9404b0be8
302 changed files with 34064 additions and 0 deletions
  1. 74 0
      LF_scanner/CODE_OF_CONDUCT.md
  2. 30 0
      LF_scanner/CONTRIBUTING.md
  3. 102 0
      LF_scanner/FID_from_scratch.ipynb
  4. 661 0
      LF_scanner/LICENSE
  5. 3 0
      LF_scanner/MANIFEST.in
  6. 201 0
      LF_scanner/README.md
  7. 39 0
      LF_scanner/TSE_20231019_161845.json
  8. 762 0
      LF_scanner/TSE_pulse_sequence-Copy1.ipynb
  9. 762 0
      LF_scanner/TSE_pulse_sequence.ipynb
  10. 762 0
      LF_scanner/TSE_pulse_sequence_T1.ipynb
  11. 722 0
      LF_scanner/TSE_splited_gradients.ipynb
  12. 628 0
      LF_scanner/TSE_splited_gradients_RESTORE.ipynb
  13. 0 0
      LF_scanner/__init__.py
  14. 20 0
      LF_scanner/doc/Makefile
  15. 0 0
      LF_scanner/doc/__init__.py
  16. 35 0
      LF_scanner/doc/make.bat
  17. 2 0
      LF_scanner/doc/readthedocs_requirements.txt
  18. 0 0
      LF_scanner/doc/source/__init__.py
  19. 66 0
      LF_scanner/doc/source/conf.py
  20. 25 0
      LF_scanner/doc/source/index.rst
  21. 9 0
      LF_scanner/doc/source/modules.rst
  22. 21 0
      LF_scanner/doc/source/pypulseq.SAR.rst
  23. 61 0
      LF_scanner/doc/source/pypulseq.Sequence.rst
  24. 319 0
      LF_scanner/doc/source/pypulseq.rst
  25. 117 0
      LF_scanner/doc/source/pypulseq.tests.rst
  26. 7 0
      LF_scanner/doc/source/setup.rst
  27. 7 0
      LF_scanner/doc/source/version.rst
  28. BIN
      LF_scanner/doc/walkthrough/gre_1.png
  29. BIN
      LF_scanner/doc/walkthrough/gre_2.png
  30. 84 0
      LF_scanner/doc/walkthrough/gre_walkthrough.ipynb
  31. BIN
      LF_scanner/logo.png
  32. BIN
      LF_scanner/logo_transparent.png
  33. 0 0
      LF_scanner/new_MRI_pulse_seq/__init__.py
  34. 0 0
      LF_scanner/new_MRI_pulse_seq/pd_TSE/pd_TSE_matrx16x16_fixed_delay.xml
  35. 0 0
      LF_scanner/new_MRI_pulse_seq/pd_TSE/pd_TSE_matrx16x16_myGrad.xml
  36. BIN
      LF_scanner/new_MRI_pulse_seq/pd_TSE/rf_1.h5
  37. BIN
      LF_scanner/new_MRI_pulse_seq/pd_TSE/rf_2.h5
  38. BIN
      LF_scanner/new_MRI_pulse_seq/pd_TSE/rf_3.h5
  39. BIN
      LF_scanner/new_MRI_pulse_seq/t1_FS_TSE/FS_T1_TSE_1.png
  40. BIN
      LF_scanner/new_MRI_pulse_seq/t1_FS_TSE/FS_T1_TSE_2.png
  41. 0 0
      LF_scanner/new_MRI_pulse_seq/t1_TSE/__init__.py
  42. BIN
      LF_scanner/new_MRI_pulse_seq/t1_TSE/rf_1.h5
  43. BIN
      LF_scanner/new_MRI_pulse_seq/t1_TSE/rf_2.h5
  44. BIN
      LF_scanner/new_MRI_pulse_seq/t1_TSE/rf_3.h5
  45. BIN
      LF_scanner/new_MRI_pulse_seq/t1_TSE/rf_4.h5
  46. 0 0
      LF_scanner/new_MRI_pulse_seq/t1_TSE/t1_TSE_matrx16x16_fixed_delay.xml
  47. 0 0
      LF_scanner/new_MRI_pulse_seq/t1_TSE/t1_TSE_matrx16x16_myGrad.xml
  48. 432 0
      LF_scanner/new_MRI_pulse_seq/t1_TSE/write_TSE_T1.py
  49. BIN
      LF_scanner/new_MRI_pulse_seq/t1_se/rf_1.h5
  50. BIN
      LF_scanner/new_MRI_pulse_seq/t1_se/rf_2.h5
  51. BIN
      LF_scanner/new_MRI_pulse_seq/t1_se/rf_3.h5
  52. BIN
      LF_scanner/new_MRI_pulse_seq/t1_se/rf_4.h5
  53. BIN
      LF_scanner/new_MRI_pulse_seq/t1_se/with_gxspoi_without_phases_offsets/different_contrasts.pptx
  54. 0 0
      LF_scanner/new_MRI_pulse_seq/t2_IR_TSE/IR_t2_TSE_matrx16x16_myGrad.xml
  55. BIN
      LF_scanner/new_MRI_pulse_seq/t2_IR_TSE/T2_STIR_TSE_1.png
  56. BIN
      LF_scanner/new_MRI_pulse_seq/t2_IR_TSE/T2_STIR_TSE_2.png
  57. BIN
      LF_scanner/new_MRI_pulse_seq/t2_IR_TSE/rf_1.h5
  58. BIN
      LF_scanner/new_MRI_pulse_seq/t2_IR_TSE/rf_2.h5
  59. BIN
      LF_scanner/new_MRI_pulse_seq/t2_IR_TSE/rf_3.h5
  60. BIN
      LF_scanner/new_MRI_pulse_seq/t2_TSE/rf_1.h5
  61. BIN
      LF_scanner/new_MRI_pulse_seq/t2_TSE/rf_2.h5
  62. BIN
      LF_scanner/new_MRI_pulse_seq/t2_TSE/rf_3.h5
  63. 0 0
      LF_scanner/new_MRI_pulse_seq/t2_TSE/t2_TSE_matrx16x16_fixed_delay.xml
  64. 0 0
      LF_scanner/new_MRI_pulse_seq/t2_TSE/t2_TSE_matrx16x16_myGrad.xml
  65. 0 0
      LF_scanner/new_MRI_pulse_seq/t2_TSE_RESTORE/__init__.py
  66. BIN
      LF_scanner/new_MRI_pulse_seq/t2_TSE_RESTORE/rf_1.h5
  67. BIN
      LF_scanner/new_MRI_pulse_seq/t2_TSE_RESTORE/rf_2.h5
  68. BIN
      LF_scanner/new_MRI_pulse_seq/t2_TSE_RESTORE/rf_3.h5
  69. BIN
      LF_scanner/new_MRI_pulse_seq/t2_TSE_RESTORE/rf_4.h5
  70. 0 0
      LF_scanner/new_MRI_pulse_seq/t2_TSE_RESTORE/t2_TSE_RESTORE_matrx16x16_myGrad.xml
  71. 289 0
      LF_scanner/new_MRI_pulse_seq/t2_TSE_RESTORE/write_TSE_T2_RESTORE.py
  72. 106 0
      LF_scanner/paper.bib
  73. 106 0
      LF_scanner/paper.md
  74. 31 0
      LF_scanner/py2jemris/.github/ISSUE_TEMPLATE/bug_report.md
  75. 20 0
      LF_scanner/py2jemris/.github/ISSUE_TEMPLATE/feature_request.md
  76. 120 0
      LF_scanner/py2jemris/.gitignore
  77. 76 0
      LF_scanner/py2jemris/CODE_OF_CONDUCT.md
  78. 27 0
      LF_scanner/py2jemris/CONTRIBUTING.md
  79. 674 0
      LF_scanner/py2jemris/LICENSE
  80. 27 0
      LF_scanner/py2jemris/README.md
  81. 0 0
      LF_scanner/py2jemris/__init__.py
  82. 1 0
      LF_scanner/py2jemris/benchmark_seq2xml/.jemris_progress.out
  83. BIN
      LF_scanner/py2jemris/benchmark_seq2xml/.spins_state.dat
  84. 0 0
      LF_scanner/py2jemris/benchmark_seq2xml/__init__.py
  85. 64 0
      LF_scanner/py2jemris/benchmark_seq2xml/benchmark_seq_files.py
  86. BIN
      LF_scanner/py2jemris/benchmark_seq2xml/ext_rf.h5
  87. BIN
      LF_scanner/py2jemris/benchmark_seq2xml/gre.h5
  88. 23 0
      LF_scanner/py2jemris/benchmark_seq2xml/gre.xml
  89. 0 0
      LF_scanner/py2jemris/benchmark_seq2xml/gre_jemris_seq2xml.xml
  90. BIN
      LF_scanner/py2jemris/benchmark_seq2xml/gre_jemris_seq2xml_jemris.h5
  91. 1 0
      LF_scanner/py2jemris/benchmark_seq2xml/mysimu2.xml
  92. BIN
      LF_scanner/py2jemris/benchmark_seq2xml/rf_1.h5
  93. BIN
      LF_scanner/py2jemris/benchmark_seq2xml/rf_2.h5
  94. BIN
      LF_scanner/py2jemris/benchmark_seq2xml/rf_3.h5
  95. BIN
      LF_scanner/py2jemris/benchmark_seq2xml/rf_4.h5
  96. BIN
      LF_scanner/py2jemris/benchmark_seq2xml/rf_5.h5
  97. BIN
      LF_scanner/py2jemris/benchmark_seq2xml/rf_6.h5
  98. BIN
      LF_scanner/py2jemris/benchmark_seq2xml/rf_7.h5
  99. BIN
      LF_scanner/py2jemris/benchmark_seq2xml/rf_8.h5
  100. BIN
      LF_scanner/py2jemris/benchmark_seq2xml/sample.h5
  101. BIN
      LF_scanner/py2jemris/benchmark_seq2xml/seq_compare.PNG
  102. BIN
      LF_scanner/py2jemris/benchmark_seq2xml/seq_compare_zoomed.PNG
  103. BIN
      LF_scanner/py2jemris/benchmark_seq2xml/signals.h5
  104. 4 0
      LF_scanner/py2jemris/benchmark_seq2xml/uniform.xml
  105. 115 0
      LF_scanner/py2jemris/coil2xml.py
  106. 30 0
      LF_scanner/py2jemris/examine_seq_diag.py
  107. 15 0
      LF_scanner/py2jemris/make_some_seqs.py
  108. 597 0
      LF_scanner/py2jemris/phantom.py
  109. 33 0
      LF_scanner/py2jemris/pull_request_template.md
  110. 210 0
      LF_scanner/py2jemris/pulseq_jemris_simulator.py
  111. 1150 0
      LF_scanner/py2jemris/pulseq_library.py
  112. 2223 0
      LF_scanner/py2jemris/py2jemris_demo.ipynb
  113. 180 0
      LF_scanner/py2jemris/recon_jemris.py
  114. 42 0
      LF_scanner/py2jemris/record_seq2xml_times.py
  115. 5 0
      LF_scanner/py2jemris/requirements.txt
  116. BIN
      LF_scanner/py2jemris/rf_1.h5
  117. BIN
      LF_scanner/py2jemris/rf_10.h5
  118. BIN
      LF_scanner/py2jemris/rf_11.h5
  119. BIN
      LF_scanner/py2jemris/rf_12.h5
  120. BIN
      LF_scanner/py2jemris/rf_13.h5
  121. BIN
      LF_scanner/py2jemris/rf_14.h5
  122. BIN
      LF_scanner/py2jemris/rf_15.h5
  123. BIN
      LF_scanner/py2jemris/rf_16.h5
  124. BIN
      LF_scanner/py2jemris/rf_17.h5
  125. BIN
      LF_scanner/py2jemris/rf_18.h5
  126. BIN
      LF_scanner/py2jemris/rf_19.h5
  127. BIN
      LF_scanner/py2jemris/rf_2.h5
  128. BIN
      LF_scanner/py2jemris/rf_20.h5
  129. BIN
      LF_scanner/py2jemris/rf_21.h5
  130. BIN
      LF_scanner/py2jemris/rf_22.h5
  131. BIN
      LF_scanner/py2jemris/rf_23.h5
  132. BIN
      LF_scanner/py2jemris/rf_24.h5
  133. BIN
      LF_scanner/py2jemris/rf_25.h5
  134. BIN
      LF_scanner/py2jemris/rf_3.h5
  135. BIN
      LF_scanner/py2jemris/rf_4.h5
  136. BIN
      LF_scanner/py2jemris/rf_5.h5
  137. BIN
      LF_scanner/py2jemris/rf_6.h5
  138. BIN
      LF_scanner/py2jemris/rf_7.h5
  139. BIN
      LF_scanner/py2jemris/rf_8.h5
  140. BIN
      LF_scanner/py2jemris/rf_9.h5
  141. BIN
      LF_scanner/py2jemris/sample.h5
  142. 307 0
      LF_scanner/py2jemris/seq2xml.py
  143. 318 0
      LF_scanner/py2jemris/seq2xml_fixed_delay.py
  144. 40 0
      LF_scanner/py2jemris/sim/8chheadcyl.xml
  145. 0 0
      LF_scanner/py2jemris/sim/__init__.py
  146. 22 0
      LF_scanner/py2jemris/sim/epi.xml
  147. 23 0
      LF_scanner/py2jemris/sim/gre.xml
  148. BIN
      LF_scanner/py2jemris/sim/ismrm_abstract/spgr_64_v2/phantom_bottles.mat
  149. BIN
      LF_scanner/py2jemris/sim/sample.h5
  150. 31 0
      LF_scanner/py2jemris/sim/tse.xml
  151. 4 0
      LF_scanner/py2jemris/sim/uniform.xml
  152. BIN
      LF_scanner/py2jemris/sim/utest_outputs/cylindrical.h5
  153. BIN
      LF_scanner/py2jemris/sim/utest_outputs/data32_orig.mat
  154. 28 0
      LF_scanner/py2jemris/sim/utest_outputs/gre32.xml
  155. BIN
      LF_scanner/py2jemris/sim/utest_outputs/signals.h5
  156. 1 0
      LF_scanner/py2jemris/sim/utest_outputs/simu.xml
  157. 4 0
      LF_scanner/py2jemris/sim/utest_outputs/uniform.xml
  158. 53 0
      LF_scanner/py2jemris/sim2xml.py
  159. 181 0
      LF_scanner/py2jemris/sim_jemris.py
  160. 37 0
      LF_scanner/py2jemris/sim_py2jemris_ismrm2021.py
  161. 38 0
      LF_scanner/py2jemris/sim_seq_validation.py
  162. 181 0
      LF_scanner/py2jemris/utest_py2jemris_script.py
  163. BIN
      LF_scanner/pypulseq/SAR/QGlobal.mat
  164. 325 0
      LF_scanner/pypulseq/SAR/SAR_calc.py
  165. 0 0
      LF_scanner/pypulseq/SAR/__init__.py
  166. 0 0
      LF_scanner/pypulseq/Sequence/__init__.py
  167. 637 0
      LF_scanner/pypulseq/Sequence/block.py
  168. 179 0
      LF_scanner/pypulseq/Sequence/calc_grad_spectrum.py
  169. 102 0
      LF_scanner/pypulseq/Sequence/calc_pns.py
  170. 247 0
      LF_scanner/pypulseq/Sequence/ext_test_report.py
  171. 86 0
      LF_scanner/pypulseq/Sequence/parula.py
  172. 660 0
      LF_scanner/pypulseq/Sequence/read_seq.py
  173. 1893 0
      LF_scanner/pypulseq/Sequence/sequence.py
  174. 269 0
      LF_scanner/pypulseq/Sequence/write_seq.py
  175. 41 0
      LF_scanner/pypulseq/__init__.py
  176. 222 0
      LF_scanner/pypulseq/add_gradients.py
  177. 92 0
      LF_scanner/pypulseq/add_ramps.py
  178. 81 0
      LF_scanner/pypulseq/align.py
  179. 53 0
      LF_scanner/pypulseq/block_to_events.py
  180. 61 0
      LF_scanner/pypulseq/calc_duration.py
  181. 355 0
      LF_scanner/pypulseq/calc_ramp.py
  182. 65 0
      LF_scanner/pypulseq/calc_rf_bandwidth.py
  183. 31 0
      LF_scanner/pypulseq/calc_rf_center.py
  184. 119 0
      LF_scanner/pypulseq/check_timing.py
  185. 76 0
      LF_scanner/pypulseq/compress_shape.py
  186. 91 0
      LF_scanner/pypulseq/convert.py
  187. 74 0
      LF_scanner/pypulseq/decompress_shape.py
  188. 316 0
      LF_scanner/pypulseq/event_lib.py
  189. 66 0
      LF_scanner/pypulseq/make_adc.py
  190. 262 0
      LF_scanner/pypulseq/make_adiabatic_pulse.py
  191. 77 0
      LF_scanner/pypulseq/make_arbitrary_grad.py
  192. 154 0
      LF_scanner/pypulseq/make_arbitrary_rf.py
  193. 106 0
      LF_scanner/pypulseq/make_block_pulse.py
  194. 30 0
      LF_scanner/pypulseq/make_delay.py
  195. 47 0
      LF_scanner/pypulseq/make_digital_output_pulse.py
  196. 143 0
      LF_scanner/pypulseq/make_extended_trapezoid.py
  197. 133 0
      LF_scanner/pypulseq/make_extended_trapezoid_area.py
  198. 179 0
      LF_scanner/pypulseq/make_gauss_pulse.py
  199. 56 0
      LF_scanner/pypulseq/make_label.py
  200. 268 0
      LF_scanner/pypulseq/make_sigpy_pulse.py
  201. 172 0
      LF_scanner/pypulseq/make_sinc_pulse.py
  202. 203 0
      LF_scanner/pypulseq/make_trapezoid.py
  203. 53 0
      LF_scanner/pypulseq/make_trigger.py
  204. 110 0
      LF_scanner/pypulseq/opts.py
  205. 41 0
      LF_scanner/pypulseq/points_to_waveform.py
  206. 20 0
      LF_scanner/pypulseq/recon_examples/2dFFT.py
  207. 0 0
      LF_scanner/pypulseq/recon_examples/__init__.py
  208. 123 0
      LF_scanner/pypulseq/rotate.py
  209. 35 0
      LF_scanner/pypulseq/scale_grad.py
  210. 0 0
      LF_scanner/pypulseq/seq_examples/__init__.py
  211. 0 0
      LF_scanner/pypulseq/seq_examples/new_scripts/__init__.py
  212. 339 0
      LF_scanner/pypulseq/seq_examples/new_scripts/write_FS_TSE_T1_T2_PD.py
  213. 287 0
      LF_scanner/pypulseq/seq_examples/new_scripts/write_HASTE_T2.py
  214. 396 0
      LF_scanner/pypulseq/seq_examples/new_scripts/write_IR_TSE_T1_T2.py
  215. 213 0
      LF_scanner/pypulseq/seq_examples/new_scripts/write_SE.py
  216. 452 0
      LF_scanner/pypulseq/seq_examples/new_scripts/write_SPAIR_TSE_T2.py
  217. 289 0
      LF_scanner/pypulseq/seq_examples/new_scripts/write_TSE_T1_T2_PD.py
  218. 289 0
      LF_scanner/pypulseq/seq_examples/new_scripts/write_TSE_T2_RESTORE.py
  219. 264 0
      LF_scanner/pypulseq/seq_examples/new_scripts/write_epi_SE_T2.py
  220. 286 0
      LF_scanner/pypulseq/seq_examples/new_scripts/write_tse.py
  221. 449 0
      LF_scanner/pypulseq/seq_examples/notebooks/write_t2_se.ipynb
  222. 43 0
      LF_scanner/pypulseq/seq_examples/scripts/README.md
  223. 0 0
      LF_scanner/pypulseq/seq_examples/scripts/__init__.py
  224. 100 0
      LF_scanner/pypulseq/seq_examples/scripts/demo_read.py
  225. BIN
      LF_scanner/pypulseq/seq_examples/scripts/example_recons/gre.png
  226. BIN
      LF_scanner/pypulseq/seq_examples/scripts/example_recons/tse.png
  227. 129 0
      LF_scanner/pypulseq/seq_examples/scripts/write_2Dt1_mprage.py
  228. 155 0
      LF_scanner/pypulseq/seq_examples/scripts/write_3Dt1_mprage.py
  229. 196 0
      LF_scanner/pypulseq/seq_examples/scripts/write_MPRAGE.py
  230. 114 0
      LF_scanner/pypulseq/seq_examples/scripts/write_epi.py
  231. 174 0
      LF_scanner/pypulseq/seq_examples/scripts/write_epi_label.py
  232. 139 0
      LF_scanner/pypulseq/seq_examples/scripts/write_epi_se.py
  233. 287 0
      LF_scanner/pypulseq/seq_examples/scripts/write_epi_se_rs.py
  234. 158 0
      LF_scanner/pypulseq/seq_examples/scripts/write_gre.py
  235. 169 0
      LF_scanner/pypulseq/seq_examples/scripts/write_gre_label.py
  236. 326 0
      LF_scanner/pypulseq/seq_examples/scripts/write_haste.py
  237. 142 0
      LF_scanner/pypulseq/seq_examples/scripts/write_radial_gre.py
  238. 332 0
      LF_scanner/pypulseq/seq_examples/scripts/write_tse.py
  239. 347 0
      LF_scanner/pypulseq/seq_examples/scripts/write_tse_new.py
  240. 180 0
      LF_scanner/pypulseq/seq_examples/scripts/write_ute.py
  241. 41 0
      LF_scanner/pypulseq/sigpy_pulse_opts.py
  242. 93 0
      LF_scanner/pypulseq/split_gradient.py
  243. 147 0
      LF_scanner/pypulseq/split_gradient_at.py
  244. 37 0
      LF_scanner/pypulseq/supported_labels_rf_use.py
  245. 0 0
      LF_scanner/pypulseq/tests/__init__.py
  246. 28 0
      LF_scanner/pypulseq/tests/base.py
  247. 19 0
      LF_scanner/pypulseq/tests/test_MPRAGE.py
  248. 19 0
      LF_scanner/pypulseq/tests/test_epi.py
  249. 19 0
      LF_scanner/pypulseq/tests/test_epi_label.py
  250. 19 0
      LF_scanner/pypulseq/tests/test_epi_se.py
  251. 19 0
      LF_scanner/pypulseq/tests/test_epi_se_rs.py
  252. 19 0
      LF_scanner/pypulseq/tests/test_gre.py
  253. 19 0
      LF_scanner/pypulseq/tests/test_gre_label.py
  254. 19 0
      LF_scanner/pypulseq/tests/test_gre_radial.py
  255. 19 0
      LF_scanner/pypulseq/tests/test_haste.py
  256. 120 0
      LF_scanner/pypulseq/tests/test_sigpy.py
  257. 19 0
      LF_scanner/pypulseq/tests/test_tse.py
  258. 19 0
      LF_scanner/pypulseq/tests/test_ute.py
  259. 39 0
      LF_scanner/pypulseq/traj_to_grad.py
  260. 40 0
      LF_scanner/pypulseq/utilities/TSE_k_space_fill.py
  261. 0 0
      LF_scanner/pypulseq/utilities/__init__.py
  262. 39 0
      LF_scanner/pypulseq/utilities/magn_prep/FS_CHESS_block.py
  263. 32 0
      LF_scanner/pypulseq/utilities/magn_prep/IR_block.py
  264. 43 0
      LF_scanner/pypulseq/utilities/magn_prep/SPAIR_block.py
  265. 0 0
      LF_scanner/pypulseq/utilities/magn_prep/__init__.py
  266. 68 0
      LF_scanner/pypulseq/utilities/magn_prep/magn_prep.py
  267. 17 0
      LF_scanner/pypulseq/utilities/phase_grad_utils.py
  268. 188 0
      LF_scanner/pypulseq/utilities/standart_RF.py
  269. 0 0
      LF_scanner/pypulseq/utils/SAR/__init__.py
  270. 0 0
      LF_scanner/pypulseq/utils/__init__.py
  271. 15 0
      LF_scanner/pypulseq/utils/cumsum.py
  272. 411 0
      LF_scanner/pypulseq/utils/safe_pns_prediction.py
  273. 1 0
      LF_scanner/pypulseq/utils/siemens/__init__.py
  274. 105 0
      LF_scanner/pypulseq/utils/siemens/asc_to_hw.py
  275. 97 0
      LF_scanner/pypulseq/utils/siemens/readasc.py
  276. BIN
      LF_scanner/rf_1.h5
  277. BIN
      LF_scanner/rf_2.h5
  278. BIN
      LF_scanner/rf_3.h5
  279. BIN
      LF_scanner/rf_4.h5
  280. BIN
      LF_scanner/rf_5.h5
  281. BIN
      LF_scanner/rf_6.h5
  282. BIN
      LF_scanner/rf_7.h5
  283. BIN
      LF_scanner/rf_8.h5
  284. 0 0
      LF_scanner/services/Protocol/__init__.py
  285. 14 0
      LF_scanner/services/Protocol/protocol.py
  286. 4 0
      LF_scanner/services/__init__.py
  287. 348 0
      LF_scanner/services/srv_interp.py
  288. 0 0
      LF_scanner/services/srv_stack.py
  289. 53 0
      LF_scanner/setup.py
  290. 509 0
      LF_scanner/t1_SE.ipynb
  291. 626 0
      LF_scanner/t1_SE_experimental.ipynb
  292. 346 0
      LF_scanner/t1_SE_final.ipynb
  293. 477 0
      LF_scanner/t1_SE_final_final.ipynb
  294. 424 0
      LF_scanner/t1_SE_final_max_grad.ipynb
  295. 499 0
      LF_scanner/t2_SE_backup.ipynb
  296. 66 0
      LF_scanner/t2_SE_original.ipynb
  297. 0 0
      LF_scanner/t2_se_pypulseq_colab.xml
  298. 0 0
      LF_scanner/utilities/__init__.py
  299. 16 0
      LF_scanner/utilities/phase_grad_utils.py
  300. 5 0
      LF_scanner/version.py
  301. 448 0
      LF_scanner/write_se_new.ipynb
  302. 463 0
      LF_scanner/write_t2_se.ipynb

+ 74 - 0
LF_scanner/CODE_OF_CONDUCT.md

@@ -0,0 +1,74 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of experience,
+nationality, personal appearance, race, religion, or sexual identity and
+orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+  address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+  professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the team at <imr-framework2018@gmail.com>. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/

+ 30 - 0
LF_scanner/CONTRIBUTING.md

@@ -0,0 +1,30 @@
+# Contributing to `pypulseq`
+:thumbsup: :tada: Thanks for taking time to contribute! :thumbsup: :tada:
+
+Here are guidelines (not rules!) for contributing to `pypulseq`. Use your best judgment, and feel free to propose 
+changes to this document in a pull request.
+
+## Table of contents
+1. [Code of Conduct](#code-of-conduct)
+2. [PEP Style Guide for Python coding](#style-guide-for-python-code)
+
+## Code of Conduct
+This project and everyone participating in it is governed by the 
+[`pypulseq` Code of Conduct][code_of_conduct].
+By participating, you are expected to uphold this code. Please report unacceptable behavior to 
+[imr.framework2018@gmail.com][email].
+
+## Pull requests
+Follow the coding conventions laid out in the [Style Guide for Python Code][style_guide]. Ensure source code is 
+documented as per the NumPy convention [[numpy1]], [[numpy2]]. If you notice any `pypulseq` code not adhering to 
+[PEP8][style_guide], submit a pull request or open an issue.
+
+## Issues
+Please adhere to the appropriate templates when reporting bugs or requesting features. The templates are automatically 
+presented via Github's 'New Issue' feature.
+
+[email]: mailto:imr.framework2018@gmail.com
+[code_of_conduct]: https://github.com/imr-framework/pypulseq/blob/master/CODE_OF_CONDUCT.md
+[style_guide]: https://www.python.org/dev/peps/pep-0008/
+[numpy1]: https://numpydoc.readthedocs.io/en/latest/format.html
+[numpy2]: https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_numpy.html

File diff suppressed because it is too large
+ 102 - 0
LF_scanner/FID_from_scratch.ipynb


+ 661 - 0
LF_scanner/LICENSE

@@ -0,0 +1,661 @@
+                    GNU AFFERO GENERAL PUBLIC LICENSE
+                       Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+  A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate.  Many developers of free software are heartened and
+encouraged by the resulting cooperation.  However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+  The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community.  It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server.  Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+  An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals.  This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU Affero General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Remote Network Interaction; Use with the GNU General Public License.
+
+  Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software.  This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time.  Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as published
+    by the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source.  For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code.  There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<https://www.gnu.org/licenses/>.

+ 3 - 0
LF_scanner/MANIFEST.in

@@ -0,0 +1,3 @@
+include VERSION
+include pypulseq/SAR/QGlobal.mat
+recursive-include pypulseq/seq_examples/* *

+ 201 - 0
LF_scanner/README.md

@@ -0,0 +1,201 @@
+<p align="center">
+
+![PyPulseq](logo.png)
+
+</p>
+
+# PyPulseq: A Python Package for MRI Pulse Sequence Design
+
+`Compatible with Pulseq 1.4.0`
+
+
+🚨🚨🚨 **NOTE:** This is the `dev` branch which hosts the bleeding edge version. For the most recent, stable release,
+switch to the `master` branch by clicking [here](https://github.com/imr-framework/pypulseq/tree/master). 🚨🚨🚨
+
+## Table of contents 🧾
+1. [👥 Contributors][section-contributors]
+2. [📚 Citations][section-relevant-literature]
+3. [🔨 Installation][section-installation]
+4. [⚡ Lightning-start - PyPulseq in your browser!][section-lightning-start]
+5. [🏃‍♂ Quickstart - example scripts][section-quickstart-examples]
+6. [🤿 Deep dive - custom pulse sequences][section-deep-dive]
+7. [👥 Contributing and Community guidelines][section-contributing]
+8. [📖 References][section-references]
+9. [📃 API documentation][api-docs]
+
+---
+
+Pulse sequence design is a significant component of MRI research. However, multi-vendor studies require researchers to
+be acquainted with each hardware platform's programming environment.
+
+PyPulseq enables vendor-neutral pulse sequence design in Python [[1,2]][section-references]. The pulse sequences can be
+exported as a `.seq` file to be run on  Siemens/[GE]/[Bruker] hardware by leveraging their respective
+Pulseq interpreters. This tool is targeted at MRI pulse sequence designers, researchers, students and other interested
+users. It is a translation of the Pulseq framework originally written in Matlab [[3]][section-references].
+
+👉 Currently, PyPulseq is compatible with Pulseq 1.4.0. 👈
+
+It is strongly recommended to first read the [Pulseq specification]  before proceeding. The specification
+document defines the concepts required for pulse sequence design using PyPulseq.
+
+If you use PyPulseq in your work, cite the following publications:
+```
+Ravi, Keerthi, Sairam Geethanath, and John Vaughan. "PyPulseq: A Python Package for MRI Pulse Sequence Design." Journal
+of Open Source Software 4.42 (2019): 1725.
+
+Ravi, Keerthi Sravan, et al. "Pulseq-Graphical Programming Interface: Open source visual environment for prototyping
+pulse sequences and integrated magnetic resonance imaging algorithm development." Magnetic resonance imaging 52 (2018):
+9-15.
+```
+
+## 📢 Pulse sequence development in your browser!
+Design pulse sequences using `pypulseq` in your browser! Check out the [⚡ Lightning-start][section-lightning-start] section to
+learn how!
+
+## 1. 👥 Contributors (alphabetical)
+- @bilal-tasdelen
+- @calderds
+- @mavel101
+- @nnmurthy
+- @sairamgeethanath
+- @schuenke
+- @skarrea
+- @tonggehua
+
+Please email me/submit PR/open an issue if any contributors are missing.
+
+## 2. 📚 [Citations][scholar-citations] (reverse chronological)
+1. Hennig, J., Barghoorn, A., Zhang, S. and Zaitsev, M., 2022. Single shot spiral TSE with annulated segmentation.
+Magnetic Resonance in Medicine.
+2. Niso, G., Botvinik-Nezer, R., Appelhoff, S., De La Vega, A., Esteban, O., Etzel, J.A., Finc, K., Ganz, M., Gau, R.,
+Halchenko, Y.O. and Herholz, P., 2022. Open and reproducible neuroimaging: from study inception to publication.
+3. Tong, G., Gaspar, A.S., Qian, E., Ravi, K.S., Vaughan, J.T., Nunes, R.G. and Geethanath, S., 2022. Open-source
+magnetic resonance imaging acquisition: Data and documentation for two validated pulse sequences. Data in Brief, 42,
+p.108105.
+4. Tong, G., Gaspar, A.S., Qian, E., Ravi, K.S., Vaughan Jr, J.T., Nunes, R.G. and Geethanath, S., 2022. A framework
+for validating open-source pulse sequences. Magnetic resonance imaging, 87, pp.7-18.
+5. Karakuzu, A., Appelhoff, S., Auer, T., Boudreau, M., Feingold, F., Khan, A.R., Lazari, A., Markiewicz, C., Mulder,
+M.J., Phillips, C. and Salo, T., 2021. qMRI-BIDS: an extension to the brain imaging data structure for quantitative
+magnetic resonance imaging data. medRxiv.
+6. Karakuzu, A., Biswas, L., Cohen‐Adad, J. and Stikov, N., 2021. Vendor‐neutral sequences and fully transparent
+workflows improve inter‐vendor reproducibility of quantitative MRI. Magnetic Resonance in Medicine.
+7. Geethanath, S., Single echo reconstruction for rapid and silent MRI. (ISMRM) (2021).
+8. Qian, E. and Geethanath, S., Open source Magnetic rEsonance fingerprinting pAckage (OMEGA). (ISMRM) (2021).
+9. Ravi, K.S., O'Reilly, T., Vaughan Jr, J.T., Webb, A. and Geethanath, S., Seq2prospa: translating PyPulseq for
+low-field imaging. (ISMRM) (2021).
+10. Ravi, K.S., Vaughan Jr, J.T. and Geethanath, S., PyPulseq in a web browser: a zero footprint tool for collaborative
+and vendor-neutral pulse sequence development. (ISMRM) (2021).
+11. Ravi, K.S. and Geethanath, S., 2020. Autonomous magnetic resonance imaging. Magnetic Resonance Imaging, 73,
+pp.177-185.
+12. Nunes, Rita G., et al. "Implementation of a Diffusion-Weighted Echo Planar Imaging sequence using the Open Source
+Hardware-Independent PyPulseq Tool." ISMRM & SMRT Virtual Conference & Exhibition, International Society for Magnetic
+Resonance in Medicine (ISMRM) (2020).
+13. Loktyushin, Alexander, et al. "MRzero--Fully automated invention of MRI sequences using supervised learning." arXiv
+preprint arXiv:2002.04265 (2020).
+14. Jimeno, Marina Manso, et al. "Cross-vendor implementation of a Stack-of-spirals PRESTO BOLD fMRI sequence using
+TOPPE and Pulseq." ISMRM & SMRT Virtual Conference & Exhibition, International Society for Magnetic Resonance in
+Medicine (ISMRM) (2020).
+15. Clarke, William T., et al. "Multi-site harmonization of 7 tesla MRI neuroimaging protocols." NeuroImage 206 (2020): 116335.
+16. Geethanath, Sairam, and John Thomas Vaughan Jr. "Accessible magnetic resonance imaging: a review." Journal of
+Magnetic Resonance Imaging 49.7 (2019): e65-e77.
+17. Tong, Gehua, et al. "Virtual Scanner: MRI on a Browser." Journal of Open Source Software 4.43 (2019): 1637.
+18. Archipovas, Saulius, et al. "A prototype of a fully integrated environment for a collaborative work in MR sequence
+development for a reproducible research." ISMRM 27th Annual Meeting & Exhibition, International Society for
+Magnetic Resonance in Medicine (ISMRM) (2019).
+19. Pizetta, Daniel Cosmo. PyMR: a framework for programming magnetic resonance systems. Diss. Universidade de São
+Paulo (2018).
+---
+
+## 3. 🔨 Installation
+\>=Python 3.6, virtual environment recommended:
+
+```pip install pypulseq```
+
+## 4. ⚡ Lightning-start - PyPulseq in your browser!
+1. Create a new notebook on [Google Colab][google-colab]
+2. [Install][section-installation] PyPulseq
+3. Get going!
+
+Or, explore an example notebook:
+1. Copy URL of an example notebook from [here][section-notebook-examples]
+2. On [Google Colab][google-colab], insert the copied link to get started
+
+## 5. 🏃‍♂ Quickstart - example scripts
+Every example script creates a pulse sequence, plots the pulse timing diagram and writes a `.seq` file to disk.
+1. [Install][section-installation] PyPulseq
+2. Download and run any of the [example][script-examples] scripts.
+
+## 6. 🤿 Deep dive - custom pulse sequences
+Getting started with pulse sequence design using `PyPulseq` is simple:
+1. [Install][section-installation] PyPulseq
+2. First, define system limits in `Opts` and then create a `Sequence` object with it:
+    ```python
+    import pypulseq as pp
+
+    system = pp.Opts(max_grad=32, grad_unit='mT/m', max_slew=130, slew_unit='mT/m/ms')
+    seq = pp.Sequence(system=system)
+    ```
+3. Then, design gradient, RF or ADC pulse sequence events:
+    ```python
+    Nx, Ny = 256, 256 # matrix size
+    fov = 220e-3 # field of view
+    delta_k = fov / Nx
+
+    # RF sinc pulse with a 90 degree flip angle
+    rf90 = pp.make_sinc_pulse(flip_angle=90, duration=2e-3, system=system, slice_thickness=5e-3, apodization=0.5,
+       time_bw_product=4)
+
+    # Frequency encode, trapezoidal event
+    gx = pp.make_trapezoid(channel='x', flat_area=Nx * delta_k, flat_time=6.4e-3, system=system)
+
+    # ADC readout
+    adc = pp.make_adc(num_samples=Nx, duration=gx.flat_time, delay=gx.rise_time, system=system)
+    ```
+4. Add these pulse sequence events to the `Sequence` object from step 2. One or more events can be executed
+simultaneously, simply pass them all to the `add_block()` method. For example, the `gx` and `adc` pulse sequence events
+need to be executed simultaneously:
+    ```python
+    seq.add_block(rf90)
+    seq.add_block(gx, adc)
+    ```
+5. Visualize plots:
+    ```python
+    seq.plot()
+    ```
+6. Generate a `.seq` file to be executed on a real MR scanner:
+    ```python
+    seq.write('demo.seq')
+    ```
+
+**Get in touch regarding running the `.seq` files on your Siemens/[GE]/[Bruker] scanner.**
+
+## 7. 👥 Contributing and Community guidelines
+`PyPulseq` adheres to a code of conduct adapted from the [Contributor Covenant] code of conduct.
+Contributing guidelines can be found [here][contrib-guidelines].
+
+## 8. 📖 References
+1. Ravi, Keerthi, Sairam Geethanath, and John Vaughan. "PyPulseq: A Python Package for MRI Pulse Sequence Design."
+Journal of Open Source Software 4.42 (2019): 1725.
+2. Ravi, Keerthi Sravan, et al. "Pulseq-Graphical Programming Interface: Open source visual environment for prototyping
+pulse sequences and integrated magnetic resonance imaging algorithm development." Magnetic resonance imaging 52 (2018):
+9-15.
+3. Layton, Kelvin J., et al. "Pulseq: a rapid and hardware‐independent pulse sequence prototyping framework." Magnetic
+resonance in medicine 77.4 (2017): 1544-1552.
+
+[Bruker]: https://github.com/pulseq/bruker_interpreter
+[Contributor Covenant]: http://contributor-covenant.org
+[GE]: https://toppemri.github.io
+[Pulseq specification]: https://pulseq.github.io/specification.pdf
+[api-docs]: https://pypulseq.readthedocs.io/
+[contrib-guidelines]: https://github.com/imr-framework/pypulseq/blob/master/CONTRIBUTING.md
+[google-colab]: https://colab.research.google.com/
+[scholar-citations]: https://scholar.google.com/scholar?oi=bibs&hl=en&cites=16703093871665262997
+[script-examples]: https://github.com/imr-framework/pypulseq/tree/dev/pypulseq/seq_examples/scripts
+[section-contributors]: #1--contributors-alphabetical
+[section-contributing]: #7--contributing-and-community-guidelines
+[section-deep-dive]: #6--deep-dive---custom-pulse-sequences
+[section-installation]: #3--installation
+[section-lightning-start]: #4--lightning-start---pypulseq-in-your-browser
+[section-quickstart-examples]: #5--quickstart---example-scripts
+[section-references]: #8--references
+[section-relevant-literature]: #2--citations-reverse-chronological

+ 39 - 0
LF_scanner/TSE_20231019_161845.json

@@ -0,0 +1,39 @@
+{
+    "G_amp_max": 1609372.8,
+    "G_slew_max": 5151696000.0,
+    "gamma": 42576000.0,
+    "grad_raster_time": 1e-05,
+    "rf_raster_time": 1e-06,
+    "t_BW_product_ex": 3.55,
+    "t_BW_product_ref": 3.55,
+    "t_ex": 0.00298,
+    "t_ref": 0.00333,
+    "rf_ringdown_time": [
+        2e-05
+    ],
+    "rf_dead_time": [
+        0.0001
+    ],
+    "adc_dead_time": [
+        1e-05
+    ],
+    "aapodization": 0.27,
+    "dG": 0.00025,
+    "sl_nb": 1.0,
+    "sl_thkn": 0.005,
+    "sl_gap": 100.0,
+    "FoV_f": 0.032,
+    "FoV_ph": 0.032,
+    "Nf": 16.0,
+    "Np": 16.0,
+    "BW_pixel": 500.0,
+    "TE": 0.02,
+    "N_TE": 2.0,
+    "ES": 0.01,
+    "TR": 0.5,
+    "FA": 90.0,
+    "conct": 1.0,
+    "ETL": 8.0,
+    "Average": 1,
+    "delay_TR": 0.41484
+}

File diff suppressed because it is too large
+ 762 - 0
LF_scanner/TSE_pulse_sequence-Copy1.ipynb


File diff suppressed because it is too large
+ 762 - 0
LF_scanner/TSE_pulse_sequence.ipynb


File diff suppressed because it is too large
+ 762 - 0
LF_scanner/TSE_pulse_sequence_T1.ipynb


File diff suppressed because it is too large
+ 722 - 0
LF_scanner/TSE_splited_gradients.ipynb


File diff suppressed because it is too large
+ 628 - 0
LF_scanner/TSE_splited_gradients_RESTORE.ipynb


+ 0 - 0
LF_scanner/__init__.py


+ 20 - 0
LF_scanner/doc/Makefile

@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS    ?=
+SPHINXBUILD   ?= sphinx-build
+SOURCEDIR     = source
+BUILDDIR      = build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

+ 0 - 0
LF_scanner/doc/__init__.py


+ 35 - 0
LF_scanner/doc/make.bat

@@ -0,0 +1,35 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+	set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=source
+set BUILDDIR=build
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+	echo.
+	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+	echo.installed, then set the SPHINXBUILD environment variable to point
+	echo.to the full path of the 'sphinx-build' executable. Alternatively you
+	echo.may add the Sphinx directory to PATH.
+	echo.
+	echo.If you don't have Sphinx installed, grab it from
+	echo.http://sphinx-doc.org/
+	exit /b 1
+)
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+
+:end
+popd

+ 2 - 0
LF_scanner/doc/readthedocs_requirements.txt

@@ -0,0 +1,2 @@
+furo==2021.4.11b34
+jinja2<3.1.0

+ 0 - 0
LF_scanner/doc/source/__init__.py


+ 66 - 0
LF_scanner/doc/source/conf.py

@@ -0,0 +1,66 @@
+# Configuration file for the Sphinx documentation builder.
+#
+# This file only contains a selection of the most common options. For a full
+# list see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+# -- Path setup --------------------------------------------------------------
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+import os
+import sys
+sys.path.insert(0, os.path.abspath('../../'))
+
+
+# -- Project information -----------------------------------------------------
+
+project = 'pypulseq'
+copyright = '2023, Keerthi Sravan Ravi'
+author = 'Keerthi Sravan Ravi'
+
+# The full version, including alpha/beta/rc tags
+release = '1.4.0'
+
+
+# -- General configuration ---------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'recommonmark'
+]
+
+source_suffix = {
+    '.rst': 'restructuredtext',
+    '.txt': 'restructuredtext',
+    '.md': 'markdown',
+}
+
+source_parsers = {'.md': 'recommonmark.parser.CommonMarkParser'}
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This pattern also affects html_static_path and html_extra_path.
+exclude_patterns = ['setup*', 'version*']
+
+autodoc_mock_imports = ['numpy', 'matplotlib', 'scipy', 'sigpy']
+
+# -- Options for HTML output -------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'furo'
+html_logo = '../../logo_transparent.png'
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']

+ 25 - 0
LF_scanner/doc/source/index.rst

@@ -0,0 +1,25 @@
+pypulseq
+====================================
+.. image:: ../../logo.png
+   :align: center
+
+Introduction
+============
+`pypulseq <https://github.com/imr-framework/pypulseq>`_ enables vendor-neutral pulse sequence design in Python [1]_ [2]_. The pulse sequences can be exported as a `.seq` file to be run on Siemens/`GE <https://toppemri.github.io>`_/`Bruker <https://github.com/pulseq/bruker_interpreter>`_ hardware by leveraging their respective Pulseq interpreters. This tool is targeted at MR pulse sequence designers, MRI researchers and other interested users. It is a translation of the Pulseq framework originally written in Matlab [3]_.
+
+It is strongly recommended to first read the `Pulseq specification <https://pulseq.github.io/specification.pdf>`_ before proceeding. The specification document defines the concepts required for pulse sequence design using `pypulseq`.
+
+.. [1] Ravi, Keerthi, Sairam Geethanath, and John Vaughan. "PyPulseq: A Python Package for MRI Pulse Sequence Design." Journal of Open Source Software 4.42 (2019): 1725.
+
+.. [2] Ravi, Keerthi Sravan, et al. "Pulseq-Graphical Programming Interface: Open source visual environment for prototyping pulse sequences and integrated magnetic resonance imaging algorithm development." Magnetic resonance imaging 52 (2018): 9-15.
+
+.. [3] Layton, Kelvin J., et al. "Pulseq: a rapid and hardware‐independent pulse sequence prototyping framework." Magnetic resonance in medicine 77.4 (2017): 1544-1552.
+
+
+API documentation
+==================
+
+.. toctree::
+    :maxdepth: 2
+
+    modules

+ 9 - 0
LF_scanner/doc/source/modules.rst

@@ -0,0 +1,9 @@
+pypulseq
+========
+
+.. toctree::
+   :maxdepth: 4
+
+   pypulseq
+   setup
+   version

+ 21 - 0
LF_scanner/doc/source/pypulseq.SAR.rst

@@ -0,0 +1,21 @@
+pypulseq.SAR package
+====================
+
+Submodules
+----------
+
+pypulseq.SAR.SAR\_calc module
+-----------------------------
+
+.. automodule:: pypulseq.SAR.SAR_calc
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: pypulseq.SAR
+   :members:
+   :undoc-members:
+   :show-inheritance:

+ 61 - 0
LF_scanner/doc/source/pypulseq.Sequence.rst

@@ -0,0 +1,61 @@
+pypulseq.Sequence package
+=========================
+
+Submodules
+----------
+
+pypulseq.Sequence.block module
+------------------------------
+
+.. automodule:: pypulseq.Sequence.block
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.Sequence.ext\_test\_report module
+------------------------------------------
+
+.. automodule:: pypulseq.Sequence.ext_test_report
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.Sequence.parula module
+-------------------------------
+
+.. automodule:: pypulseq.Sequence.parula
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.Sequence.read\_seq module
+----------------------------------
+
+.. automodule:: pypulseq.Sequence.read_seq
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.Sequence.sequence module
+---------------------------------
+
+.. automodule:: pypulseq.Sequence.sequence
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.Sequence.write\_seq module
+-----------------------------------
+
+.. automodule:: pypulseq.Sequence.write_seq
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: pypulseq.Sequence
+   :members:
+   :undoc-members:
+   :show-inheritance:

+ 319 - 0
LF_scanner/doc/source/pypulseq.rst

@@ -0,0 +1,319 @@
+pypulseq package
+================
+
+Subpackages
+-----------
+
+.. toctree::
+   :maxdepth: 4
+
+   pypulseq.SAR
+   pypulseq.Sequence
+   pypulseq.tests
+
+Submodules
+----------
+
+pypulseq.add\_gradients module
+------------------------------
+
+.. automodule:: pypulseq.add_gradients
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.add\_ramps module
+--------------------------
+
+.. automodule:: pypulseq.add_ramps
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.align module
+---------------------
+
+.. automodule:: pypulseq.align
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.block\_to\_events module
+---------------------------------
+
+.. automodule:: pypulseq.block_to_events
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.calc\_duration module
+------------------------------
+
+.. automodule:: pypulseq.calc_duration
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.calc\_ramp module
+--------------------------
+
+.. automodule:: pypulseq.calc_ramp
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.calc\_rf\_bandwidth module
+-----------------------------------
+
+.. automodule:: pypulseq.calc_rf_bandwidth
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.calc\_rf\_center module
+--------------------------------
+
+.. automodule:: pypulseq.calc_rf_center
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.check\_timing module
+-----------------------------
+
+.. automodule:: pypulseq.check_timing
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.compress\_shape module
+-------------------------------
+
+.. automodule:: pypulseq.compress_shape
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.convert module
+-----------------------
+
+.. automodule:: pypulseq.convert
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.decompress\_shape module
+---------------------------------
+
+.. automodule:: pypulseq.decompress_shape
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.event\_lib module
+--------------------------
+
+.. automodule:: pypulseq.event_lib
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.make\_adc module
+-------------------------
+
+.. automodule:: pypulseq.make_adc
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.make\_adiabatic\_pulse module
+--------------------------------------
+
+.. automodule:: pypulseq.make_adiabatic_pulse
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.make\_arbitrary\_grad module
+-------------------------------------
+
+.. automodule:: pypulseq.make_arbitrary_grad
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.make\_arbitrary\_rf module
+-----------------------------------
+
+.. automodule:: pypulseq.make_arbitrary_rf
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.make\_block\_pulse module
+----------------------------------
+
+.. automodule:: pypulseq.make_block_pulse
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.make\_delay module
+---------------------------
+
+.. automodule:: pypulseq.make_delay
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.make\_digital\_output\_pulse module
+--------------------------------------------
+
+.. automodule:: pypulseq.make_digital_output_pulse
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.make\_extended\_trapezoid module
+-----------------------------------------
+
+.. automodule:: pypulseq.make_extended_trapezoid
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.make\_extended\_trapezoid\_area module
+-----------------------------------------------
+
+.. automodule:: pypulseq.make_extended_trapezoid_area
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.make\_gauss\_pulse module
+----------------------------------
+
+.. automodule:: pypulseq.make_gauss_pulse
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.make\_label module
+---------------------------
+
+.. automodule:: pypulseq.make_label
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.make\_sigpy\_pulse module
+----------------------------------
+
+.. automodule:: pypulseq.make_sigpy_pulse
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.make\_sinc\_pulse module
+---------------------------------
+
+.. automodule:: pypulseq.make_sinc_pulse
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.make\_trapezoid module
+-------------------------------
+
+.. automodule:: pypulseq.make_trapezoid
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.make\_trigger module
+-----------------------------
+
+.. automodule:: pypulseq.make_trigger
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.opts module
+--------------------
+
+.. automodule:: pypulseq.opts
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.points\_to\_waveform module
+------------------------------------
+
+.. automodule:: pypulseq.points_to_waveform
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.rotate module
+----------------------
+
+.. automodule:: pypulseq.rotate
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.scale\_grad module
+---------------------------
+
+.. automodule:: pypulseq.scale_grad
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.sigpy\_pulse\_opts module
+----------------------------------
+
+.. automodule:: pypulseq.sigpy_pulse_opts
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.split\_gradient module
+-------------------------------
+
+.. automodule:: pypulseq.split_gradient
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.split\_gradient\_at module
+-----------------------------------
+
+.. automodule:: pypulseq.split_gradient_at
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.supported\_labels\_rf\_use module
+------------------------------------------
+
+.. automodule:: pypulseq.supported_labels_rf_use
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.traj\_to\_grad module
+------------------------------
+
+.. automodule:: pypulseq.traj_to_grad
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: pypulseq
+   :members:
+   :undoc-members:
+   :show-inheritance:

+ 117 - 0
LF_scanner/doc/source/pypulseq.tests.rst

@@ -0,0 +1,117 @@
+pypulseq.tests package
+======================
+
+Submodules
+----------
+
+pypulseq.tests.base module
+--------------------------
+
+.. automodule:: pypulseq.tests.base
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.tests.test\_MPRAGE module
+----------------------------------
+
+.. automodule:: pypulseq.tests.test_MPRAGE
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.tests.test\_epi module
+-------------------------------
+
+.. automodule:: pypulseq.tests.test_epi
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.tests.test\_epi\_label module
+--------------------------------------
+
+.. automodule:: pypulseq.tests.test_epi_label
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.tests.test\_epi\_se module
+-----------------------------------
+
+.. automodule:: pypulseq.tests.test_epi_se
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.tests.test\_epi\_se\_rs module
+---------------------------------------
+
+.. automodule:: pypulseq.tests.test_epi_se_rs
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.tests.test\_gre module
+-------------------------------
+
+.. automodule:: pypulseq.tests.test_gre
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.tests.test\_gre\_label module
+--------------------------------------
+
+.. automodule:: pypulseq.tests.test_gre_label
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.tests.test\_gre\_radial module
+---------------------------------------
+
+.. automodule:: pypulseq.tests.test_gre_radial
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.tests.test\_haste module
+---------------------------------
+
+.. automodule:: pypulseq.tests.test_haste
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.tests.test\_sigpy module
+---------------------------------
+
+.. automodule:: pypulseq.tests.test_sigpy
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.tests.test\_tse module
+-------------------------------
+
+.. automodule:: pypulseq.tests.test_tse
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+pypulseq.tests.test\_ute module
+-------------------------------
+
+.. automodule:: pypulseq.tests.test_ute
+   :members:
+   :undoc-members:
+   :show-inheritance:
+
+Module contents
+---------------
+
+.. automodule:: pypulseq.tests
+   :members:
+   :undoc-members:
+   :show-inheritance:

+ 7 - 0
LF_scanner/doc/source/setup.rst

@@ -0,0 +1,7 @@
+setup module
+============
+
+.. automodule:: setup
+   :members:
+   :undoc-members:
+   :show-inheritance:

+ 7 - 0
LF_scanner/doc/source/version.rst

@@ -0,0 +1,7 @@
+version module
+==============
+
+.. automodule:: version
+   :members:
+   :undoc-members:
+   :show-inheritance:

BIN
LF_scanner/doc/walkthrough/gre_1.png


BIN
LF_scanner/doc/walkthrough/gre_2.png


File diff suppressed because it is too large
+ 84 - 0
LF_scanner/doc/walkthrough/gre_walkthrough.ipynb


BIN
LF_scanner/logo.png


BIN
LF_scanner/logo_transparent.png


+ 0 - 0
LF_scanner/new_MRI_pulse_seq/__init__.py


File diff suppressed because it is too large
+ 0 - 0
LF_scanner/new_MRI_pulse_seq/pd_TSE/pd_TSE_matrx16x16_fixed_delay.xml


File diff suppressed because it is too large
+ 0 - 0
LF_scanner/new_MRI_pulse_seq/pd_TSE/pd_TSE_matrx16x16_myGrad.xml


BIN
LF_scanner/new_MRI_pulse_seq/pd_TSE/rf_1.h5


BIN
LF_scanner/new_MRI_pulse_seq/pd_TSE/rf_2.h5


BIN
LF_scanner/new_MRI_pulse_seq/pd_TSE/rf_3.h5


BIN
LF_scanner/new_MRI_pulse_seq/t1_FS_TSE/FS_T1_TSE_1.png


BIN
LF_scanner/new_MRI_pulse_seq/t1_FS_TSE/FS_T1_TSE_2.png


+ 0 - 0
LF_scanner/new_MRI_pulse_seq/t1_TSE/__init__.py


BIN
LF_scanner/new_MRI_pulse_seq/t1_TSE/rf_1.h5


BIN
LF_scanner/new_MRI_pulse_seq/t1_TSE/rf_2.h5


BIN
LF_scanner/new_MRI_pulse_seq/t1_TSE/rf_3.h5


BIN
LF_scanner/new_MRI_pulse_seq/t1_TSE/rf_4.h5


File diff suppressed because it is too large
+ 0 - 0
LF_scanner/new_MRI_pulse_seq/t1_TSE/t1_TSE_matrx16x16_fixed_delay.xml


File diff suppressed because it is too large
+ 0 - 0
LF_scanner/new_MRI_pulse_seq/t1_TSE/t1_TSE_matrx16x16_myGrad.xml


+ 432 - 0
LF_scanner/new_MRI_pulse_seq/t1_TSE/write_TSE_T1.py

@@ -0,0 +1,432 @@
+#---------------------------------------------------------------------
+# imports of the libraries
+#---------------------------------------------------------------------
+from math import pi
+import numpy as np
+import math
+import json as j
+
+from pypulseq.Sequence.sequence import Sequence
+from pypulseq.calc_rf_center import calc_rf_center
+from pypulseq.calc_duration import calc_duration
+from pypulseq.make_adc import make_adc
+from pypulseq.make_delay import make_delay
+from pypulseq.make_sinc_pulse import make_sinc_pulse
+from pypulseq.make_gauss_pulse import make_gauss_pulse
+from pypulseq.make_adiabatic_pulse import make_adiabatic_pulse
+from pypulseq.make_trapezoid import make_trapezoid
+from pypulseq.make_extended_trapezoid import make_extended_trapezoid
+from pypulseq.opts import Opts
+from pypulseq.align import align
+from pypulseq.traj_to_grad import traj_to_grad
+
+from pypulseq.utilities import phase_grad_utils as pgu
+
+from py2jemris.seq2xml import seq2xml
+
def FS_CHESS_block(params, scanner_parameters, g_rf_area, flip_fs):
    """Build a CHESS fat-saturation block.

    A frequency-selective Gauss pulse is played at the fat resonance offset,
    followed by spoiler gradients on x and y that dephase the saturated
    transverse magnetization.

    Side effect: writes the hard-coded B0 / fat-offset / duration protocol
    values back into ``params``.  # TODO add to GUI

    Returns (rf_fs, gx_fs, gy_fs).
    """
    # Hard-coded protocol values, also recorded in the shared params dict.
    params['B0'] = 1.5  # TODO add to GUI
    params['FS_sat_ppm'] = -3.45  # TODO add to GUI
    params['FS_pulse_duration'] = 8e-3  # TODO add to GUI

    # Chemical-shift offset of fat relative to water, in Hz.
    offset_hz = params['B0'] * 1e-6 * params['FS_sat_ppm'] * params['gamma']

    sat_pulse = make_gauss_pulse(flip_angle=flip_fs, system=scanner_parameters,
                                 duration=params['FS_pulse_duration'],
                                 bandwidth=abs(offset_hz), freq_offset=offset_hz)

    # Spoilers start only once the RF pulse has finished playing out.
    spoiler_delay = calc_duration(sat_pulse)
    spoilers = [
        make_trapezoid(channel=axis, system=scanner_parameters,
                       delay=spoiler_delay, area=g_rf_area,
                       rise_time=params['dG'])
        for axis in ("x", "y")
    ]

    return sat_pulse, spoilers[0], spoilers[1]
+
def inversion_block(params, scanner_parameters):
    """Create a slice-selective 180-degree inversion pulse plus the TI delay.

    Side effect: writes the hard-coded inversion time into
    ``params['IR_time']``.

    Returns (rf_ir, gz_ir, delay_IR).
    """
    params['IR_time'] = 0.140  # STIR  # TODO add to GUI
    # params['IR_time'] = 2.250  # FLAIR  # TODO add to GUI

    # BUGFIX: the original used round(180 * pi / 180) with no ndigits,
    # which rounds pi down to 3 rad (~171.9 deg); keep three decimals as
    # done for the excitation flip angle elsewhere in this file.
    flip_ir = round(180 * pi / 180, 3)
    rf_ir, gz_ir, _ = make_sinc_pulse(flip_angle=flip_ir, system=scanner_parameters, duration=params['t_ref'],
                                      slice_thickness=params['sl_thkn'], apodization=0.3,
                                      time_bw_product=round(params['t_BW_product_ref'], 8), phase_offset=90 * pi / 180,
                                      return_gz=True)

    # Round the inversion delay up to the gradient raster.
    delay_IR = np.ceil(params['IR_time'] / scanner_parameters.grad_raster_time) * scanner_parameters.grad_raster_time
    delay_IR = make_delay(delay_IR)

    return rf_ir, gz_ir, delay_IR
+
def SPAIR_block(params, scanner_parameters, g_rf_area):
    """Create a SPAIR fat-suppression block.

    An adiabatic (hypsec) inversion pulse is played at the fat resonance
    offset, followed by x/y spoiler gradients and the post-inversion delay.

    Side effect: writes the hard-coded B0 / fat-offset / timing protocol
    values back into ``params``.  # TODO add to GUI

    Returns (rf_SPAIR, gx_SPAIR, gy_SPAIR, delay_IR).
    """
    params['B0'] = 1.5  # TODO add to GUI
    params['FS_sat_ppm'] = -3.45  # TODO add to GUI
    params['FS_pulse_duration'] = 8e-3  # TODO add to GUI
    params['IR_time'] = 0.140  # SPAIR  # TODO add to GUI

    # Chemical-shift offset of fat relative to water, in Hz.
    FS_sat_frequency = params['B0'] * 1e-6 * params['FS_sat_ppm'] * params['gamma']

    # CLEANUP: the original computed an unused local flip_SPAIR; the
    # adiabatic pulse takes no flip angle here, so it was removed.
    rf_SPAIR = make_adiabatic_pulse(pulse_type="hypsec", system=scanner_parameters,
                                    freq_offset=FS_sat_frequency)

    # Spoilers start only once the RF pulse has finished playing out.
    gx_SPAIR = make_trapezoid(channel="x", system=scanner_parameters, delay=calc_duration(rf_SPAIR),
                              area=g_rf_area, rise_time=params['dG'])
    gy_SPAIR = make_trapezoid(channel="y", system=scanner_parameters, delay=calc_duration(rf_SPAIR),
                              area=g_rf_area, rise_time=params['dG'])

    # Round the inversion delay up to the gradient raster.
    delay_IR = np.ceil(params['IR_time'] / scanner_parameters.grad_raster_time) * scanner_parameters.grad_raster_time
    delay_IR = make_delay(delay_IR)

    return rf_SPAIR, gx_SPAIR, gy_SPAIR, delay_IR
+
+
def TSE_k_space_fill(n_ex, ETL, k_steps, TE_eff_number, order):
    """Return the phase-encoding line order for a TSE acquisition.

    Builds, for each excitation, the list of k-space line indices acquired
    by its echo train.  The base train covers lines
    ``(ETL-1)*n_ex, (ETL-2)*n_ex, ..., 0`` and is rotated so that the
    central line (``k_steps // 2``) is acquired by echo ``TE_eff_number``,
    which sets the effective TE.  Subsequent excitations repeat the train
    shifted by +1, +2, ... line.

    Parameters
    ----------
    n_ex : int
        Number of excitations (echo trains).
    ETL : int
        Echo train length (lines acquired per excitation).
    k_steps : int
        Total number of phase-encoding lines.
    TE_eff_number : int
        1-based echo index that must land on the central line.
    order : str
        ``'non_linear'`` interleaves the tail of the train from both ends;
        any other value keeps the linear (rotated) order.

    Returns
    -------
    list[list[int]]
        One list of line indices per excitation.

    Raises
    ------
    ValueError
        If the central line is not reachable by the base train.
    """
    base = [(ETL - 1 - i) * n_ex for i in range(ETL)]
    central_num = int(k_steps / 2)
    index_central_line = base.index(central_num)  # ValueError if unreachable
    shift = index_central_line - TE_eff_number + 1

    # Rotate so the central line lands on echo TE_eff_number.  (The original
    # had two identical branches for shift > 0 and shift < 0; rotating by 0
    # is the identity, so no case split is needed.)
    base = base[shift:] + base[:shift]

    if order == 'non_linear':
        split = (shift - index_central_line) * 2 + 1
        head, tail = base[:split], base[split:]
        # Interleave the remaining lines from both ends of the tail.
        for k in range(len(tail) // 2):
            head.append(tail[k])
            head.append(tail[-(k + 1)])
        # BUGFIX: the original unconditionally appended one more element
        # after the loop, duplicating a line for even-length tails and
        # reading a stale loop index (IndexError) when the interleave loop
        # never ran; only an odd-length tail has a leftover middle element.
        if len(tail) % 2:
            head.append(tail[len(tail) // 2])
        base = head

    # Every further excitation acquires the same train shifted by one line.
    return [[line + offset for line in base] for offset in range(n_ex)]
+
+
def main(plot: bool, write_seq: bool, weightning, FS: bool, IR: bool, SPAIR):
    """Build a multi-slice TSE (turbo spin echo) sequence and export it.

    Parameters
    ----------
    plot : bool
        If True, show the pypulseq timing diagram after construction.
    write_seq : bool
        If True, write the .seq file and its JEMRIS .xml translation.
    weightning : str
        Image contrast, one of 'T1', 'T2', 'PD'; selects the parameter JSON.
    FS : bool
        Prepend a CHESS fat-saturation block before every excitation.
    IR : bool
        Prepend an inversion-recovery (STIR-style) block.
    SPAIR : bool
        Prepend an adiabatic SPAIR fat-suppression block (T2 only).

    Raises
    ------
    ValueError
        If `weightning` is not 'T1', 'T2' or 'PD'.
    """
    # ------------------------------------------------------------------
    # Pick the protocol parameter file for the requested contrast /
    # preparation combination.  # TODO: create general path
    # ------------------------------------------------------------------
    if weightning == 'T1':
        config = 'FS_TSE_T1' if FS else ('IR_TSE_T1' if IR else 'TSE_T1')
    elif weightning == 'T2':
        if FS:
            config = 'FS_TSE_T2'
        elif IR:
            config = 'IR_TSE_T2'
        elif SPAIR:
            config = 'SPAIR_TSE_T2'
        else:
            config = 'TSE_T2'
    elif weightning == 'PD':
        config = 'FS_TSE_PD' if FS else 'TSE_PD'
    else:
        # BUGFIX: the original only printed a warning and then crashed
        # later with a NameError on `params`; fail fast instead.
        raise ValueError("weightning must be one of 'T1', 'T2', 'PD'")
    # Raw string: '\T', '\F', ... are not escape sequences today, but
    # non-raw backslash paths raise SyntaxWarning on modern Python.
    with open(rf'C:\MRI_seq_files_mess\TSE\{config}.json', 'rb') as f:
        params = j.load(f)

    # Total sampling window per echo, s.
    readout_time = round(1 / params['BW_pixel'], 8)

    # --------------------------
    # Set system limits
    # --------------------------
    scanner_parameters = Opts(
        max_grad=37.8,
        grad_unit="mT/m",
        max_slew=121,
        slew_unit="T/m/s",
        rf_ringdown_time=params['rf_ringdown_time'],
        rf_dead_time=params['rf_dead_time'],
        adc_dead_time=params['adc_dead_time'],
        rf_raster_time=params['rf_raster_time'],
        grad_raster_time=params['grad_raster_time'],
        block_duration_raster=params['grad_raster_time'],
        adc_raster_time=1 / (params['BW_pixel'] * params['Nf'])
    )
    seq = Sequence(scanner_parameters)

    # --------------------------
    # RF & Gradients
    # --------------------------
    rf90_phase = np.pi / 2
    rf180_phase = 0

    flip90 = round(params['FA'] * pi / 180, 3)
    # BUGFIX: round() without ndigits rounded pi to 3 rad (~171.9 deg) and
    # 110 deg to 2 rad (~114.6 deg); keep three decimals as for flip90.
    flip180 = round(180 * pi / 180, 3)
    flip_fs = round(110 * pi / 180, 3)
    rf90, gz_ex, _ = make_sinc_pulse(flip_angle=flip90, system=scanner_parameters, duration=params['t_ex'],
                                     slice_thickness=params['sl_thkn'], apodization=0.3,
                                     time_bw_product=round(params['t_BW_product_ex'], 8), return_gz=True)

    rf180, gz_ref, _ = make_sinc_pulse(flip_angle=flip180, system=scanner_parameters, duration=params['t_ref'],
                                       slice_thickness=params['sl_thkn'], apodization=0.3,
                                       time_bw_product=round(params['t_BW_product_ref'], 8), phase_offset=90 * pi / 180,
                                       return_gz=True)

    if FS:  # TODO add to GUI choice of including the Fat Sat block or not
        g_rf_area = gz_ex.area * 10
        rf_fs, gx_fs, gy_fs = FS_CHESS_block(params, scanner_parameters, g_rf_area, flip_fs)

    if IR:  # TODO add to GUI choice of including the Inversion block or not
        rf_ir, gz_ir, delay_IR = inversion_block(params, scanner_parameters)

    if SPAIR:  # TODO add to GUI choice of including the SPAIR block or not
        g_rf_area = gz_ex.area * 10
        rf_SPAIR, gx_SPAIR, gy_SPAIR, delay_IR = SPAIR_block(params, scanner_parameters, g_rf_area)

    # Prepare RF frequency offsets (one per slice) for multi-slice
    # acquisition: slices are spread symmetrically around isocentre with
    # thickness-plus-gap spacing, converted to Hz via the slice gradient.
    pulse_offsets90 = (np.linspace(0.0, params['sl_nb'] - 1.0, np.int16(params['sl_nb'])) - 0.5 * (
                np.double(params['sl_nb']) - 1.0)) * (
                              params['sl_thkn'] * (100.0 + params['sl_gap']) / 100.0) * gz_ex.amplitude
    pulse_offsets180 = (np.linspace(0.0, params['sl_nb'] - 1.0, np.int16(params['sl_nb'])) - 0.5 * (
                np.double(params['sl_nb']) - 1.0)) * (
                               params['sl_thkn'] * (100.0 + params['sl_gap']) / 100.0) * gz_ref.amplitude

    # Slice-selective gradient flat tops must cover the RF pulse plus
    # ringdown and dead times.
    t_exwd = params['t_ex'] + scanner_parameters.rf_ringdown_time + scanner_parameters.rf_dead_time
    t_refwd = params['t_ref'] + scanner_parameters.rf_ringdown_time + scanner_parameters.rf_dead_time

    gz90 = make_trapezoid(channel="z", system=scanner_parameters, amplitude=gz_ex.amplitude,
                          flat_time=t_exwd, rise_time=params['dG'])
    gz180 = make_trapezoid(channel="z", system=scanner_parameters, amplitude=gz_ref.amplitude,
                           flat_time=t_refwd, rise_time=params['dG'])

    # Basic gx readout gradient - G_read
    k_read = np.double(params['Nf']) / np.double(params['FoV_f'])
    t_gx = np.ceil(readout_time / scanner_parameters.grad_raster_time) * scanner_parameters.grad_raster_time
    gx = make_trapezoid(channel='x', system=scanner_parameters, flat_area=k_read,
                        flat_time=t_gx + 2 * scanner_parameters.adc_dead_time)

    # gx spoiler gradient - G_crr
    gx_spoil = make_trapezoid(channel='x', system=scanner_parameters, area=gx.area, flat_time=params['dG'],
                              rise_time=params['dG'])

    # Read prephase gradient - G_pre
    gx_pre = make_trapezoid(channel="x", system=scanner_parameters, area=gx.area * 1.50,
                            rise_time=params['dG'])

    # Rephase gradient after the 90-deg RF pulse - G_reph
    gz_reph = make_trapezoid(channel="z", system=scanner_parameters, area=gz_ex.area * 0.25,
                             flat_time=calc_duration(gx_pre), rise_time=params['dG'])

    # Spoil gradient around the 180-deg RF pulse - G_crs
    # (the unused t_gz_spoil computation from the original was removed)
    gz_spoil = make_trapezoid(channel='z', system=scanner_parameters, area=gz90.area * 0.75, rise_time=params['dG'],
                              flat_time=params['dG'])

    # End-of-train spoil gradient - G_sps
    gz_cr = make_trapezoid(channel='z', system=scanner_parameters, area=gz90.area * 4, rise_time=params['dG'])

    # ADC aligned with the readout flat top.
    adc = make_adc(num_samples=params['Nf'], duration=t_gx, delay=scanner_parameters.adc_dead_time,
                   system=scanner_parameters)

    # --------------------------
    # k-space filling quantification
    # --------------------------
    k_phase = np.double(params['Np']) / np.double(params['FoV_ph'])
    k_steps_PE = pgu.create_k_steps(k_phase, np.int16(params['Np']))  # list of phase-encoding gradient areas

    n_ex = math.floor(params['Np'] / params['ETL'])  # number of excitations
    k_space_order_filing = TSE_k_space_fill(n_ex, np.int32(params['ETL']), np.int32(params['Np']), np.int32(
        params['N_TE']), 'non_linear')  # TODO: create additional functions for different k-space orderings
    k_space_save = {'k_space_order': k_space_order_filing}

    # Save the ordering for reconstruction outside JEMRIS.
    output_filename = "k_space_order_filing"
    # output_filename = "TSE_T1" + datetime.now().strftime("%Y%m%d_%H%M%S")
    with open(output_filename + ".json", 'w') as outfile:
        j.dump(k_space_save, outfile)

    # --------------------------
    # DELAYS
    # --------------------------
    # Duration of one complete shot (prep + excitation + echo train), used
    # to work out how many slices fit into one TR.
    block_duration = max(calc_duration(rf90), calc_duration(gz90)) / 2
    block_duration += max(calc_duration(gx_pre), calc_duration(gz_spoil))
    for _ in range(np.int32(params['ETL']) - 1):
        block_duration += max(calc_duration(rf180), calc_duration(gz180))
        block_duration += max(calc_duration(gx_spoil), calc_duration(gz_spoil))
        block_duration += max(calc_duration(gx_spoil), calc_duration(adc))
        block_duration += calc_duration(gz_spoil)
    block_duration += max(calc_duration(rf180), calc_duration(gz_spoil))
    block_duration += max(calc_duration(gx_spoil), calc_duration(gz_spoil))
    block_duration += max(calc_duration(gx_spoil), calc_duration(adc))
    block_duration += calc_duration(gz_cr)
    if FS:
        block_duration += calc_duration(gx_fs)
    if IR:
        block_duration += max(calc_duration(rf_ir), calc_duration(gz_ir))
        block_duration += calc_duration(delay_IR)
    if SPAIR:
        block_duration += calc_duration(gx_SPAIR)
        block_duration += calc_duration(delay_IR)

    # --------------------------
    # CONSTRUCT CONCATENATION timings
    # --------------------------
    eff_time = block_duration

    # Distribute slices over concatenations so each TR is not overfilled.
    max_slices_per_TR = np.floor(params['TR'] / eff_time)
    required_concats = np.int32(np.ceil(params['sl_nb'] / max_slices_per_TR))
    slice_list = list(range(np.int32(params['sl_nb'])))
    slice_list = [slice_list[x::required_concats] for x in range(required_concats)]

    # TR fillers: idle time left in each TR, floored to the raster.
    tr_pauses = [(params['TR'] / np.double(len(x))) - eff_time for x in slice_list]
    raster = max(seq.grad_raster_time, seq.rf_raster_time)
    tr_pauses = [raster * np.floor(x / raster) for x in tr_pauses]
    tr_fillers = [make_delay(x) for x in tr_pauses]

    # --------------------------
    # CONSTRUCT SEQUENCE
    # --------------------------
    for _ in range(params['Average']):  # Averages
        for curr_concat in range(required_concats):
            for phase_steps in k_space_order_filing:  # one echo train per entry
                for curr_slice in range(np.int32(params['sl_nb'])):  # Slices
                    # Apply per-slice RF offsets; the phase is compensated
                    # at the RF centre so all slices share the same phase.
                    n_echo_temp = 0
                    rf90.freq_offset = pulse_offsets90[curr_slice]
                    rf180.freq_offset = pulse_offsets180[curr_slice]
                    rf90.phase_offset = (rf90_phase - 2 * np.pi * rf90.freq_offset * calc_rf_center(rf90)[0])
                    rf180.phase_offset = (rf180_phase - 2 * np.pi * rf180.freq_offset * calc_rf_center(rf180)[0])
                    print('curr_concat_' + str(curr_concat))
                    print('curr_slice_' + str(curr_slice))
                    if FS:
                        seq.add_block(gx_fs, gy_fs, rf_fs)
                    if IR:
                        seq.add_block(gz_ir, rf_ir)
                        seq.add_block(delay_IR)
                    if SPAIR:
                        seq.add_block(gx_SPAIR, gy_SPAIR, rf_SPAIR)
                        seq.add_block(delay_IR)
                    seq.add_block(gz90, rf90)
                    seq.add_block(gz_reph, gx_pre)
                    for phase_step in phase_steps:
                        print('phase step_' + str(phase_step))
                        seq.add_block(gz180, rf180)
                        # Phase-encode before the readout...
                        gy_pre = make_trapezoid(channel='y', system=scanner_parameters,
                                                area=-k_steps_PE[phase_step], duration=calc_duration(gz_spoil),
                                                rise_time=params['dG'])
                        print(k_steps_PE[phase_step])

                        seq.add_block(gz_spoil, gx_spoil, gy_pre)
                        seq.add_block(gx, adc)
                        n_echo_temp += 1
                        # ...and rewind it afterwards, so every echo starts
                        # from the same k-space position.
                        gy_pre = make_trapezoid(channel='y', system=scanner_parameters,
                                                area=k_steps_PE[phase_step], duration=calc_duration(gz_spoil),
                                                rise_time=params['dG'])
                        if n_echo_temp == np.int32(params['ETL']):
                            # Last echo of the train: play the big spoiler.
                            seq.add_block(gz_cr, gx_spoil, gy_pre)
                        else:
                            seq.add_block(gz_spoil, gx_spoil, gy_pre)
                    seq.add_block(tr_fillers[curr_concat])

    ok, error_report = seq.check_timing()
    if ok:
        print("Timing check passed successfully")
    else:
        print("Timing check failed. Error listing follows:")
        for e in error_report:
            print(e)

    # ======
    # VISUALIZATION
    # ======
    if plot:
        seq.plot()

    # =========
    # WRITE .SEQ
    # =========
    if write_seq:  # TODO: create general path
        # Output folder / file names all follow one pattern; only the
        # variant prefix and the set of valid weightings differ per
        # preparation (IR has no PD protocol, SPAIR is T2-only).
        if FS:
            variant, valid = 'FS_', ('T1', 'T2', 'PD')
        elif IR:
            variant, valid = 'IR_', ('T1', 'T2')
        elif SPAIR:
            variant, valid = 'SPAIR_', ('T2',)
        else:
            variant, valid = '', ('T1', 'T2', 'PD')
        if weightning in valid:
            w = weightning.lower()
            out_folder = rf'C:\MRI_seq\new_MRI_pulse_seq\{w}_{variant}TSE'
            seq.write(out_folder + '\\' + variant + w + '_TSE_matrix32x32.seq')  # Save to disk
            # NOTE(review): 'matrx16x16' is kept byte-for-byte from the
            # original xml names even though the .seq name says 32x32 —
            # confirm which matrix size is intended.
            seq2xml(seq, seq_name=variant + w + '_TSE_matrx16x16_myGrad', out_folder=out_folder)
        else:
            print('Please choose image weightning')
+
if __name__ == "__main__":
    # Entry point: generate a T1-weighted TSE sequence with no
    # fat-saturation / inversion / SPAIR preparation, plot it and write it.
    main(plot=True, write_seq=True, weightning='T1', FS=False, IR=False, SPAIR=False)

BIN
LF_scanner/new_MRI_pulse_seq/t1_se/rf_1.h5


BIN
LF_scanner/new_MRI_pulse_seq/t1_se/rf_2.h5


BIN
LF_scanner/new_MRI_pulse_seq/t1_se/rf_3.h5


BIN
LF_scanner/new_MRI_pulse_seq/t1_se/rf_4.h5


BIN
LF_scanner/new_MRI_pulse_seq/t1_se/with_gxspoi_without_phases_offsets/different_contrasts.pptx


File diff suppressed because it is too large
+ 0 - 0
LF_scanner/new_MRI_pulse_seq/t2_IR_TSE/IR_t2_TSE_matrx16x16_myGrad.xml


BIN
LF_scanner/new_MRI_pulse_seq/t2_IR_TSE/T2_STIR_TSE_1.png


BIN
LF_scanner/new_MRI_pulse_seq/t2_IR_TSE/T2_STIR_TSE_2.png


BIN
LF_scanner/new_MRI_pulse_seq/t2_IR_TSE/rf_1.h5


BIN
LF_scanner/new_MRI_pulse_seq/t2_IR_TSE/rf_2.h5


BIN
LF_scanner/new_MRI_pulse_seq/t2_IR_TSE/rf_3.h5


BIN
LF_scanner/new_MRI_pulse_seq/t2_TSE/rf_1.h5


BIN
LF_scanner/new_MRI_pulse_seq/t2_TSE/rf_2.h5


BIN
LF_scanner/new_MRI_pulse_seq/t2_TSE/rf_3.h5


File diff suppressed because it is too large
+ 0 - 0
LF_scanner/new_MRI_pulse_seq/t2_TSE/t2_TSE_matrx16x16_fixed_delay.xml


File diff suppressed because it is too large
+ 0 - 0
LF_scanner/new_MRI_pulse_seq/t2_TSE/t2_TSE_matrx16x16_myGrad.xml


+ 0 - 0
LF_scanner/new_MRI_pulse_seq/t2_TSE_RESTORE/__init__.py


BIN
LF_scanner/new_MRI_pulse_seq/t2_TSE_RESTORE/rf_1.h5


BIN
LF_scanner/new_MRI_pulse_seq/t2_TSE_RESTORE/rf_2.h5


BIN
LF_scanner/new_MRI_pulse_seq/t2_TSE_RESTORE/rf_3.h5


BIN
LF_scanner/new_MRI_pulse_seq/t2_TSE_RESTORE/rf_4.h5


File diff suppressed because it is too large
+ 0 - 0
LF_scanner/new_MRI_pulse_seq/t2_TSE_RESTORE/t2_TSE_RESTORE_matrx16x16_myGrad.xml


+ 289 - 0
LF_scanner/new_MRI_pulse_seq/t2_TSE_RESTORE/write_TSE_T2_RESTORE.py

@@ -0,0 +1,289 @@
+#---------------------------------------------------------------------
+# imports of the libraries
+#---------------------------------------------------------------------
+from math import pi
+import numpy as np
+import math
+import json as j
+
+from pypulseq.Sequence.sequence import Sequence
+from pypulseq.calc_rf_center import calc_rf_center
+from pypulseq.calc_duration import calc_duration
+from pypulseq.make_adc import make_adc
+from pypulseq.make_delay import make_delay
+from pypulseq.make_sinc_pulse import make_sinc_pulse
+from pypulseq.make_trapezoid import make_trapezoid
+from pypulseq.make_extended_trapezoid import make_extended_trapezoid
+from pypulseq.opts import Opts
+from pypulseq.align import align
+from pypulseq.traj_to_grad import traj_to_grad
+
+from pypulseq.utilities import phase_grad_utils as pgu
+
+from py2jemris.seq2xml import seq2xml
+
+
def TSE_k_space_fill(n_ex, ETL, k_steps, TE_eff_number):
    """Return the linear phase-encoding line order for a TSE acquisition.

    Builds, for each excitation, the list of k-space line indices acquired
    by its echo train.  The base train covers lines
    ``(ETL-1)*n_ex, (ETL-2)*n_ex, ..., 0`` and is rotated so that the
    central line (``k_steps // 2``) is acquired by echo ``TE_eff_number``,
    which sets the effective TE.  Subsequent excitations repeat the train
    shifted by +1, +2, ... line.

    Parameters
    ----------
    n_ex : int
        Number of excitations (echo trains).
    ETL : int
        Echo train length (lines acquired per excitation).
    k_steps : int
        Total number of phase-encoding lines.
    TE_eff_number : int
        1-based echo index that must land on the central line.

    Returns
    -------
    list[list[int]]
        One list of line indices per excitation.

    Raises
    ------
    ValueError
        If the central line is not reachable by the base train.
    """
    base = [(ETL - 1 - i) * n_ex for i in range(ETL)]
    central_num = int(k_steps / 2)
    index_central_line = base.index(central_num)  # ValueError if unreachable
    shift = index_central_line - TE_eff_number + 1

    # CLEANUP: the original had two identical branches for shift > 0 and
    # shift < 0; rotating by 0 is the identity, so rotate unconditionally.
    base = base[shift:] + base[:shift]

    # Every further excitation acquires the same train shifted by one line.
    return [[line + offset for line in base] for offset in range(n_ex)]
+
+
+def main(plot: bool, write_seq: bool, weightning):
+
+    # Reading json file according to the weightning of the image
+    if weightning == 'T2':
+        with open('C:\MRI_seq_files_mess\TSE\RESTORE_T2.json', 'rb') as f:
+            params = j.load(f)
+    else:
+        print('exists only for T2')
+
+    readout_time = round(1 / params['BW_pixel'], 8)
+
+    # --------------------------
+    # Set system limits
+    # --------------------------
+
+    scanner_parameters = Opts(
+        max_grad=37.8,
+        grad_unit="mT/m",
+        max_slew=121,
+        slew_unit="T/m/s",
+        rf_ringdown_time=params['rf_ringdown_time'],
+        rf_dead_time=params['rf_dead_time'],
+        adc_dead_time=params['adc_dead_time'],
+        rf_raster_time=params['rf_raster_time'],
+        grad_raster_time=params['grad_raster_time'],
+        block_duration_raster=params['grad_raster_time'],
+        adc_raster_time=1 / (params['BW_pixel'] * params['Nf'])
+    )
+    seq = Sequence(scanner_parameters)
+
+    #--------------------------
+    # RF & Gradients
+    #--------------------------
+
+    rf90_phase = np.pi / 2
+    rf180_phase = 0
+
+    flip90 = round(params['FA'] * pi / 180, 3)
+    flip180 = round(180 * pi / 180)
+    rf90, gz_ex, _ = make_sinc_pulse(flip_angle=flip90, system=scanner_parameters, duration=params['t_ex'],
+                                     slice_thickness=params['sl_thkn'], apodization=0.3,
+                                     time_bw_product=round(params['t_BW_product_ex'], 8), return_gz=True)
+
+    rf180, gz_ref, _ = make_sinc_pulse(flip_angle=flip180, system=scanner_parameters, duration=params['t_ref'],
+                                       slice_thickness=params['sl_thkn'], apodization=0.3,
+                                       time_bw_product=round(params['t_BW_product_ref'], 8), phase_offset=90 * pi / 180,
+                                       return_gz=True)
+
+    # Restore RF pulse -90
+    rf_restore, gz_resto, _ = make_sinc_pulse(flip_angle=-flip90, system=scanner_parameters, duration=params['t_ex'],
+                                              slice_thickness=params['sl_thkn'], apodization=0.3,
+                                              time_bw_product=round(params['t_BW_product_ex'], 8), return_gz=True)
+
+    # Prepare RF offsets. This is required for multi-slice acquisition
+    pulse_offsets90 = (np.linspace(0.0, params['sl_nb'] - 1.0, np.int16(params['sl_nb'])) - 0.5 * (
+                np.double(params['sl_nb']) - 1.0)) * (
+                                  params['sl_thkn'] * (100.0 + params['sl_gap']) / 100.0) * gz_ex.amplitude
+    pulse_offsets180 = (np.linspace(0.0, params['sl_nb'] - 1.0, np.int16(params['sl_nb'])) - 0.5 * (
+                np.double(params['sl_nb']) - 1.0)) * (
+                                   params['sl_thkn'] * (100.0 + params['sl_gap']) / 100.0) * gz_ref.amplitude
+    pulse_offsets_restore = (np.linspace(0.0, params['sl_nb'] - 1.0, np.int16(params['sl_nb'])) - 0.5 * (
+                np.double(params['sl_nb']) - 1.0)) * (
+                                        params['sl_thkn'] * (100.0 + params['sl_gap']) / 100.0) * gz_resto.amplitude
+
+    # slice selective gradient drafts for complex gradient blocks
+    t_exwd = params['t_ex'] + scanner_parameters.rf_ringdown_time + scanner_parameters.rf_dead_time
+    t_refwd = params['t_ref'] + scanner_parameters.rf_ringdown_time + scanner_parameters.rf_dead_time
+    t_restore = t_exwd
+
+    gz90 = make_trapezoid(channel="z", system=scanner_parameters, amplitude=gz_ex.amplitude,
+                          flat_time=t_exwd, rise_time=params['dG'])
+    gz180 = make_trapezoid(channel="z", system=scanner_parameters, amplitude=gz_ref.amplitude,
+                           flat_time=t_refwd, rise_time=params['dG'])
+    gz_restore = make_trapezoid(channel="z", system=scanner_parameters, amplitude=gz_resto.amplitude,
+                                flat_time=t_restore, rise_time=params['dG'])
+
+    # generate basic gx readout gradient - G_read
+    k_read = np.double(params['Nf']) / np.double(params['FoV_f'])
+    t_gx = np.ceil(readout_time / scanner_parameters.grad_raster_time) * scanner_parameters.grad_raster_time
+    gx = make_trapezoid(channel='x', system=scanner_parameters, flat_area=k_read,
+                        flat_time=t_gx + 2 * scanner_parameters.adc_dead_time)
+
+    # generate gx spoiler gradient - G_crr
+    gx_spoil = make_trapezoid(channel='x', system=scanner_parameters, area=gx.area, flat_time=params['dG'],
+                              rise_time=params['dG'])
+
+    # read prephase gradient - G_pre
+    gx_pre = make_trapezoid(channel="x", system=scanner_parameters, area=gx.area * 1.50,
+                            rise_time=params['dG'])
+
+    # rephase gradient draft after 90 RF pulse  - G_reph
+    gz_reph = make_trapezoid(channel="z", system=scanner_parameters, area=gz_ex.area * 0.25,
+                             flat_time=calc_duration(gx_pre), rise_time=params['dG'])
+
+    # spoil gradient around 180 RF pulse - G_crs
+    t_gz_spoil = np.ceil(
+        params['t_ref'] / 2 / scanner_parameters.grad_raster_time) * scanner_parameters.grad_raster_time
+    gz_spoil = make_trapezoid(channel='z', system=scanner_parameters, area=gz90.area * 0.75, rise_time=params['dG'],
+                              flat_time=params['dG'])
+
+    # spoil gradient G_sps
+    gz_cr = make_trapezoid(channel='z', system=scanner_parameters, area=gz90.area * 4, rise_time=params['dG'])
+
+    # Creation of ADC
+    adc = make_adc(num_samples=params['Nf'], duration=t_gx, delay=scanner_parameters.adc_dead_time,
+                   system=scanner_parameters)
+
+    #--------------------------
+    # k-space filling quantification
+    #--------------------------
+
+    k_phase = np.double(params['Np']) / np.double(params['FoV_ph'])
+    k_steps_PE = pgu.create_k_steps(k_phase, np.int16(params['Np']))  # list of phase encoding gradients
+
+    n_ex = math.floor(params['Np'] / params['ETL'])  # number of excitations
+    k_space_order_filing = TSE_k_space_fill(n_ex, np.int32(params['ETL']), np.int32(params['Np']), np.int32(
+        params['N_TE']))  # TODO: to create additiolal functions on different k space order filling
+    k_space_save = {'k_space_order': k_space_order_filing}
+    output_filename = "k_space_order_filing"  # save for reconstruction outside the jemris
+    # output_filename = "TSE_T1" + datetime.now().strftime("%Y%m%d_%H%M%S")
+    with open(output_filename + ".json", 'w') as outfile:
+        j.dump(k_space_save, outfile)
+
+    #--------------------------
+    # DELAYS
+    #--------------------------
+
+    block_duration = 0
+    block_duration = max(calc_duration(rf90), calc_duration(gz90)) / 2
+    block_duration += max(calc_duration(gx_pre), calc_duration(gz_spoil))
+    for i in range(np.int32(params['ETL'])):
+        block_duration += max(calc_duration(rf180), calc_duration(gz180))
+        block_duration += max(calc_duration(gx_spoil), calc_duration(gz_spoil))
+        block_duration += max(calc_duration(gx_spoil), calc_duration(adc))
+        block_duration += calc_duration(gz_spoil)
+    block_duration += max(calc_duration(rf180), calc_duration(gz180))
+    block_duration += max(calc_duration(gx_pre), calc_duration(gz_spoil))
+    block_duration += max(calc_duration(rf90), calc_duration(gz90))
+    block_duration += calc_duration(gz_cr)
+
+    #--------------------------
+    # CONSTRUCT CONCATENATIONS timings
+    #--------------------------
+
+    # Quantification of Effective TE loop
+    # eff_time = TE + calc_duration(gx) / 2 + max(calc_duration(gy_pre),calc_duration(gz_spoil)) + calc_duration(gx_spoil) + calc_duration(gz90) / 2
+    eff_time = block_duration  # equal to previous!
+
+    max_slices_per_TR = np.floor(params['TR'] / eff_time)
+    required_concats = np.int32(np.ceil(params['sl_nb'] / max_slices_per_TR))
+    slice_list = list(range(np.int32(params['sl_nb'])))
+    slice_list = [slice_list[x::required_concats] for x in range(required_concats)]
+
+    # Calculate the TR fillers
+    tr_pauses = [(params['TR'] / np.double(len(x))) - eff_time for x in slice_list]
+    tr_pauses = [
+        max(seq.grad_raster_time, seq.rf_raster_time) * np.floor(x / max(seq.grad_raster_time, seq.rf_raster_time)) for
+        x in tr_pauses]
+
+    # Generate the TR fillers
+    tr_fillers = [make_delay(x) for x in tr_pauses]
+
+    # --------------------------
+    # CONSTRUCT SEQUENCE
+    # --------------------------
+
+    for k in range(params['Average']):  # Averages
+        for curr_concat in range(required_concats):
+            for phase_steps in k_space_order_filing:  # iterate over lists of phase steps instead of individual phase steps
+                for curr_slice in range(np.int32(params['sl_nb'])):  # Slices
+                    # Apply RF offsets
+                    n_echo_temp = 0
+                    rf90.freq_offset = pulse_offsets90[curr_slice]
+                    rf180.freq_offset = pulse_offsets180[curr_slice]
+                    rf_restore.freq_offset = pulse_offsets_restore[curr_slice]
+                    rf90.phase_offset = (rf90_phase - 2 * np.pi * rf90.freq_offset * calc_rf_center(rf90)[0])
+                    rf180.phase_offset = (rf180_phase - 2 * np.pi * rf180.freq_offset * calc_rf_center(rf180)[0])
+                    print('curr_concat_' + str(curr_concat))
+                    print('curr_slice_' + str(curr_slice))
+
+                    seq.add_block(gz90, rf90)
+                    seq.add_block(gz_reph, gx_pre)
+                    for phase_step in phase_steps:
+                        print('phase step_' + str(phase_step))
+                        seq.add_block(gz180, rf180)
+                        gy_pre = make_trapezoid(channel='y', system=scanner_parameters,
+                                                area=-k_steps_PE[phase_step], duration=calc_duration(gz_spoil),
+                                                rise_time=params['dG'])
+                        print(k_steps_PE[phase_step])
+
+                        seq.add_block(gz_spoil, gx_spoil, gy_pre)
+                        seq.add_block(gx, adc)
+                        n_echo_temp += 1
+                        gy_pre = make_trapezoid(channel='y', system=scanner_parameters,
+                                                area=k_steps_PE[phase_step], duration=calc_duration(gz_spoil),
+                                                rise_time=params['dG'])
+                        if n_echo_temp == np.int32(params['ETL']):
+                            seq.add_block(gz_spoil, gx_spoil, gy_pre)
+                            seq.add_block(gz180, rf180)
+                            seq.add_block(gz_reph, gx_pre)
+                            seq.add_block(gz_restore, rf_restore)
+                            seq.add_block(gz_cr)
+                        else:
+                            seq.add_block(gz_spoil, gx_spoil, gy_pre)
+                    seq.add_block(tr_fillers[curr_concat])
+
+    ok, error_report = seq.check_timing()
+    if ok:
+        print("Timing check passed successfully")
+    else:
+        print("Timing check failed. Error listing follows:")
+        [print(e) for e in error_report]
+
+    # ======
+    # VISUALIZATION
+    # ======
+    if plot:
+        seq.plot()
+
+    # =========
+    # WRITE .SEQ
+    # =========
+    if weightning == 'T2':
+        seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\t2_TSE_RESTORE\\t2_TSE_RESTORE_matrix32x32.seq')  # Save to disk
+        seq2xml(seq, seq_name='t2_TSE_RESTORE_matrx32x32',
+                out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t2_TSE_RESTORE')
+
+    else:
+        print('works only with T2')
+
+if __name__ == "__main__":
+    main(plot=True, write_seq=True, weightning='T2')

+ 106 - 0
LF_scanner/paper.bib

@@ -0,0 +1,106 @@
+@article{layton2017pulseq,
+  title={Pulseq: a rapid and hardware-independent pulse sequence prototyping framework},
+  doi={10.1002/mrm.26235},
+  author={Layton, Kelvin J and Kroboth, Stefan and Jia, Feng and Littin, Sebastian and Yu, Huijun and Leupold, Jochen and Nielsen, Jon-Fredrik and St{\"o}cker, Tony and Zaitsev, Maxim},
+  journal={Magnetic resonance in medicine},
+  volume={77},
+  number={4},
+  pages={1544--1552},
+  year={2017},
+  publisher={Wiley Online Library}
+}
+
+@inproceedings{ravi2018amri,
+  address = {Washington, D.C.},
+  author = {Ravi, Keerthi Sravan and Geethanath, Sairam and Weber, Jochen and Vaughan, John Thomas},
+  booktitle = {ISMRM Workshop on Machine Learning Part II,},
+  title = {MR Value driven Autonomous MRI using imr-framework},
+  year = {2018}
+}
+
+@inproceedings{ravi2018imrframework,
+  author = {Ravi, Keerthi Sravan and Geethanath, Sairam and Vaughan, John Thomas},
+  booktitle = {i2i Workshop},
+  title = {imr-framework for rapid design and deployment of non-Cartesian sequences},
+  url = {cai2r.net/i2i},
+  year = {2018}
+}
+
+@article{ravi2018pulseq-gpi,
+  title={Pulseq-Graphical Programming Interface: Open source visual environment for prototyping pulse sequences and integrated magnetic resonance imaging algorithm development},
+  doi={10.1016/j.mri.2018.03.008},
+  author={Ravi, Keerthi Sravan and Potdar, Sneha and Poojar, Pavan and Reddy, Ashok Kumar and Kroboth, Stefan and Nielsen, Jon-Fredrik and Zaitsev, Maxim and Venkatesan, Ramesh and Geethanath, Sairam},
+  journal={Magnetic resonance imaging},
+  volume={52},
+  pages={9--15},
+  year={2018},
+  publisher={Elsevier}
+}
+
+@inproceedings{gehua2019ismrm,
+  address = {Montreal, Canada},
+  author = {Tong, Gehua and Geethanath, Sairam and Qian, Enlin and Ravi, Keerthi Sravan and Jimeno Manso, Marina and Vaughan, John Thomas},
+  booktitle = {ISMRM 27th Annual Meeting and Exhibition},
+  title = {Virtual MR Scanner Software},
+  year = {2019}
+}
+
+@article{jochimsen2004odin,
+  title={ODIN—object-oriented development interface for NMR},
+  doi={10.1016/j.jmr.2004.05.021},
+  author={Jochimsen, Thies H and Von Mengershausen, Michael},
+  journal={Journal of Magnetic Resonance},
+  volume={170},
+  number={1},
+  pages={67--78},
+  year={2004},
+  publisher={Elsevier}
+}
+
+@article{magland2016pulse,
+  title={Pulse sequence programming in a dynamic visual environment: SequenceTree},
+  doi={10.1002/mrm.25640},
+  author={Magland, Jeremy F and Li, Cheng and Langham, Michael C and Wehrli, Felix W},
+  journal={Magnetic resonance in medicine},
+  volume={75},
+  number={1},
+  pages={257--265},
+  year={2016},
+  publisher={Wiley Online Library}
+}
+
+@article{nielsen2018toppe,
+  title={TOPPE: A framework for rapid prototyping of MR pulse sequences},
+  doi={10.1002/mrm.26990},
+  author={Nielsen, Jon-Fredrik and Noll, Douglas C},
+  journal={Magnetic resonance in medicine},
+  volume={79},
+  number={6},
+  pages={3128--3134},
+  year={2018},
+  publisher={Wiley Online Library}
+}
+
+@article{poojar2019rapid,
+  title={Rapid prOtotyping of 2D non-CartesIan K-space trajEcTories (ROCKET) using Pulseq and GPI},
+  doi={10.1615/CritRevBiomedEng.2019029380},
+  author={Poojar, Pavan and Geethanath, Sairam and Reddy, Ashok Kumar and Venkatesan, Ramesh},
+  journal={Critical Reviews™ in Biomedical Engineering},
+  publisher={Begel House Inc.}
+}
+
+@inproceedings{ravi2019accessible-amri,
+  address = {New Delhi, India},
+  author = {Ravi, Keerthi Sravan and Geethanath, Sairam and Vaughan, John Thomas},
+  booktitle = {ISMRM Workshop on Accessible MRI for the World},
+  title = {Autonomous scanning using imr-framework to improve MR accessibility},
+  year = {2019}
+}
+
+@inproceedings{ravi2019selfadmin,
+  address = {Montreal, Canada},
+  author = {Ravi, Keerthi Sravan and Geethanath, Sairam and Vaughan, John Thomas},
+  booktitle = {ISMRM 27th Annual Meeting and Exhibition},
+  title = {Self-administered exam using Autonomous Magnetic Resonance Imaging (AMRI)},
+  year = {2019}
+}

+ 106 - 0
LF_scanner/paper.md

@@ -0,0 +1,106 @@
+---
+title: 'PyPulseq: A Python Package for MRI Pulse Sequence Design'
+tags:
+  - Python
+  - MRI
+  - pulse sequence design
+  - vendor neutral
+authors:
+  - name: Keerthi Sravan Ravi
+    orcid: 0000-0001-6886-0101
+    affiliation: 1
+  - name: Sairam Geethanath
+    orcid: 0000-0002-3776-4114
+    affiliation: 1
+  - name: John Thomas Vaughan Jr.
+    orcid: 0000-0002-6933-3757
+    affiliation: 1  
+affiliations:
+ - name: Columbia Magnetic Resonance Research Center, Columbia University in the City of New York, USA
+   index: 1
+date: 21 August 2019
+bibliography: paper.bib
+---
+
+# Summary
+
+Magnetic Resonance Imaging (MRI) is a critical component of healthcare. MRI data is acquired by playing a series of 
+radio-frequency and magnetic field gradient pulses. Designing these pulse sequences requires knowledge of specific 
+programming environments depending on the vendor hardware (generations) and software (revisions) intended for 
+implementation. This impedes the pace of prototyping. Pulseq [@layton2017pulseq] introduced an open source file 
+standard for pulse sequences that can be deployed on Siemens/GE via [TOPPE](https://toppemri.github.io) 
+[@nielsen2018toppe]/[Bruker](https://github.com/pulseq/bruker_interpreter) platforms. In this work, we introduce 
+`PyPulseq`, which enables pulse sequence programming in Python. Its advantages are zero licensing fees and easy 
+integration with deep learning methods developed in Python. `PyPulseq` is aimed at MRI researchers, faculty, students, 
+and other allied field researchers such as those in neuroscience. We have leveraged this tool for several published 
+research works [@poojar2019rapid; @gehua2019ismrm; @ravi2018amri; @ravi2019accessible-amri; @ravi2018imrframework; 
+@ravi2019selfadmin].
+
+# Statement of need
+
+MRI is a non-invasive diagnostic imaging tool. It is a critical component of healthcare and has a significant impact on 
+diagnosis and treatment assessment. Structural, functional and metabolic MRI generate valuable information that aid in 
+the accurate diagnosis of a wide range of pathologies. A unique strength of MRI is the ability to visualise diverse 
+pathologies achieved by the flexibility in designing tailored pulse sequences. MRI pulse sequences are a collection of 
+radio-frequency and gradient waveforms that are executed on the scanner hardware to acquire raw data. 
+
+Research efforts on pulse sequence design are directed at achieving faster scan times, improving tissue contrast and 
+increasing Signal-to-Noise Ratio (SNR). However, designing pulse sequences requires knowledge of specific programming 
+environments depending on the vendor hardware (generations) and software (revisions) intended for implementation. 
+Typically, MRI researchers program and simulate the pulse sequences on computers and execute them on MRI scanners. This 
+typically involves considerable effort, impeding the pace of prototyping and therefore research and development. This 
+also hampers multi-site multi-vendor studies as it requires researchers to be acquainted with each vendor's programming 
+environment. Furthermore, harmonizing acquisition across MRI vendors will enable reproducible research. This work 
+introduces an open source tool that enables pulse sequence programming for Siemens/GE/Bruker platforms in Python, based 
+on the Pulseq standard [@layton2017pulseq].
+
+# Introduction to the Pulseq file format: `.seq`
+
+The `.seq` file format introduced in Pulseq [@layton2017pulseq] is a novel way to capture a pulse sequence as plain 
+text. The file format was designed with the following design criteria in mind: human-readable, easily parsable, vendor 
+independent, compact and low-level [@layton2017pulseq]. A pulse sequence comprises of radiofrequency pulses, magnetic 
+field gradient waveforms, delays or analog-to-digital converter (ADC) readout *events*. A *block* comprises of one or 
+more *events* occurring simultaneously. *Event* envelopes are defined by *shapes*, which are run-length encoded and 
+stored in the `.seq` file. In a `.seq` file, each *event* and *shape* is identified uniquely by an integer. *Blocks* 
+are constructed by assembling the uniquely referenced *events*. Therefore, any custom pulse sequence can be synthesised 
+by concatenating *blocks*.
+
+# About `PyPulseq`
+
+The `PyPulseq` package presented in this work is an open source vendor-neutral MRI pulse sequence design tool. It 
+enables researchers and users to program pulse sequences in Python, and export them as a `.seq` file. These `.seq` files 
+can be executed on the three MRI vendors by leveraging vendor-specific interpreters. The MRI methods have been reported 
+previously [@ravi2018pulseq-gpi]. The `PyPulseq` package allows for both representing and deploying custom sequences. 
+This work focuses on the software aspect of the tool. `PyPulseq` was entirely developed in Python, and this has multiple 
+advantages. Firstly, unlike existing C++ frameworks such as ODIN [@jochimsen2004odin] and SequenceTree [@magland2016pulse],
+`PyPulseq` does not require any compilation of the pulse sequence scripts. Secondly, it does not involve any licensing 
+fees that are otherwise associated with other scientific research platforms such as MATLAB. Thirdly, there has been a 
+proliferation of deep learning projects developed in Python in recent years. These advantages allow `PyPulseq` to be 
+integrated with projects related to various stages of the MRI pipeline. For example - deep learning techniques for 
+acquisition (intelligent slice planning in @ravi2018amri) and related downstream reconstruction. Finally, the 
+standard Python package manager - PyPI - enables convenient installs on multiple OS platforms. These Python-derived 
+benefits ensure that `PyPulseq` can reach a wider audience.
+
+We have leveraged the `PyPulseq` library to implement acquisition oriented components of the Autonomous MRI (AMRI) 
+package [@ravi2018amri; @ravi2019accessible-amri; @ravi2019selfadmin], Virtual Scanner [@gehua2019ismrm], and the 
+non-Cartesian acquisition library [@ravi2018imrframework]. Also, the [`PyPulseq-gpi`](https://github.com/imr-framework/pypulseq/tree/pypulseq-gpi) branch 
+integrates a previous version of `PyPulseq` with [GPI](http://gpilab.com/) to enable GUI-based pulse sequence design. This work has 
+been previously reported [@ravi2018pulseq-gpi] and is not within the scope of this JOSS submission. Currently, 
+`PyPulseq` does not support external triggers and interactive slice planning. Raw data acquired with pulse sequences 
+designed with `PyPulseq` cannot be reconstructed with vendor-supplied tools. `PyPulseq` is a translation of Pulseq from 
+MATLAB [@layton2017pulseq].
+
+# Target audience
+
+`PyPulseq` is aimed at MRI researchers focusing on pulse sequence design, image reconstruction, and MRI physics. We also 
+envisage PyPulseq to be utilized for replicability and reproducibility studies such as those for functional MRI 
+(multi-site, multi-vendor). The package could also serve as a hands-on teaching aid for MRI faculty and students. 
+Beginners can get started with the bundled example pulse sequences. More familiar users can import the appropriate 
+packages to construct and deploy custom pulse sequences.
+
+# Acknowledgements
+
+This study was funded (in part) by the 'MR Technology Development Grant' and the 'Seed Grant Program for MR Studies' 
+of the Zuckerman Mind Brain Behavior Institute at Columbia University (PI: Geethanath).
+
+# References

+ 31 - 0
LF_scanner/py2jemris/.github/ISSUE_TEMPLATE/bug_report.md

@@ -0,0 +1,31 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**To Reproduce**
+Steps to reproduce the behavior:
+1. Go to '...'
+2. Click on '....'
+3. Scroll down to '....'
+4. See error
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Desktop (please complete the following information):**
+ - OS: [e.g. iOS]
+ - Python Version [e.g. 3.8.1]
+
+**Additional context**
+Add any other context about the problem here.

+ 20 - 0
LF_scanner/py2jemris/.github/ISSUE_TEMPLATE/feature_request.md

@@ -0,0 +1,20 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.

+ 120 - 0
LF_scanner/py2jemris/.gitignore

@@ -0,0 +1,120 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+#  Usually these files are written by a python script from a template
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+
+# database
+.db
+
+# Work in progress code
+src_working/
+
+# PyCharm idea files
+\.idea/
+/.idea/
+
+serverlog\.txt
+
+src/server/registration/subject\.db
+
+\.DS_Store

+ 76 - 0
LF_scanner/py2jemris/CODE_OF_CONDUCT.md

@@ -0,0 +1,76 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, sex characteristics, gender identity and expression,
+level of experience, education, socio-economic status, nationality, personal
+appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at imr-framework2018@gmail.com. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see
+https://www.contributor-covenant.org/faq

+ 27 - 0
LF_scanner/py2jemris/CONTRIBUTING.md

@@ -0,0 +1,27 @@
+# Contributing to `py2jemris`
+:thumbsup: :tada: Thanks for taking time to contribute! :thumbsup: :tada:
+
+Here are guidelines (not rules!) for contributing to `py2jemris`. Use your best judgment, and feel free to propose changes to this document in a pull request.
+
+## Table of contents
+1. [Code of Conduct](#code-of-conduct)
+2. [PEP Style Guide for Python coding](#style-guide-for-python-code)
+
+## Code of Conduct
+This project and everyone participating in it is governed by the 
+[`py2jemris` Code of Conduct][code_of_conduct]. 
+By participating, you are expected to uphold this code. Please report unacceptable behavior to 
+[imr.framework2018@gmail.com][email].
+
+## Pull requests
+Follow the coding conventions laid out in the [Style Guide for Python Code][style_guide]. Ensure source code is 
+documented as per the Numpy convention [[numpy1]], [[numpy2]]. If you notice any `py2jemris` code not adhering to [PEP8][style_guide], submit a pull request or open an issue.
+
+## Issues
+Please adhere to the appropriate templates when reporting bugs or requesting features. The templates are automatically presented via Github's 'New Issue' feature.
+
+[email]: mailto:imr.framework2018@gmail.com
+[code_of_conduct]: https://github.com/imr-framework/py2jemris/blob/master/CODE_OF_CONDUCT.md
+[style_guide]: https://www.python.org/dev/peps/pep-0008/
+[numpy1]: https://numpydoc.readthedocs.io/en/latest/format.html
+[numpy2]: https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_numpy.html

+ 674 - 0
LF_scanner/py2jemris/LICENSE

@@ -0,0 +1,674 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.  We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors.  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights.  Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received.  You must make sure that they, too, receive
+or can get the source code.  And you must show them these terms so they
+know their rights.
+
+  Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+  For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software.  For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+  Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so.  This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software.  The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable.  Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products.  If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+  Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary.  To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Use with the GNU Affero General Public License.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.

+ 27 - 0
LF_scanner/py2jemris/README.md

@@ -0,0 +1,27 @@
+# py2jemris
+Python library for interfacing with the [JEMRIS][JEMRIS_repo] MR simulator.
+* Convert [Pulseq][Pulseq_repo]/[PyPulseq][PyPulseq_repo] sequence files (.seq) into JEMRIS format for simulation
+* Construct custom Tx/Rx coil maps and numerical phantoms
+* Perform JEMRIS simulation pipeline for rapid .seq file testing 
+
+## Introduction
+The JEMRIS project provides a fast and robust Bloch simulation core for Magnetic Resonance Imaging (MRI) experiments, along with sequence design functions. The sequence representation in JEMRIS is high level, consisting of nested loops and parameter dependencies across sequence components [[1]](#references). In contrast, the Pulseq MR sequence standard represents the sequence in unrolled, consecutive blocks, with no interdependencies between blocks [[2]](#references).
+
+Importantly, Pulseq is mainly intended for sequence development and can be interfaced to three main vendors for open-source acquisition. While JEMRIS can convert its sequences (typically, an .xml sequence construction file with a list of .h5 waveform data files) into the Pulseq format, it does not allow the reverse operation - converting any Pulseq sequence into a form ready for JEMRIS simulation. We developed py2jemris in order to incorporate simulations into our fully open-source sequence development cycle, as PyPulseq [[3]](#references) scripting allows flexible and rapid open-source sequence construction. 
+
+## Usage
+py2jemris is intended for rapid MR sequence development - it enables dual simulation/acquisition using the same sequence file. 
+
+## Get Started
+To get started, clone the repository and read the function docstrings. You will need to have JEMRIS installed on your system. A Wiki page and a Google Colab Notebook will be available soon.  
+
+## References 
+1. Stöcker, T., Vahedipour, K., Pflugfelder, D., & Shah, N. J. (2010). High‐performance computing MRI simulations. Magnetic resonance in medicine, 64(1), 186-193.
+2. Layton, K. J., Kroboth, S., Jia, F., Littin, S., Yu, H., Leupold, J., ... & Zaitsev, M. (2017). Pulseq: a rapid and hardware‐independent pulse sequence prototyping framework. Magnetic resonance in medicine, 77(4), 1544-1552.
+3. Ravi, K. S., Geethanath, S., & Vaughan, J. T. (2019). PyPulseq: A Python Package for MRI Pulse Sequence Design. Journal of Open Source Software, 4(42), 1725.
+
+
+
+[Pulseq_repo]: https://github.com/pulseq/pulseq
+[PyPulseq_repo]: https://github.com/imr-framework/pypulseq
+[JEMRIS_repo]: https://github.com/JEMRIS/jemris

+ 0 - 0
LF_scanner/py2jemris/__init__.py


+ 1 - 0
LF_scanner/py2jemris/benchmark_seq2xml/.jemris_progress.out

@@ -0,0 +1 @@
+0

BIN
LF_scanner/py2jemris/benchmark_seq2xml/.spins_state.dat


+ 0 - 0
LF_scanner/py2jemris/benchmark_seq2xml/__init__.py


+ 64 - 0
LF_scanner/py2jemris/benchmark_seq2xml/benchmark_seq_files.py

@@ -0,0 +1,64 @@
+from pypulseq.Sequence.sequence import Sequence
+import matplotlib.pyplot as plt
+import h5py
+
+# Load both files
+
+
+q1 = 3
+q2 = 6
+
+seq_orig = Sequence()
+seq_orig.read('gre_pypulseq.seq')
+print('Seq original')
+print(seq_orig.get_block(q1).gx)
+
+
+
+seq_proc = Sequence()
+seq_proc.read('gre_jemris_seq2xml_jemris.seq')
+
+#print("Seq processed:")
+#print(seq_proc.get_block(q2).gx)
+
+#seq_orig.plot(time_range=[0,10])
+
+#seq_proc.plot(time_range=[0,10])
+
+
+sd = h5py.File('gre_jemris_seq2xml_jemris.h5','r')
+sd = sd['seqdiag']
+plt.figure(1)
+plt.subplot(411)
+plt.title("Twice converted JEMRIS sequence diagram")
+
+plt.plot(sd['T'][()],sd['TXM'][()])
+plt.subplot(412)
+plt.plot(sd['T'][()],sd['GX'][()])
+plt.subplot(413)
+plt.plot(sd['T'][()],sd['GY'][()])
+plt.subplot(414)
+plt.plot(sd['T'][()],sd['GZ'][()])
+
+
+
+
+sd = h5py.File('gre.h5','r')
+sd = sd['seqdiag']
+plt.figure(2)
+
+plt.subplot(411)
+plt.title("Original JEMRIS sequence diagram")
+
+plt.plot(sd['T'][()],sd['TXM'][()])
+plt.subplot(412)
+plt.plot(sd['T'][()],sd['GX'][()])
+plt.subplot(413)
+plt.plot(sd['T'][()],sd['GY'][()])
+plt.subplot(414)
+plt.plot(sd['T'][()],sd['GZ'][()])
+
+
+
+
+plt.show()

BIN
LF_scanner/py2jemris/benchmark_seq2xml/ext_rf.h5


BIN
LF_scanner/py2jemris/benchmark_seq2xml/gre.h5


+ 23 - 0
LF_scanner/py2jemris/benchmark_seq2xml/gre.xml

@@ -0,0 +1,23 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Parameters FOVx="128" FOVy="128" FOVz="1" Name="P" Nx="32" Ny="32" Nz="1" TE="8" TR="50">
+   <ConcatSequence Name="R">
+      <ConcatSequence Name="C" Observe="NY=P.Ny" Repetitions="NY">
+         <ATOMICSEQUENCE Name="A1">
+            <HARDRFPULSE Axis="RF" Duration="0.1" FlipAngle="20" InitialPhase="C*(C+1)*50" Name="P1" Observe="C=C.Counter"/>
+         </ATOMICSEQUENCE>
+         <DELAYATOMICSEQUENCE Delay="TE" DelayType="C2C" Name="D1" Observe="TE=P.TE" StartSeq="A1" StopSeq="A3"/>
+         <ATOMICSEQUENCE Name="A2">
+            <TRAPGRADPULSE Area="-A/2" Axis="GX" Name="P2" Observe="A=P4.Area"/>
+            <TRAPGRADPULSE Area="-KMY+C*DKY" Axis="GY" Name="P3" Observe="KMY=P.KMAXy, C=C.Counter, DKY=P.DKy"/>
+         </ATOMICSEQUENCE>
+         <ATOMICSEQUENCE Name="A3">
+            <TRAPGRADPULSE ADCs="NX" Axis="GX" FlatTopArea="2*KMX" FlatTopTime="4" Name="P4" Observe="KMX=P.KMAXx, NX=P.Nx" PhaseLock="1"/>
+         </ATOMICSEQUENCE>
+         <ATOMICSEQUENCE Name="A4">
+            <TRAPGRADPULSE Area="1.5*A" Axis="GX" Name="P6" Observe="A=P4.Area"/>
+            <TRAPGRADPULSE Area="-A" Axis="GY" Name="P7" Observe="A=P3.Area"/>
+         </ATOMICSEQUENCE>
+         <DELAYATOMICSEQUENCE Delay="TR" DelayType="B2E" Name="D2" Observe="TR=P.TR" StartSeq="A1"/>
+      </ConcatSequence>
+   </ConcatSequence>
+</Parameters>

File diff suppressed because it is too large
+ 0 - 0
LF_scanner/py2jemris/benchmark_seq2xml/gre_jemris_seq2xml.xml


BIN
LF_scanner/py2jemris/benchmark_seq2xml/gre_jemris_seq2xml_jemris.h5


+ 1 - 0
LF_scanner/py2jemris/benchmark_seq2xml/mysimu2.xml

@@ -0,0 +1 @@
+<simulate name="JEMRIS"><sample name="Sample" uri="sample.h5" /><TXcoilarray uri="uniform.xml" /><RXcoilarray uri="uniform.xml" /><parameter ConcomitantFields="0" EvolutionPrefix="evol" EvolutionSteps="0" RandomNoise="0" /><sequence name="Sequence" uri="gre_jemris_seq2xml.xml" /><model name="Bloch" type="CVODE" /></simulate>

BIN
LF_scanner/py2jemris/benchmark_seq2xml/rf_1.h5


BIN
LF_scanner/py2jemris/benchmark_seq2xml/rf_2.h5


BIN
LF_scanner/py2jemris/benchmark_seq2xml/rf_3.h5


BIN
LF_scanner/py2jemris/benchmark_seq2xml/rf_4.h5


BIN
LF_scanner/py2jemris/benchmark_seq2xml/rf_5.h5


BIN
LF_scanner/py2jemris/benchmark_seq2xml/rf_6.h5


BIN
LF_scanner/py2jemris/benchmark_seq2xml/rf_7.h5


BIN
LF_scanner/py2jemris/benchmark_seq2xml/rf_8.h5


BIN
LF_scanner/py2jemris/benchmark_seq2xml/sample.h5


BIN
LF_scanner/py2jemris/benchmark_seq2xml/seq_compare.PNG


BIN
LF_scanner/py2jemris/benchmark_seq2xml/seq_compare_zoomed.PNG


BIN
LF_scanner/py2jemris/benchmark_seq2xml/signals.h5


+ 4 - 0
LF_scanner/py2jemris/benchmark_seq2xml/uniform.xml

@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="utf-8"?>
+<CoilArray>
+  <IdealCoil/>
+</CoilArray>

+ 115 - 0
LF_scanner/py2jemris/coil2xml.py

@@ -0,0 +1,115 @@
+# Take user inputs and convert them into either coil2xml or
+
+import xml.etree.ElementTree as ET
+
+import h5py
+import matplotlib.pyplot as plt
+import numpy as np
+
+
+
+def coil2xml(b1maps=None, coil_design=None, fov=256, name='coils', out_folder=''):
+    """
+    Inputs
+    ------
+    b1maps : list, optional
+        List of np.ndarray (dtype='complex') maps for all channels
+    coil_design : dict, optional
+        Dictionary containing information on coil design (see documentation)
+    fov : float
+        Field-of-view of coil in mm
+    name : str, optional
+        Name of generated .xml file
+    out_folder : str, optional
+        Path to directory where the output .h5 is stored
+
+
+    Returns
+    -------
+    None
+
+    """
+    if b1maps is None and coil_design is None:
+        raise ValueError("One of b1map and coil_design must be provided")
+
+    # b1 map case
+    if b1maps is not None:
+
+        root = ET.Element('CoilArray')
+
+        for ch in range(len(b1maps)): # for each channel
+            # Check dimensions
+            if len(b1maps[ch].shape) < 2 or len(b1maps[ch].shape) > 3 \
+                    or max(b1maps[ch].shape) != min(b1maps[ch].shape):
+                raise ValueError("b1map must be a 2D square or 3D cubic array: \
+                                  all sides must be equal")
+
+            N = b1maps[ch].shape[0]
+            dim = len(b1maps[ch].shape)
+
+            b1_magnitude = np.absolute(b1maps[ch])
+            b1_phase = np.angle(b1maps[ch])
+
+            # Make h5 file
+            coil_h5_path = name + f'_ch{ch+1}.h5'
+            coil = h5py.File(out_folder + '/' + coil_h5_path, 'a')
+            if 'maps' in coil.keys():
+                del coil['maps']
+            maps = coil.create_group('maps')
+            magnitude = maps.create_dataset('magnitude',b1_magnitude.shape,dtype='f')
+            phase = maps.create_dataset('phase',b1_phase.shape,dtype='f')
+
+
+            # Set h5 file contents
+            if dim == 2:
+                magnitude[:,:] = b1_magnitude
+                phase[:,:] = b1_phase
+
+            elif dim == 3:
+                print('3d!')
+                magnitude[:,:,:] = b1_magnitude
+                phase[:,:,:] = b1_phase
+
+
+            coil.close()
+
+            # Add corresponding coil to .xml tree
+            externalcoil = ET.SubElement(root, "EXTERNALCOIL")
+            externalcoil.set("Points",str(N))
+            externalcoil.set("Name",f"C{ch+1}")
+            externalcoil.set("Filename",coil_h5_path)
+            externalcoil.set("Extent",str(fov)) # fov is in mm
+            externalcoil.set("Dim",str(dim))
+
+        coil_tree = ET.ElementTree(root)
+        coil_tree.write(out_folder + '/' + name + '.xml')
+
+
+
+
+if __name__ == '__main__':
+    # a = h5py.File('coil2xml/sensmaps.h5','a')
+    # print(a['maps'].keys())
+    # print(a['maps']['magnitude'].keys())
+    # map_mag = a['maps']['magnitude']
+    # map_phase = a['maps']['phase']
+    #
+    #
+    # plt.figure(1)
+    #
+    # for ch in range(8):
+    #     plt.subplot(2,8,ch+1)
+    #     plt.imshow(map_mag[f'0{ch}'][()])
+    #     plt.subplot(2,8,ch+9)
+    #     plt.imshow(map_phase[f'0{ch}'][()])
+    #
+    # plt.show()
+
+    b1 = np.ones((32,32))
+    XY = np.meshgrid(np.linspace(0,1,32), np.linspace(0,1,32))
+    X = XY[0]
+    Y = XY[1]
+    b1 = np.sqrt(X**2 + Y**2)
+    plt.imshow(b1)
+    plt.show()
+    coil2xml(b1maps=[b1], fov=200, name='test_coil', out_folder='coil2xml')

+ 30 - 0
LF_scanner/py2jemris/examine_seq_diag.py

@@ -0,0 +1,30 @@
+import h5py
+import matplotlib.pyplot as plt
+import numpy as np
+
+
+sp1 = h5py.File('sim/test0501/gre_test_0417.h5','r')
+sp2 = h5py.File('sim/test0501/g','r')
+
+print(sp1['seqdiag'].keys())
+print(sp2['seqdiag'].keys())
+
+
+name = 'RXP'
+
+plt.figure(1)
+plt.subplot(211)
+plt.title(name + ' (original)')
+plt.plot(sp1['seqdiag/T'], sp1['seqdiag' + f'/{name}'],'*')
+plt.subplot(212)
+plt.title(name + ' (twice)')
+plt.plot(sp2['seqdiag/T'], sp2['seqdiag' + f'/{name}'],'*')
+
+plt.show()
+
+
+rxp_1 = np.array(sp1['seqdiag/RXP'])
+rxp_2 = np.array(sp2['seqdiag/RXP'])
+
+print(rxp_1*180/np.pi)
+print(rxp_2*180/np.pi)

+ 15 - 0
LF_scanner/py2jemris/make_some_seqs.py

@@ -0,0 +1,15 @@
+from pypulseq.Sequence.sequence import Sequence
+from pypulseq.opts import Opts
+from pypulseq.make_arbitrary_grad import make_arbitrary_grad
+import numpy as np
+
+seq = Sequence(system=Opts())
+
+gx = make_arbitrary_grad(channel='x',waveform=np.array([1,2,3,4,5,4,3,2,1]))
+seq.add_block(gx)
+
+seq.write("hiseq.seq")
+
+seq2 = Sequence()
+seq2.read('hiseq.seq')
+print(seq2.get_block(1).gx)

+ 597 - 0
LF_scanner/py2jemris/phantom.py

@@ -0,0 +1,597 @@
+# Copyright of the Board of Trustees of Columbia University in the City of New York
+"""
+Numerical phantom generation and access
+"""
+
+import numpy as np
+import scipy.signal as ss
+import h5py
+import matplotlib.pyplot as plt
+
+class Phantom:
+    """Generic numerical phantom for MRI simulations
+
+    The phantom is mainly defined by three matrices of T1, T2, and PD values, respectively.
+    At the moment, each index in the matrix corresponds to a single spin group.
+    The overall physical size is determined by vsize; phantom voxels must be isotropic.
+
+    Parameters
+    ----------
+    T1map : numpy.ndarray
+        Matrix of T1 values in seconds
+    T2map : numpy.ndarray
+        Matrix of T2 values in seconds
+    PDmap : numpy.ndarray
+        Matrix PD values between 0 and 1
+    vsize : float
+        Voxel size in meters (isotropic)
+    dBmap : numpy.ndarray, optional
+        Matrix of B0 magnetic field variation across phantom
+        The default is 0 and means no variation
+    loc : tuple, optional
+        Overall location of phantom in meters; default is (0,0,0)
+
+    Attributes
+    ----------
+    fov : numpy.ndarray
+        [fov_x, fov_y, fov_z]
+        1 x 3 array of fields-of-view in x, y, and z directions
+    Xs : numpy.ndarray
+        1D array of all x locations in phantom
+    Ys : numpy.ndarray
+        1D array of all y locations in phantom
+    Zs : numpy.ndarray
+        1D array of all z locations in phantom
+
+    """
+    def __init__(self,T1map,T2map,PDmap,vsize,dBmap=0,loc=(0,0,0)):
+        self.vsize = vsize
+        self.T1map = T1map
+        self.T2map = T2map
+        self.PDmap = PDmap
+        self.vsize = vsize
+        self.dBmap = dBmap
+        self.loc = loc
+
+        # Find field-of-view
+        self.fov = vsize*np.array(np.shape(T1map))
+
+        # Make location vectors
+        ph_shape = np.shape(self.PDmap)
+
+        # Define coordinates
+        self.Xs = self.loc[0]+np.arange(-self.fov[0] / 2 + vsize / 2, self.fov[0] / 2, vsize)
+        self.Ys = self.loc[1]+np.arange(-self.fov[1] / 2 + vsize / 2, self.fov[1] / 2, vsize)
+        self.Zs = self.loc[2]+np.arange(-self.fov[2] / 2 + vsize / 2, self.fov[2] / 2, vsize)
+
+    def get_location(self,indx):
+        """Returns (x,y,z) physical location in meters at given indices
+
+        Parameters
+        ----------
+        indx : tuple or array_like
+            (ind1, ind2, ind3)
+            Index for querying
+
+        Returns
+        -------
+        x, y, z : float
+            physical location corresponding to index
+
+        """
+        return self.Xs[indx[0]], self.Ys[indx[1]], self.Zs[indx[2]]
+
+    def get_shape(self):
+        """Returns the phantom's matrix size
+
+        Returns
+        -------
+        shape : tuple
+            The matrix size in three dimensions
+
+        """
+        return np.shape(self.PDmap)
+
+    def get_params(self,indx):
+        """Returns PD, T1, and T2 at given indices
+
+        Parameters
+        ----------
+        indx : tuple
+            Index for querying
+
+        Returns
+        -------
+        PD, T1, T2 : float
+            Tissue parameters corresponding to the queried index
+
+        """
+        return self.PDmap[indx],self.T1map[indx],self.T2map[indx]
+
+    def get_list_locs(self):
+        """Returns a flattened 1D array of all location vectors [(x1,y1,z1),...,(xk,yk,zk)]
+
+        Returns
+        -------
+        list_locs : list
+
+        """
+        list_locs = []
+        for x in self.Xs:
+            for y in self.Ys:
+                for z in self.Zs:
+                    list_locs.append((x, y, z))
+        return list_locs
+
+    def get_list_inds(self):
+        """Returns a flattened 1D array of all indices in phantom [(u1,v1,w1),...,(uk,vk,wk)]
+
+        Returns
+        -------
+        list_inds : list
+
+        """
+        list_inds = []
+        sh = self.get_shape()
+        for u in range(sh[0]):
+            for v in range(sh[1]):
+                for w in range(sh[2]):
+                    list_inds.append((u,v,w))
+        return list_inds
+
+    def output_h5(self, output_folder, name='phantom'):
+        """
+        Inputs
+        ------
+        output_folder : str
+            Folder in which to output the h5 file
+        """
+
+        GAMMA = 2 * 42.58e6 * np.pi
+
+        pht_shape = list(np.flip(self.get_shape()))
+        dim = len(pht_shape)
+
+        pht_shape.append(5)
+
+
+        PDmap_au = np.swapaxes(self.PDmap,0,-1)
+        T1map_ms = np.swapaxes(self.T1map * 1e3, 0,-1)
+        T2map_ms = np.swapaxes(self.T2map * 1e3,0,-1)
+
+        T1map_ms_inv = np.where(T1map_ms > 0, 1/T1map_ms, 0)
+        T2map_ms_inv = np.where(T2map_ms > 0, 1/T2map_ms, 0)
+
+
+        if np.shape(self.dBmap) == tuple(pht_shape):
+            dBmap_rad_per_ms = np.swapaxes(self.dBmap * GAMMA * 1e-3, 0, -1)
+        else:
+            dBmap_rad_per_ms = self.dBmap * GAMMA * 1e-3
+
+
+
+
+        if len(output_folder) > 0:
+            output_folder += '/'
+        pht_file = h5py.File(output_folder + name + '.h5', 'a')
+        if "sample" in pht_file.keys():
+            del pht_file["sample"]
+
+        sample = pht_file.create_group('sample')
+
+        data = sample.create_dataset('data', tuple(pht_shape),
+                                     dtype='f')  # M0, 1/T1 [1/ms], 1/T2 [1/ms], 1/T2* [1/ms], chemical shift [rad/ms]
+        offset = sample.create_dataset('offset', (3, 1), dtype='f')
+        resolution = sample.create_dataset('resolution', (3, 1), dtype='f')
+
+        if dim == 1:
+            data[:, 0] = PDmap_au
+            #data[:, 1] = 1 / T1map_ms
+            data[:, 1] = T1map_ms_inv
+            #data[:, 2] = 1 / T2map_ms
+            data[:, 2] = T2map_ms_inv
+            #data[:, 3] = 1 / T2map_ms  # T2 assigned as T2* for now
+            data[:, 3] = T2map_ms_inv
+            data[:, 4] = dBmap_rad_per_ms
+
+        elif dim == 2:
+            data[:, :, 0] = PDmap_au
+            #data[:, :, 1] = 1 / T1map_ms
+            #data[:, :, 2] = 1 / T2map_ms
+            #data[:, :, 3] = 1 / T2map_ms  # T2 assigned as T2* for now
+            data[:, :, 1] = T1map_ms_inv
+            data[:, :, 2] = T2map_ms_inv
+            data[:, :, 3] = T2map_ms_inv
+            data[:, :, 4] = dBmap_rad_per_ms
+
+        elif dim == 3:
+            data[:, :, :, 0] = PDmap_au
+            #data[:, :, :, 1] = 1 / T1map_ms
+            #data[:, :, :, 2] = 1 / T2map_ms
+            #data[:, :, :, 3] = 1 / T2map_ms  # T2 assigned as T2* for now
+            data[:, :, :, 1] = T1map_ms_inv
+            data[:, :, :, 2] = T2map_ms_inv
+            data[:, :, :, 3] = T2map_ms_inv
+            data[:, :, :, 4] = dBmap_rad_per_ms
+
+
+        offset[:,0] = np.array(self.loc)*1000 # meters to mm conversion
+        resolution[:,0] = [self.vsize*1000]*3 # isotropic
+
+        pht_file.close()
+
+        return
+
+class DTTPhantom(Phantom):
+    """Discrete tissue type phantom
+
+    Phantom constructed from a finite set of tissue types and their parameters
+
+    Parameters
+    ----------
+    type_map : numpy.ndarray
+        Matrix of integers that map to tissue types
+    type_params : dict
+        Dictionary that maps tissue type number to tissue type parameters (PD,T1,T2)
+    vsize : float
+        Voxel size in meters (isotropic)
+    dBmap : numpy.ndarray, optional
+        Matrix of B0 magnetic field variation across phantom
+        The default is 0 and means no variation
+    loc : tuple, optional
+        Overall location of phantom; default is (0,0,0)
+
+    """
+
+    def __init__(self,type_map,type_params,vsize,dBmap=0,loc=(0,0,0)):
+        print(type(type_map))
+        self.type_map = type_map
+        self.type_params = type_params
+        T1map = np.ones(np.shape(type_map))
+        T2map = np.ones(np.shape(type_map))
+        PDmap = np.zeros(np.shape(type_map))
+
+        for x in range(np.shape(type_map)[0]):
+            for y in range(np.shape(type_map)[1]):
+                for z in range(np.shape(type_map)[2]):
+                    PDmap[x,y,z] = type_params[type_map[x,y,z]][0]
+                    T1map[x,y,z] = type_params[type_map[x,y,z]][1]
+                    T2map[x,y,z] = type_params[type_map[x,y,z]][2]
+
+        super().__init__(T1map,T2map,PDmap,vsize,dBmap,loc)
+
+
+class BrainwebPhantom(Phantom):
+    """This phantom is in development.
+
+    """
+
+    def __init__(self, filename,dsf=1,make2d=False,loc=0,dir='z',dBmap=0):
+        dsf = int(np.absolute(dsf))
+        bw_data = np.load(filename).all()
+        params = {k: np.array([v[3],v[0],v[1]]) for k, v in bw_data['params'].items()}
+
+        typemap =  bw_data['typemap']
+
+        dr = 1e-3 # 1mm voxel size
+
+        # If we want planar phantom, then let's take the slice!
+        if make2d:
+            if dir in ['sagittal','x']:
+                n = np.shape(typemap)[0]
+                xx = dr*(n-1)
+                loc_ind = int((n/xx)*loc + n/2)
+                if loc_ind < 0:
+                    loc_ind = 0
+                if loc_ind > n-1:
+                    loc_ind = n-1
+                typemap = typemap[[loc_ind],:,:]
+
+            elif dir in ['coronal','y']:
+                n = np.shape(typemap)[1]
+                yy = dr*(n-1)
+                loc_ind = int((n/yy)*loc + n/2)
+                if loc_ind < 0:
+                    loc_ind = 0
+                if loc_ind > n - 1:
+                    loc_ind = n - 1
+                typemap = typemap[:,[loc_ind],:]
+
+            elif dir in ['axial','z']:
+                n = np.shape(typemap)[2]
+                zz = dr*(n-1)
+                loc_ind = int((n/zz)*loc + n/2)
+                if loc_ind < 0:
+                    loc_ind = 0
+                if loc_ind > n - 1:
+                    loc_ind = n - 1
+                typemap = typemap[:,:,[loc_ind]]
+
+        # Make parm maps from typemap
+        a,b,c = np.shape(typemap)
+        T1map = np.ones((a,b,c))
+        T2map = np.ones((a,b,c))
+        PDmap = np.zeros((a,b,c))
+
+        for x in range(a):
+            for y in range(b):
+                for z in range(c):
+                    PDmap[x,y,z] = params[typemap[x,y,z]][0]
+                    T1map[x,y,z] = params[typemap[x,y,z]][1]
+                    T2map[x,y,z] = params[typemap[x,y,z]][2]
+
+        # Downsample maps
+        a,b,c = np.shape(PDmap)
+
+        if a == 1:
+            ax = [1,2]
+        elif b == 1:
+            ax = [0,2]
+        elif c == 1:
+            ax = [0,1]
+        else:
+            ax = [0,1,2]
+
+        for v in range(len(ax)):
+            PDmap = ss.decimate(PDmap, dsf, axis=ax[v], ftype='fir')
+            T1map = ss.decimate(T1map, dsf, axis=ax[v], ftype='fir')
+            T2map = ss.decimate(T2map, dsf, axis=ax[v], ftype='fir')
+
+
+        dr = dr*dsf
+        PDmap = np.clip(PDmap,a_min=0,a_max=1)
+        T1map = np.clip(T1map,a_min=0,a_max=None)
+        T2map = np.clip(T2map,a_min=0,a_max=None)
+
+        super().__init__(T1map,T2map,PDmap,dr,dBmap)
+
+
+class SpheresArrayPlanarPhantom(DTTPhantom):
+    """2D phantom extracted from a cylinder containing spheres
+
+    Regardless of dir, this will be an axial slice of a cylinder
+    That is, the plane is constructed as a z-slice and then re-indexed to lie in the x or y plane
+    The centers of spheres will correspond to locations before re-indexing
+
+    Parameters
+    ----------
+    centers : list or array_like
+        List of 3D physical locations of the spheres' centers
+    radii : list or array_like
+        List of radii for the spheres
+    type_params : dict
+        Dictionary that maps tissue type number to tissue type parameters (PD,T1,T2)
+    fov : float
+        Field of view (isotropic)
+    n : int
+        Matrix size
+    dir : str, optional {'z','x','y'}
+        Orientation of plane; default is z
+    R : float, optional
+        Cylinder's cross-section radius; default is half of fov
+    loc : tuple, optional
+        Overall location (x,y,z) of phantom from isocenter in meters
+        Default is (0,0,0)
+
+
+    """
+    def __init__(self, centers, radii, type_params, fov, n, dir='z',R=0,loc=(0,0,0)):
+        if R == 0:
+            R = fov/2
+        vsize = fov/n
+        type_map = np.zeros((n,n,1))
+        q = (n-1)/2
+        centers_inds = [(np.array(c) / vsize + q) for c in centers]
+        nc = len(centers)
+        for r1 in range(n):
+            for r2 in range(n):
+                if vsize * np.sqrt((r1-q)**2+(r2-q)**2)<R:
+                    type_map[r1,r2,0] = nc + 1
+                for k in range(len(centers_inds)):
+                    ci = centers_inds[k]
+                    d = vsize * np.sqrt((r1 - ci[0]) ** 2 + (r2 - ci[1])**2)
+                    if d <= radii[k]:
+                        type_map[r1,r2,0] = k + 1
+                        break
+        if dir == 'x':
+            type_map = np.swapaxes(type_map, 1, 2)
+            type_map = np.swapaxes(type_map, 0, 1)
+        elif dir == 'y':
+            type_map = np.swapaxes(type_map, 0, 2)
+            type_map = np.swapaxes(type_map, 0, 1)
+
+        super().__init__(type_map, type_params, vsize, loc=loc)
+
+
+def makeSphericalPhantom(n,fov,T1s,T2s,PDs,radii,loc=(0,0,0)):
+    """Make a spherical phantom with concentric layers
+
+    Parameters
+    ----------
+    n : int
+        Matrix size of phantom (isotropic)
+    fov : float
+        Field of view of phantom (isotropic)
+    T1s : numpy.ndarray or list
+        List of T1s in seconds for the layers, going outward
+    T2s : numpy.ndarray or list
+        List of T2s in seconds for the layers, going outward
+    PDs : numpy.ndarray or list
+        List of PDs between 0 and 1 for the layers, going outward
+    radii : numpy.ndarray
+        List of radii that define the layers
+        Note that the radii are expected to go from smallest to largest
+        If not, they will be sorted first without sorting the parameters
+    loc : tuple, optional
+        Overall (x,y,z) location of phantom; default is (0,0,0)
+
+    Returns
+    -------
+    phantom : DTTPhantom
+
+    """
+    radii = np.sort(radii)
+    m = np.shape(radii)[0]
+    vsize = fov/n
+    type_map = np.zeros((n,n,n))
+    type_params = {}
+    for x in range(n):
+        for y in range(n):
+            for z in range(n):
+                d = vsize*np.linalg.norm(np.array([x,y,z])-(n-1)/2)
+                for k in range(m):
+                    if d <= radii[k]:
+                        type_map[x,y,z] = k+1
+                        break
+
+    type_params[0] = (0,1,1)
+    for k in range(m):
+        type_params[k+1] = (PDs[k],T1s[k],T2s[k])
+
+    return DTTPhantom(type_map,type_params,vsize,loc)
+
+
+def makePlanarPhantom(n,fov,T1s,T2s,PDs,radii,dir='z',loc=(0,0,0)):
+    """Make a circular 2D phantom with concentric layers
+
+    Parameters
+    ----------
+    n : int
+        Matrix size of phantom (isotropic)
+    fov : float
+        Field of view of phantom (isotropic)
+    T1s : numpy.ndarray or list
+        List of T1s in seconds for the layers, going outward
+    T2s : numpy.ndarray or list
+        List of T2s in seconds for the layers, going outward
+    PDs : numpy.ndarray or list
+        List of PDs between 0 and 1 for the layers, going outward
+    radii : numpy.ndarray
+        List of radii that define the layers
+        Note that the radii are expected to go from smallest to largest
+        If not, they will be sorted first without sorting the parameters
+    dir : str, optional {'z','x','y'}
+         Orientation of the plane; default is z, axial
+    loc : tuple, optional
+        Overall (x,y,z) location of phantom; default is (0,0,0)
+
+    Returns
+    -------
+    phantom : DTTPhantom
+
+    """
+    radii = np.sort(radii)
+    m = np.shape(radii)[0]
+    vsize = fov / n
+    type_map = np.zeros((n, n, 1))
+    type_params = {}
+    for x in range(n):
+        for y in range(n):
+                d = vsize * np.linalg.norm(np.array([x, y]) - (n - 1) / 2)
+                for k in range(m):
+                    if d <= radii[k]:
+                        type_map[x,y,0] = k + 1
+                        break
+
+    type_params[0] = (0, 1, 1)
+    for k in range(m):
+        type_params[k + 1] = (PDs[k], T1s[k], T2s[k])
+
+    if dir == 'x':
+        type_map = np.swapaxes(type_map,1,2)
+        type_map = np.swapaxes(type_map,0,1)
+    elif dir =='y':
+        type_map = np.swapaxes(type_map,0,2)
+        type_map = np.swapaxes(type_map,0,1)
+
+    return DTTPhantom(type_map, type_params, vsize, loc)
+
+
+def makeCylindricalPhantom(dim=2,n=16,dir='z',loc=0,fov=0.24):
+    """Makes a cylindrical phantom with fixed geometry and T1, T2, PD but variable resolution and overall size
+
+    The cylinder's diameter is the same as its height; three layers of spheres represent T1, T2, and PD variation.
+
+    Parameters
+    ----------
+    dim : int, optional {2,3}
+         Dimension of phantom created
+    n : int
+        Number of spin groups in each dimension; default is 16
+    dir : str, optional {'z', 'x', 'y'}
+        Direction (norm) of plane in the case of 2D phantom
+    loc : float, optional
+        Location of plane relative to isocenter; default is 0
+    fov : float, optional
+        Physical length for both diameter and height of cylinder
+
+    Returns
+    -------
+    phantom : DTTPhantom
+
+    """
+    R = fov/2 # m
+    r = R/4 # m
+    h = fov # m
+    s2 = np.sqrt(2)
+    s3 = np.sqrt(3)
+    vsize = fov/n
+    centers = [(0,R/2,0.08),(-R*s3/4,-R/4,0.08),(R*s3/4,-R/4,0.08), # PD spheres
+               (R/(2*s2),R/(2*s2),0),(-R/(2*s2),R/(2*s2),0),(-R/(2*s2),-R/(2*s2),0),(R/(2*s2),-R/(2*s2),0), # T1 spheres
+               (0,R/2,-0.08),(-R/2,0,-0.08),(0,-R/2,-0.08),(R/2,0,-0.08)] # T2 spheres
+    centers_inds = [(np.array(c)/vsize + (n-1)/2) for c in centers]
+
+    type_params = {0:(0,1,1), # background
+                   1:(1,0.5,0.1),2:(0.75,0.5,0.1),3:(0.5,0.5,0.1), # PD spheres
+                4:(0.75,1.5,0.1),5:(0.75,0.6,0.1),6:(0.75,0.25,0.1),7:(0.75,0.1,0.1), # T1 spheres
+                8:(0.75,0.5,0.5),9:(0.75,0.5,0.15),10:(0.75,0.5,0.05),11:(0.75,0.5,0.01), # T2 spheres
+                13:(0.25,0.5,0.1)}
+
+    q = (n - 1) / 2
+    p = 'xyz'.index(dir)
+    pht_loc = (0, 0, 0)
+
+    if dim == 3:
+        type_map = np.zeros((n, n, n))
+        for x in range(n):
+            for y in range(n):
+                for z in range(n):
+                    if vsize*np.sqrt((x-q)**2 + (y-q)**2) < R:
+                        type_map[x,y,z] = 13
+                    for k in range(len(centers_inds)):
+                        ci = centers_inds[k]
+                        d = vsize*np.sqrt((x-ci[0])**2+(y-ci[1])**2+(z-ci[2])**2)
+                        if d <= r:
+                            type_map[x, y, z] = k + 1
+                            break
+
+    elif dim == 2:
+        pht_loc = np.roll((loc,0,0),p)
+        # 2D phantom
+        type_map = np.zeros(np.roll((1,n,n),p))
+        for r1 in range(n):
+            for r2 in range(n):
+                x,y,z = np.roll([q+loc/vsize,r1,r2],p)
+                u,v,w = np.roll((0,r1,r2),p)
+                if vsize*np.sqrt((x-q)**2 + (y-q)**2) < R:
+                    type_map[u,v,w] = 13
+                    for k in range(len(centers_inds)):
+                        ci = centers_inds[k]
+                        d = vsize*np.sqrt((x-ci[0])**2+(y-ci[1])**2+(z-ci[2])**2)
+                        if d <= r:
+                            type_map[u,v,w] = k + 1
+                            break
+
+
+    else:
+        raise ValueError('#Dimensions must be 2 or 3')
+
+    phantom = DTTPhantom(type_map, type_params, vsize, loc=(0,0,0))
+    return phantom
+
+
+if __name__ == '__main__':
+    pht = makeCylindricalPhantom(dim=2, n=16, dir='z', loc=-0.08, fov = 0.25)
+    plt.imshow(pht.PDmap)
+    plt.show()
+    print(pht.loc)

+ 33 - 0
LF_scanner/py2jemris/pull_request_template.md

@@ -0,0 +1,33 @@
+# Description
+
+Summarize feature additions, code improvements, or issue fixes. Include relevant motivation and context. List any dependencies with versions specified. 
+
+If responding to a posted GitHub Issue, refer to it here. 
+e.g. This fixes issue #(issue number). 
+
+## Type of change
+
+- [ ] Bug fix (non-breaking change which fixes an issue)
+- [ ] New feature (non-breaking change which adds functionality)
+- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
+- [ ] This change requires a documentation update
+
+# How Has This Been Tested?
+
+Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration.
+
+- [ ] Test A
+- [ ] Test B
+
+**Test Configuration**:
+* Operating system: 
+* Python version: 
+
+# Checklist:
+
+- [ ] My code follows the style guidelines of this project
+- [ ] I have performed a self-review of my own code
+- [ ] I have commented my code, particularly in hard-to-understand areas and changes upon existing code 
+- [ ] I have made corresponding changes to the documentation
+- [ ] I have added tests that prove my fix is effective or that my feature works
+- [ ] New and existing unit tests pass locally with my changes

+ 210 - 0
LF_scanner/py2jemris/pulseq_jemris_simulator.py

@@ -0,0 +1,210 @@
+# Same role as pulseq_bloch_simulator.py except (1) It uses JEMRIS (2) It is run as a function, not a script
+
+# INPUTS: sequence type, geometry parameters, contrast parameters,
+#         phantom type (pre-set or custom), coil type (pre-set or custom) (Tx / Rx),
+#         k-space trajectory (if noncartesian; flattened version (Nro_total, 3))
+# Standard library
+import os
+import shutil
+import time
+import xml.etree.ElementTree as ET
+
+# Third-party
+import numpy as np
+from pypulseq.Sequence.sequence import Sequence
+from scipy.io import savemat, loadmat
+
+# Local modules
+import phantom as pht
+from coil2xml import coil2xml
+from pulseq_library import make_pulseq_se_oblique,make_pulseq_gre_oblique, make_pulseq_irse_oblique
+from recon_jemris import read_jemris_output, recon_jemris
+from seq2xml import seq2xml
+from sim_jemris import sim_jemris
+#from virtualscanner.core import constants
+
+def simulate_pulseq_jemris(seq_path, phantom_info, coil_fov,
+                           tx='uniform', rx='uniform', # TODO add input that includes sequence info for
+                                                       # TODO      dimensioning the RO points into kspace
+                           tx_maps=None, rx_maps=None, sim_name=None, env_option="local"):
+    """Runs simulation using an already-made .seq file
+
+    Inputs
+    ------
+    seq_path : str
+        Path to seq file
+    phantom_info : dict
+        Information used to create phantom; input to create_and_save_phantom()
+    coil_fov : float
+        Field-of-view of coil in mm
+    tx : str, optional
+        Type of tx coil; default is 'uniform'; the only other option is 'custom'
+    rx : str, optional
+        Type of rx coil; default is 'uniform'; the only other option is 'custom'
+    tx_maps : list, optional
+        List of np.ndarray (dtype='complex') maps for all tx channels
+        Required for 'custom' type tx
+    rx_maps : list, optional
+        List of np.ndarray (dtype='complex') maps for all rx channels
+        Required for 'custom' type rx
+    sim_name : str, optional
+        Used as folder name inside sim folder
+        Default is None, in which case sim_name will be set to the current timestamp
+
+    Returns
+    -------
+    sim_output :
+        Delivers output from sim_jemris
+
+    """
+    if sim_name is None:
+        sim_name = time.strftime("%Y%m%d%H%M%S")
+
+    if env_option == 'local':
+        target_path =  'sim\\' +  sim_name
+    elif env_option == 'colab':
+        target_path = 'sim\\' + sim_name
+
+    # Make target folder
+    dir_str = f'sim\\{sim_name}'
+    if not os.path.isdir(dir_str):
+        os.system(f'mkdir {dir_str}')
+
+    # Convert .seq to .xml
+    seq = Sequence()
+    seq.read(seq_path)
+    print(seq.get_block(1))
+    seq_name = seq_path[seq_path.rfind('/')+1:seq_path.rfind('.seq')]
+    seq2xml(seq, seq_name=seq_name, out_folder=str(target_path))
+
+    # Make phantom and save as .h5 file
+    pht_name = create_and_save_phantom(phantom_info, out_folder=target_path)
+
+
+    # Make sure we have the tx/rx files
+    tx_filename = tx + '.xml'
+    rx_filename = rx + '.xml'
+
+    # Save Tx as xml
+    if tx == 'uniform':
+        cp_command = f'copy {os.getcwd()}\\sim\\{tx}.xml {os.getcwd()}\\{str(target_path)}\\{tx}.xml'
+        print(cp_command)
+        a = os.system(cp_command)
+        print(a)
+    elif tx == 'custom' and tx_maps is not None:
+        coil2xml(b1maps=tx_maps, fov=coil_fov, name='custom_tx', out_folder=target_path)
+        tx_filename = 'custom_tx.xml'
+    else:
+        raise ValueError('Tx coil type not found')
+
+    # save Rx as xml
+    if rx == 'uniform':
+        os.system(f'copy sim\\{rx}.xml sim\\{str(target_path)}')
+    elif rx == 'custom' and rx_maps is not None:
+        coil2xml(b1maps=rx_maps, fov=coil_fov, name='custom_rx', out_folder=target_path)
+        rx_filename = 'custom_rx.xml'
+    else:
+        raise ValueError('Rx coil type not found')
+
+    # Run simuluation in target folder
+    list_sim_files = {'seq_xml': seq_name+'.xml', 'pht_h5': pht_name + '.h5', 'tx_xml': tx_filename,
+                       'rx_xml': rx_filename}
+    sim_output = sim_jemris(list_sim_files=list_sim_files, working_folder=target_path)
+
+    return sim_output
+
+
+# TODO
+def create_and_save_phantom(phantom_info, out_folder):
+    """Generates a phantom and saves it into desired folder as .h5 file for JEMRIS purposes
+
+    Inputs
+    ------
+    phantom_info : dict
+        Info of phantom to be constructed
+
+        REQUIRED
+        'fov' : float, field-of-view [meters]
+        'N' : int, phantom matrix size (isotropic)
+        'type' : str, 'spherical', 'cylindrical' or 'custom'
+        'dim' : int, either 3 or 2; 3D or 2D phantom options
+        'dir' : str, {'x', 'y', 'z'}; orientation of 2D phantom
+
+        OPTIONAL (only required for 'custom' phantom type)
+        'T1' : np.ndarray, T1 map matrix
+        'T2' : np.ndarray, T2 map matrix
+        'PD' : np.ndarray, PD map matrix
+        'dr' : float, voxel size [meters] (isotropic)
+        'dBmap' : optional even for 'custom' type. If not provided, dB is set to 0 everywhere.
+
+
+    out_folder : str or pathlib Path object
+        Path to directory where phantom will be saved
+
+    Returns
+    -------
+    pht_type : str
+        phantom_info['pht_type'] (returned for naming purposes)
+
+
+    """
+    out_folder = str(out_folder)
+
+    FOV = phantom_info['fov']
+    N = phantom_info['N']
+    pht_type = phantom_info['type']
+    pht_dim = phantom_info['dim']
+    pht_dir = phantom_info['dir']
+    pht_loc = phantom_info['loc']
+
+    sim_phantom = 0
+
+    if pht_type == 'spherical':
+        print('Making spherical phantom')
+        T1s = [1000*1e-3]
+        T2s = [100*1e-3]
+        PDs = [1]
+        R = 0.8*FOV/2
+        Rs = [R]
+        if pht_dim == 3:
+            sim_phantom = pht.makeSphericalPhantom(n=N, fov=FOV, T1s=T1s, T2s=T2s, PDs=PDs, radii=Rs)
+
+        elif pht_dim == 2:
+            sim_phantom = pht.makePlanarPhantom(n=N, fov=FOV, T1s=T1s, T2s=T2s, PDs=PDs, radii=Rs,
+                                                dir=pht_dir, loc=0)
+    elif pht_type == 'cylindrical':
+        print("Making cylindrical phantom")
+        sim_phantom = pht.makeCylindricalPhantom(dim=pht_dim, n=N, dir=pht_dir, loc=pht_loc)
+
+    elif pht_type == 'custom':
+        # Use a custom file!
+        T1 = phantom_info['T1']
+        T2 = phantom_info['T2']
+        PD = phantom_info['PD']
+        dr = phantom_info['dr']
+        if 'dBmap' in phantom_info.keys():
+            dBmap = phantom_info['dBmap']
+        else:
+            dBmap = 0
+
+        sim_phantom = pht.Phantom(T1map=T1, T2map=T2, PDmap=PD, vsize=dr, dBmap=dBmap, loc=(0,0,0))
+
+    else:
+        raise ValueError("Phantom type non-existent!")
+
+    # Save as h5
+    sim_phantom.output_h5(out_folder, pht_type)
+
+    return pht_type
+
+# Script driver: simulate one of three pre-generated sequences and save k-space,
+# image space, and reconstructed images to a .mat file.
+if __name__ == '__main__':
+    # Define the same phantom
+    # NOTE(review): no 'loc' key is set here; confirm create_and_save_phantom
+    # handles a missing 'loc' for the 'cylindrical' type.
+    phantom_info = {'fov': 0.256, 'N': 32, 'type': 'cylindrical', 'dim':2, 'dir':'z'}
+    sim_names = ['test0413_GRE', 'test0413_SE', 'test0413_IRSE']
+    # .seq files previously generated by the commented-out calls below
+    sps = ['gre_fov256mm_Nf15_Np15_TE50ms_TR200ms_FA90deg.seq',
+           'se_fov256mm_Nf15_Np15_TE50ms_TR200ms_FA90deg.seq',
+           'irse_fov256mm_Nf15_Np15_TI20ms_TE50ms_TR200ms_FA90deg.seq']
+    # make_pulseq_irse_oblique(fov=0.256,n=15, thk=0.005, tr=0.2, te=0.05, ti=0.02, fa=90,
+    #                          enc='xyz', slice_locs=[0], write=True)
+    # make_pulseq_gre_oblique(fov=0.256,n=15, thk=0.005, tr=0.2, te=0.05, fa=90,
+    #                          enc='xyz', slice_locs=[0], write=True)
+    # make_pulseq_se_oblique(fov=0.256,n=15, thk=0.005, tr=0.2, te=0.05, fa=90,
+    #                          enc='xyz', slice_locs=[0], write=True)
+    # Only the GRE sequence (index 0) is simulated here
+    simulate_pulseq_jemris(seq_path=sps[0], phantom_info=phantom_info, sim_name=sim_names[0],
+                               coil_fov=0.256)
+    # dims=[15,15] must match the Nf/Np of the simulated sequence
+    kk, im, images = recon_jemris(file='sim/' + sim_names[0] + '/signals.h5', dims=[15,15])
+    savemat('sim/'+sim_names[0]+'/output.mat', {'images': images, 'kspace': kk, 'imspace': im})

+ 1150 - 0
LF_scanner/py2jemris/pulseq_library.py

@@ -0,0 +1,1150 @@
+# Copyright of the Board of Trustees of Columbia University in the City of New York
+"""
+Library for generating pulseq sequences: GRE, SE, IRSE, EPI
+"""
+
+# TODO update for PyPulseq 1.2.1
+
+import copy
+from math import pi, sqrt, ceil, floor
+
+import numpy as np
+from pypulseq.Sequence.sequence import Sequence
+from pypulseq.calc_duration import calc_duration
+from pypulseq.make_adc import make_adc
+from pypulseq.make_delay import make_delay
+from pypulseq.make_sinc_pulse import make_sinc_pulse
+from pypulseq.make_trap_pulse import make_trapezoid
+from pypulseq.opts import Opts
+
+GAMMA_BAR = 42.5775e6  # reduced gyromagnetic ratio of 1H (gamma / 2*pi) [Hz/T]
+GAMMA = 2*pi*GAMMA_BAR  # gyromagnetic ratio of 1H [rad/s/T]
+
+
+def make_pulseq_gre(fov,n,thk,fa,tr,te,enc='xyz',slice_locs=None,write=False):
+    """Makes a gradient-echo sequence
+
+    2D orthogonal multi-slice gradient-echo pulse sequence with Cartesian encoding
+    Orthogonal means that each of slice-selection, phase encoding, and frequency encoding
+    aligns with the x, y, or z directions
+
+    Parameters
+    ----------
+    fov : float
+        Field-of-view in meters (isotropic)
+    n : int
+        Matrix size (isotropic)
+    thk : float
+        Slice thickness in meters
+    fa : float
+        Flip angle in degrees
+    tr : float
+        Repetition time in seconds
+    te : float
+        Echo time in seconds
+    enc : str, optional
+        Spatial encoding directions
+        1st - readout; 2nd - phase encoding; 3rd - slice select
+        Default 'xyz' means axial(z) slice with readout in x and phase encoding in y
+    slice_locs : array_like, optional
+        Slice locations from isocenter in meters
+        Default is None which means a single slice at the center
+    write : bool, optional
+        Whether to write seq into file; default is False
+
+    Returns
+    -------
+    seq : Sequence
+        Pulse sequence as a Pulseq object
+
+    """
+    # Hardware limits used by all events in this sequence
+    system = Opts(max_grad=32, grad_unit='mT/m', max_slew=130,
+                  slew_unit='T/m/s', rf_ringdown_time=30e-6,
+                  rf_dead_time=100e-6, adc_dead_time=20e-6)
+    seq = Sequence(system)
+
+
+    Nf = n  # number of frequency-encoding (readout) samples
+    Np = n  # number of phase-encoding steps
+    flip = fa * pi / 180  # flip angle in radians
+    # Excitation pulse together with its slice-selection gradient
+    rf, g_ss, __ = make_sinc_pulse(flip_angle=flip, system=system, duration=4e-3, slice_thickness=thk,
+                               apodization=0.5, time_bw_product=4)
+
+    g_ss.channel = enc[2]  # slice selection on the requested axis
+
+    delta_k = 1 / fov  # k-space step [1/m]
+    kWidth = Nf * delta_k  # total k-space extent along readout
+
+    # Readout and ADC
+    readoutTime = 6.4e-3
+    g_ro= make_trapezoid(channel=enc[0],system=system,flat_area=kWidth,flat_time=readoutTime)
+    adc = make_adc(num_samples=Nf, duration=g_ro.flat_time, delay=g_ro.rise_time)
+
+    # Readout rewinder
+    g_ro_pre = make_trapezoid(channel=enc[0],system=system,area=-g_ro.area/2,duration=2e-3)
+    # Slice refocusing
+    g_ss_reph = make_trapezoid(channel=enc[2],system=system,area=-g_ss.area/2,duration=2e-3)
+    # One phase-encoding gradient area per k-space line, centered around 0
+    phase_areas = (np.arange(Np) - (Np / 2)) * delta_k
+
+    # TE, TR = 10e-3, 1000e-3
+    TE, TR = te,tr
+    # delay1 places the echo center at TE; delay2 pads the repetition out to TR
+    delayTE = TE - calc_duration(g_ro_pre) - calc_duration(g_ss) / 2 - calc_duration(g_ro) / 2
+    delayTR = TR - calc_duration(g_ro_pre) - calc_duration(g_ss) - calc_duration(g_ro) - delayTE
+    delay1 = make_delay(delayTE)
+    delay2 = make_delay(delayTR)
+
+    if slice_locs is None:
+        locs = [0]
+    else:
+        locs = slice_locs
+
+    # One full Cartesian acquisition per slice location
+    for u in range(len(locs)):
+        # add frequency offset
+        rf.freq_offset = g_ss.amplitude * locs[u]
+        for i in range(Np):
+            seq.add_block(rf, g_ss)
+            g_pe = make_trapezoid(channel=enc[1],system=system,area=phase_areas[i],duration=2e-3)
+            seq.add_block(g_ro_pre, g_pe, g_ss_reph)
+            seq.add_block(delay1)
+            seq.add_block(g_ro,adc)
+            seq.add_block(delay2)
+
+    if write:
+        seq.write("gre_fov{:.0f}mm_Nf{:d}_Np{:d}_TE{:.0f}ms_TR{:.0f}ms_FA{:.0f}deg.seq".format(fov * 1000, Nf, Np, TE * 1000,
+                                                                                               TR * 1000, flip * 180 / pi))
+    print('GRE sequence constructed')
+    return seq
+
+
+def make_pulseq_gre_oblique(fov,n,thk,fa,tr,te,enc='xyz',slice_locs=None,write=False):
+    """Makes a gradient-echo sequence in any plane
+
+        2D oblique multi-slice gradient-echo pulse sequence with Cartesian encoding
+        Oblique means that each of slice-selection, phase encoding, and frequency encoding
+        can point in any specified direction
+
+        Parameters
+        ----------
+        fov : array_like
+            Isotropic field-of-view, or length-2 list [fov_readout, fov_phase], in meters
+        n : array_like
+            Isotropic matrix size, or length-2 list [n_readout, n_phase]
+        thk : float
+            Slice thickness in meters
+        fa : float
+            Flip angle in degrees
+        tr : float
+            Repetition time in seconds
+        te : float
+            Echo time in seconds
+        enc : str or array_like, optional
+            Spatial encoding directions
+            1st - readout; 2nd - phase encoding; 3rd - slice select
+            - Use str with any permutation of x, y, and z to obtain orthogonal slices
+            e.g. The default 'xyz' means axial(z) slice with readout in x and phase encoding in y
+            - Use list to indicate vectors in the encoding directions for oblique slices
+            They should be perpendicular to each other, but not necessarily unit vectors
+            e.g. [(2,1,0),(-1,2,0),(0,0,1)] rotates the two in-plane encoding directions for an axial slice
+        slice_locs : array_like, optional
+            Slice locations from isocenter in meters
+            Default is None which means a single slice at the center
+        write : bool, optional
+            Whether to write seq into file; default is False
+
+        Returns
+        -------
+        seq : Sequence
+            Pulse sequence as a Pulseq object
+
+        """
+
+    # System options
+    system = Opts(max_grad=32, grad_unit='mT/m', max_slew=130,
+                  slew_unit='T/m/s', rf_ringdown_time=30e-6,
+                  rf_dead_time=100e-6, adc_dead_time=20e-6)
+    seq = Sequence(system)
+
+    # Calculate unit gradients for ss, fe, pe
+    ug_fe, ug_pe, ug_ss = parse_enc(enc)
+
+    # Sequence parameters
+    Nf, Np = (n,n) if isinstance(n,int) else (n[0], n[1])
+    delta_k_ro, delta_k_pe = (1/fov,1/fov) if isinstance(fov,float) else (1/fov[0], 1/fov[1])
+    kWidth_ro = Nf * delta_k_ro
+    flip = fa * pi / 180
+
+    # Slice select: RF and gradient
+    rf, g_ss, __ = make_sinc_pulse(flip_angle=flip,system=system,duration=4e-3,slice_thickness=thk,
+                               apodization=0.5, time_bw_product=4)
+    g_ss_x, g_ss_y, g_ss_z = make_oblique_gradients(g_ss,ug_ss)
+
+    # Readout and ADC
+#    readoutTime = 6.4e-3
+    dwell = 10e-6
+    g_ro= make_trapezoid(channel='x',system=system,flat_area=kWidth_ro, flat_time=dwell*Nf)
+    g_ro_x, g_ro_y, g_ro_z = make_oblique_gradients(g_ro,ug_fe)#
+    adc = make_adc(num_samples=Nf, duration=g_ro.flat_time,delay=g_ro.rise_time)
+
+    # Readout rewinder
+    g_ro_pre = make_trapezoid(channel='x',system=system,area=-g_ro.area/2,duration=2e-3)
+    g_ro_pre_x, g_ro_pre_y, g_ro_pre_z = make_oblique_gradients(g_ro_pre,ug_fe)#
+
+    # Slice refocusing
+    g_ss_reph = make_trapezoid(channel='z',system=system,area=-g_ss.area/2,duration=2e-3)
+    g_ss_reph_x, g_ss_reph_y, g_ss_reph_z = make_oblique_gradients(g_ss_reph, ug_ss)
+
+    # Prepare phase areas
+    phase_areas = (np.arange(Np) - (Np / 2)) * delta_k_pe
+
+    TE, TR = te,tr
+    delayTE = TE - calc_duration(g_ro_pre) - calc_duration(g_ss) / 2 - calc_duration(g_ro) / 2
+    delayTR = TR - calc_duration(g_ro_pre) - calc_duration(g_ss) - calc_duration(g_ro) - delayTE
+    delay1 = make_delay(delayTE)
+    delay2 = make_delay(delayTR)
+
+    if slice_locs is None:
+        locs = [0]
+    else:
+        locs = slice_locs
+
+    # Construct sequence!
+    for u in range(len(locs)):
+        # add frequency offset
+        rf.freq_offset = g_ss.amplitude * locs[u]
+        for i in range(Np):
+            seq.add_block(rf,g_ss_x, g_ss_y, g_ss_z)
+            g_pe = make_trapezoid(channel='y',system=system,area=phase_areas[i],duration=2e-3)
+
+            g_pe_x, g_pe_y, g_pe_z = make_oblique_gradients(g_pe,ug_pe)
+
+            pre_grads_list = [g_ro_pre_x, g_ro_pre_y, g_ro_pre_z,
+                             g_ss_reph_x, g_ss_reph_y, g_ss_reph_z,
+                             g_pe_x, g_pe_y, g_pe_z]
+
+            gtx, gty, gtz = combine_trap_grad_xyz(gradients=pre_grads_list,system=system, dur=2e-3)
+
+            seq.add_block(gtx, gty, gtz)
+            seq.add_block(delay1)
+            seq.add_block(g_ro_x, g_ro_y, g_ro_z, adc)
+            seq.add_block(delay2)
+
+    if write:
+        seq.write("gre_fov{:.0f}mm_Nf{:d}_Np{:d}_TE{:.0f}ms_TR{:.0f}ms_FA{:.0f}deg.seq".format(fov * 1000, Nf, Np, TE * 1000,
+                                                                                               TR * 1000, flip * 180 / pi))
+    print('GRE sequence constructed')
+    return seq
+
+
+def make_pulseq_irse(fov,n,thk,fa,tr,te,ti,enc='xyz',slice_locs=None,write=False):
+    """Makes an Inversion Recovery Spin Echo (IRSE) sequence
+
+        2D orthogonal multi-slice IRSE pulse sequence with Cartesian encoding
+        Orthogonal means that each of slice-selection, phase encoding, and frequency encoding
+        aligns with the x, y, or z directions
+
+        Parameters
+        ----------
+        fov : float
+            Field-of-view in meters (isotropic)
+        n : int
+            Matrix size (isotropic)
+        thk : float
+            Slice thickness in meters
+        fa : float
+            Flip angle in degrees
+        tr : float
+            Repetition time in seconds
+        te : float
+            Echo time in seconds
+        ti : float
+            Inversion time in seconds
+        enc : str, optional
+            Spatial encoding directions
+            1st - readout; 2nd - phase encoding; 3rd - slice select
+            Default 'xyz' means axial(z) slice with readout in x and phase encoding in y
+        slice_locs : array_like, optional
+            Slice locations from isocenter in meters
+            Default is None which means a single slice at the center
+        write : bool, optional
+            Whether to write seq into file; default is False
+
+        Returns
+        -------
+        seq : Sequence
+            Pulse sequence as a Pulseq object
+
+        """
+    system = Opts(max_grad=32, grad_unit='mT/m', max_slew=130,
+                  slew_unit='T/m/s', rf_ringdown_time=30e-6,
+                  rf_dead_time=100e-6, adc_dead_time=20e-6)
+    seq = Sequence(system)
+
+    # Parameters
+    Nf = n
+    Np = n
+    delta_k = 1 / fov
+    kWidth = Nf * delta_k
+
+    TI,TE,TR = ti,te,tr
+
+    # Accept a scalar TI or a list of TIs (one full acquisition per TI)
+    if np.shape(TI) == ():
+        TI = [TI]
+
+
+    # Non-180 pulse
+    flip1 = fa * pi / 180
+    rf, g_ss, __ = make_sinc_pulse(flip_angle=flip1, system=system, duration=2e-3, slice_thickness=thk,
+                               apodization=0.5, time_bw_product=4)
+
+    g_ss.channel = enc[2]
+
+    # 180 pulse
+    flip2 = 180 * pi / 180
+    rf180, g_ss180, __ = make_sinc_pulse(flip_angle=flip2, system=system,duration=2e-3, slice_thickness=thk,
+                                     apodization=0.5, time_bw_product=4)
+    g_ss180.channel = enc[2]
+
+    # Readout gradient & ADC
+    readoutTime = 6.4e-3
+
+    g_ro = make_trapezoid(channel=enc[0],system=system, flat_area=kWidth, flat_time=readoutTime)
+    adc = make_adc(num_samples=Nf, system=system, duration=g_ro.flat_time, delay=g_ro.rise_time)
+
+    # RO rewinder gradient
+    # Positive area (same sign as readout): the 180-deg pulse negates k-space
+    g_ro_pre = make_trapezoid(channel=enc[0],system=system,area=g_ro.area/2,duration=2e-3)
+
+    # Slice refocusing gradient
+    g_ss_reph = make_trapezoid(channel=enc[2],system=system,area=-g_ss.area/2,duration=2e-3)
+
+    # Delays
+    # delayTE1: 90-deg pulse to 180-deg pulse; delayTE2: 180-deg pulse to echo;
+    # delayTE3: pads the remainder of the repetition out to TR
+    delayTE1 = TE / 2 - max(calc_duration(g_ss_reph), calc_duration(g_ro_pre)) - calc_duration(g_ss) / 2 - calc_duration(
+        g_ss180) / 2
+    delayTE2 = TE / 2 - calc_duration(g_ro) / 2 - calc_duration(g_ss180) / 2
+    delayTE3 = TR - TE - calc_duration(g_ss) / 2 - calc_duration(g_ro) / 2
+
+    print('dur rf', calc_duration(rf),'dur gss:' ,calc_duration(g_ss))
+
+    delay1 = make_delay(delayTE1)
+    delay2 = make_delay(delayTE2)
+    delay3 = make_delay(delayTE3)
+
+    # Construct sequence
+    if slice_locs is None:
+        locs = [0]
+    else:
+        locs = slice_locs
+
+    for inv in range(len(TI)):
+        for u in range(len(locs)):
+            rf180.freq_offset = g_ss180.amplitude * locs[u]
+            rf.freq_offset = g_ss.amplitude * locs[u]
+            for i in range(Np):
+                # Inversion Recovery part
+                seq.add_block(rf180, g_ss180)# Selective; potentially extended to be non-selective or adiabatic
+                seq.add_block(make_delay(TI[inv] - calc_duration(rf) / 2 - calc_duration(rf180) / 2))  # Inversion time delay
+                # Spin echo part
+                seq.add_block(rf, g_ss)  # 90-deg pulse
+                g_pe_pre = make_trapezoid(channel=enc[1],system=system,area=-(Np/2-i)*delta_k,
+                                          duration=2e-3)  # Phase encoding gradient
+                seq.add_block(g_ro_pre, g_pe_pre, g_ss_reph)  # Add a combination of ro rewinder, phase encoding, and slice refocusing
+                seq.add_block(delay1)  # Delay 1: until 180-deg pulse
+                seq.add_block(rf180, g_ss180)  # 180 deg pulse for SE
+                seq.add_block(delay2)  # Delay 2: until readout
+                seq.add_block(g_ro, adc)  # Readout!
+                seq.add_block(delay3)  # Delay 3: until next inversion pulse
+
+    if write:
+        if len(TI) == 1:
+            seq.write("irse_fov{:.0f}mm_Nf{:d}_Np{:d}_TI{:.0f}ms_TE{:.0f}ms_TR{:.0f}ms.seq".format(fov * 1000, Nf, Np, TI[0] * 1000, TE * 1000, TR * 1000))
+        else:
+            seq.write("irse_fov{:.0f}mm_Nf{:d}_Np{:d}_multiTI_TE{:.0f}ms_TR{:.0f}ms.seq".format(fov * 1000, Nf, Np, TE * 1000, TR * 1000))
+
+    print('IRSE sequence constructed')
+    return seq
+
+def make_pulseq_irse_oblique(fov,n,thk,fa,tr,te,ti,enc='xyz',slice_locs=None,write=False):
+    """Makes an Inversion Recovery Spin Echo (IRSE) sequence in any plane
+
+        2D oblique multi-slice IRSE pulse sequence with Cartesian encoding
+        Oblique means that each of slice-selection, phase encoding, and frequency encoding
+        can point in any specified direction
+
+        Parameters
+        ----------
+        fov : array_like
+            Isotropic field-of-view, or length-2 list [fov_readout, fov_phase], in meters
+        n : array_like
+            Isotropic matrix size, or length-2 list [n_readout, n_phase]
+        thk : float
+            Slice thickness in meters
+        fa : float
+            Flip angle in degrees
+        tr : float
+            Repetition time in seconds
+        te : float
+            Echo time in seconds
+        ti : float
+            Inversion time in seconds
+        enc : str or array_like, optional
+            Spatial encoding directions
+            1st - readout; 2nd - phase encoding; 3rd - slice select
+            - Use str with any permutation of x, y, and z to obtain orthogonal slices
+            e.g. The default 'xyz' means axial(z) slice with readout in x and phase encoding in y
+            - Use list to indicate vectors in the encoding directions for oblique slices
+            They should be perpendicular to each other, but not necessarily unit vectors
+            e.g. [(2,1,0),(-1,2,0),(0,0,1)] rotates the two in-plane encoding directions for an axial slice
+        slice_locs : array_like, optional
+            Slice locations from isocenter in meters
+            Default is None which means a single slice at the center
+        write : bool, optional
+            Whether to write seq into file; default is False
+
+
+        Returns
+        -------
+        seq : Sequence
+            Pulse sequence as a Pulseq object
+
+        """
+    # System options
+    system = Opts(max_grad=32, grad_unit='mT/m', max_slew=130,
+                  slew_unit='T/m/s', rf_ringdown_time=30e-6,
+                  rf_dead_time=100e-6, adc_dead_time=20e-6)
+    seq = Sequence(system)
+
+    # Sequence parameters
+    ug_fe, ug_pe, ug_ss = parse_enc(enc)
+    Nf, Np = (n,n) if isinstance(n,int) else (n[0], n[1])
+    delta_k_ro, delta_k_pe = (1/fov,1/fov) if isinstance(fov,float) else (1/fov[0], 1/fov[1])
+    kWidth_ro = Nf * delta_k_ro
+    TI,TE,TR = ti,te,tr
+
+    if np.shape(TI) == ():
+        TI = [TI]
+
+    # Non-180 pulse
+    flip1 = fa * pi / 180
+    rf, g_ss, __ = make_sinc_pulse(flip_angle=flip1, system=system, duration=2e-3, slice_thickness=thk,
+                                apodization=0.5, time_bw_product=4)
+    g_ss_x, g_ss_y, g_ss_z = make_oblique_gradients(g_ss, ug_ss)
+
+    # 180 pulse
+    flip2 = 180 * pi / 180
+    rf180, g_ss180, __ = make_sinc_pulse(flip_angle=flip2, system=system, duration=2e-3, slice_thickness=thk,
+                                    apodization=0.5, time_bw_product=4)
+    g_ss180_x, g_ss180_y, g_ss180_z = make_oblique_gradients(g_ss180, ug_ss)
+
+    # Readout gradient & ADC
+    readoutTime = 6.4e-3
+
+    g_ro = make_trapezoid(channel='x', system=system, flat_area=kWidth_ro, flat_time= readoutTime)
+    g_ro_x, g_ro_y, g_ro_z = make_oblique_gradients(g_ro, ug_fe)
+
+    adc = make_adc(num_samples=Nf, system=system, duration=g_ro.flat_time, delay=g_ro.rise_time)
+
+    # RO rewinder gradient
+    g_ro_pre = make_trapezoid(channel=enc[0], system=system, area=g_ro.area/2,duration=2e-3)
+    g_ro_pre_x, g_ro_pre_y, g_ro_pre_z = make_oblique_gradients(g_ro_pre,ug_fe)#
+
+    # Slice refocusing gradient
+    g_ss_reph = make_trapezoid(channel=enc[2],system=system,area=-g_ss.area/2,duration=2e-3)
+    g_ss_reph_x, g_ss_reph_y, g_ss_reph_z = make_oblique_gradients(g_ss_reph, ug_ss)
+
+    # Delays
+    delayTE1 = TE / 2 - max(calc_duration(g_ss_reph), calc_duration(g_ro_pre))\
+               - calc_duration(g_ss) / 2 - calc_duration(g_ss180) / 2
+    delayTE2 = TE / 2 - calc_duration(g_ro) / 2 - calc_duration(g_ss180) / 2
+    delayTE3 = TR - TE - calc_duration(g_ss) / 2 - calc_duration(g_ro) / 2
+
+    delay1 = make_delay(delayTE1)
+    delay2 = make_delay(delayTE2)
+    delay3 = make_delay(delayTE3)
+
+    # Construct sequence
+    if slice_locs is None:
+        locs = [0]
+    else:
+        locs = slice_locs
+
+    for inv in range(len(TI)):
+        for u in range(len(locs)):
+            rf180.freq_offset = g_ss180.amplitude * locs[u]
+            rf.freq_offset = g_ss.amplitude * locs[u]
+            for i in range(Np):
+                # Inversion Recovery part
+                seq.add_block(rf180, g_ss180_x, g_ss180_y, g_ss180_z)# Non-selective at the moment; could be extended to make this selective/adiabatic
+                seq.add_block(make_delay(TI[inv] - calc_duration(rf) / 2 - calc_duration(rf180) / 2))  # Inversion time delay
+                # Spin echo part
+                seq.add_block(rf, g_ss_x, g_ss_y, g_ss_z)  # 90-deg pulse
+                g_pe = make_trapezoid(channel='y', system=system, area=-(Np /2 - i)*delta_k_pe, duration=2e-3)  # Phase encoding gradient
+                g_pe_x, g_pe_y, g_pe_z = make_oblique_gradients(g_pe, ug_pe)
+
+                pre_grads_list = [g_ro_pre_x, g_ro_pre_y, g_ro_pre_z,
+                                  g_ss_reph_x, g_ss_reph_y, g_ss_reph_z,
+                                  g_pe_x, g_pe_y, g_pe_z]
+                gtx, gty, gtz = combine_trap_grad_xyz(pre_grads_list,system,2e-3)
+
+                seq.add_block(gtx, gty, gtz)  # Add a combination of ro rewinder, phase encoding, and slice refocusing
+                seq.add_block(delay1)  # Delay 1: until 180-deg pulse
+                seq.add_block(rf180, g_ss180_x, g_ss180_y, g_ss180_z)  # 180 deg pulse for SE
+                seq.add_block(delay2)  # Delay 2: until readout
+                seq.add_block(g_ro_x, g_ro_y, g_ro_z, adc)  # Readout!
+                seq.add_block(delay3)  # Delay 3: until next inversion pulse
+
+    if write:
+        if len(TI) == 1:
+            seq.write("irse_fov{:.0f}mm_Nf{:d}_Np{:d}_TI{:.0f}ms_TE{:.0f}ms_TR{:.0f}ms_FA{:d}deg.seq".format(fov * 1000,
+                                                                            Nf, Np, TI[0] * 1000, TE * 1000, TR * 1000, fa))
+        else:
+            seq.write("irse_fov{:.0f}mm_Nf{:d}_Np{:d}_multiTI_TE{:.0f}ms_TR{:.0f}ms_FA{:d}deg.seq".format(fov * 1000,
+                                                                            Nf, Np, TE * 1000, TR * 1000, fa))
+
+    print('IRSE (oblique) sequence constructed')
+    return seq
+
def make_pulseq_se(fov,n,thk,fa,tr,te,enc='xyz',slice_locs=None,write=False):
    """Makes a Spin Echo (SE) sequence

    2D orthogonal multi-slice Spin-Echo pulse sequence with Cartesian encoding
    Orthogonal means that each of slice-selection, phase encoding, and frequency encoding
    aligns with the x, y, or z directions

    Parameters
    ----------
    fov : float
        Field-of-view in meters (isotropic)
    n : int
        Matrix size (isotropic)
    thk : float
        Slice thickness in meters
    fa : float
        Flip angle in degrees
    tr : float
        Repetition time in seconds
    te : float
        Echo time in seconds
    enc : str, optional
        Spatial encoding directions
        1st - readout; 2nd - phase encoding; 3rd - slice select
        Default 'xyz' means axial(z) slice with readout in x and phase encoding in y
    slice_locs : array_like, optional
        Slice locations from isocenter in meters
        Default is None which means a single slice at the center
    write : bool, optional
        Whether to write seq into file; default is False

    Returns
    -------
    seq : Sequence
        Pulse sequence as a Pulseq object

    """
    # Hardware limits; dead/ringdown times feed into all timing calculations below
    system = Opts(max_grad=32, grad_unit='mT/m', max_slew=130,
                  slew_unit='T/m/s', rf_ringdown_time=30e-6,
                  rf_dead_time=100e-6, adc_dead_time=20e-6)
    seq = Sequence(system)

    # Parameters
    Nf = n  # number of frequency-encode (readout) samples
    Np = n  # number of phase-encode lines
    delta_k = 1 / fov  # k-space step [1/m]
    kWidth = Nf * delta_k  # full k-space extent along the readout direction

    TE,TR = te,tr


    # Non-180 pulse (excitation), returned together with its slice-select gradient
    flip1 = fa * pi / 180
    rf, g_ss, __ = make_sinc_pulse(flip_angle=flip1, system=system, duration=2e-3, slice_thickness=thk,
                                apodization=0.5, time_bw_product=4)
    g_ss.channel = enc[2]  # slice-select axis is the 3rd letter of enc


    # 180 pulse (refocusing), same slice profile as the excitation
    flip2 = 180 * pi / 180
    rf180, g_ss180, __ = make_sinc_pulse(flip_angle=flip2, system=system, duration=2e-3, slice_thickness=thk,
                                    apodization=0.5, time_bw_product=4)
    g_ss180.channel = enc[2]

    # Readout gradient & ADC
#    readoutTime = system.grad_raster_time * Nf
    readoutTime = 6.4e-3
    g_ro = make_trapezoid(channel=enc[0],system=system,flat_area=kWidth,flat_time=readoutTime)
    # ADC samples only during the flat top of the readout gradient
    adc = make_adc(num_samples=Nf, system=system, duration=g_ro.flat_time, delay=g_ro.rise_time)

    # RO rewinder gradient (pre-winds to the edge of k-space before the 180)
    g_ro_pre = make_trapezoid(channel=enc[0],system=system,area=g_ro.area/2,duration=2e-3)

    # Slice refocusing gradient (cancels phase accrued under half of g_ss)
    g_ss_reph = make_trapezoid(channel=enc[2],system=system,area=-g_ss.area/2,duration=2e-3)

    # Delays
    # delayTE1: gap after the pre-winder block so the 180 pulse sits at TE/2
    delayTE1 = (TE - 2*max(calc_duration(g_ss_reph), calc_duration(g_ro_pre)) - calc_duration(g_ss) - calc_duration(
        g_ss180))/2
  # delayTE2 = TE / 2 - calc_duration(g_ro) / 2 - calc_duration(g_ss180) / 2
    # delayTE2: gap between the 180 and the readout so the echo is centered at TE
    delayTE2 = (TE - calc_duration(g_ro) - calc_duration(g_ss180))/2
    # delayTE3: remainder of TR after the readout
    delayTE3 = TR - TE - (calc_duration(g_ss) + calc_duration(g_ro)) / 2


    delay1 = make_delay(delayTE1)
    delay2 = make_delay(delayTE2)
    delay3 = make_delay(delayTE3)


    # Construct sequence; default to a single slice at isocenter
    if slice_locs is None:
        locs = [0]
    else:
        locs = slice_locs

    for u in range(len(locs)):
        # Frequency offsets shift the excited slice away from isocenter
        rf180.freq_offset = g_ss180.amplitude * locs[u]
        rf.freq_offset = g_ss.amplitude * locs[u]
        for i in range(Np):
            seq.add_block(rf, g_ss)  # 90-deg pulse
            g_pe_pre = make_trapezoid(channel=enc[1],system=system,area=-(Np/2-i)*delta_k,duration=2e-3)  # Phase encoding gradient
            seq.add_block(g_ro_pre, g_pe_pre, g_ss_reph)  # Add a combination of ro rewinder, phase encoding, and slice refocusing
            seq.add_block(delay1)  # Delay 1: until 180-deg pulse
            seq.add_block(rf180, g_ss180)  # 180 deg pulse for SE
            seq.add_block(delay2)  # Delay 2: until readout
            seq.add_block(g_ro, adc)  # Readout!
            seq.add_block(delay3)  # Delay 3: until next repetition

    if write:
        # NOTE(review): {:d} assumes Nf/Np are ints; TE/TR are written in ms
        seq.write("se_fov{:.0f}mm_Nf{:d}_Np{:d}_TE{:.0f}ms_TR{:.0f}ms.seq".format(fov * 1000, Nf, Np, TE * 1000, TR * 1000))


    print('Spin echo sequence constructed')
    return seq
+
def make_pulseq_se_oblique(fov,n,thk,fa,tr,te,enc='xyz',slice_locs=None,write=False):
    """Makes a Spin Echo (SE) sequence in any plane

        2D oblique multi-slice Spin-Echo pulse sequence with Cartesian encoding
        Oblique means that each of slice-selection, phase encoding, and frequency encoding
        can point in any specified direction

        Parameters
        ----------
        fov : array_like
            Isotropic field-of-view, or length-2 list [fov_readout, fov_phase], in meters
        n : array_like
            Isotropic matrix size, or length-2 list [n_readout, n_phase]
        thk : float
            Slice thickness in meters
        fa : float
            Flip angle in degrees
        tr : float
            Repetition time in seconds
        te : float
            Echo time in seconds
        enc : str or array_like, optional
            Spatial encoding directions
            1st - readout; 2nd - phase encoding; 3rd - slice select
            - Use str with any permutation of x, y, and z to obtain orthogonal slices
            e.g. The default 'xyz' means axial(z) slice with readout in x and phase encoding in y
            - Use list to indicate vectors in the encoding directions for oblique slices
            They should be perpendicular to each other, but not necessarily unit vectors
            e.g. [(2,1,0),(-1,2,0),(0,0,1)] rotates the two in-plane encoding directions for an axial slice
        slice_locs : array_like, optional
            Slice locations from isocenter in meters
            Default is None which means a single slice at the center
        write : bool, optional
            Whether to write seq into file; default is False

        Returns
        -------
        seq : Sequence
            Pulse sequence as a Pulseq object

        """

    # System options
    system = Opts(max_grad=32, grad_unit='mT/m', max_slew=130,
                  slew_unit='T/m/s', rf_ringdown_time=30e-6,
                  rf_dead_time=100e-6, adc_dead_time=20e-6)
    seq = Sequence(system)

    # Sequence parameters
    ug_fe, ug_pe, ug_ss = parse_enc(enc)
    Nf, Np = (n, n) if isinstance(n, int) else (n[0], n[1])
    # Robustness fix: accept int FOV as well as float (original tested float only)
    if isinstance(fov, (int, float)):
        delta_k_ro, delta_k_pe = 1 / fov, 1 / fov
        fov_name = fov  # readout FOV used in the output file name
    else:
        delta_k_ro, delta_k_pe = 1 / fov[0], 1 / fov[1]
        fov_name = fov[0]
    kWidth_ro = Nf * delta_k_ro
    TE, TR = te, tr

    # Excitation pulse (flip angle fa) with its slice-select gradient
    flip1 = fa * pi / 180
    rf, g_ss, __ = make_sinc_pulse(flip_angle=flip1, system=system, duration=2e-3, slice_thickness=thk,
                                   apodization=0.5, time_bw_product=4)
    # Project the slice-select gradient onto x/y/z for the oblique orientation
    g_ss_x, g_ss_y, g_ss_z = make_oblique_gradients(g_ss, ug_ss)

    # 180-degree refocusing pulse
    flip2 = 180 * pi / 180
    rf180, g_ss180, __ = make_sinc_pulse(flip_angle=flip2, system=system, duration=2e-3, slice_thickness=thk,
                                         apodization=0.5, time_bw_product=4)
    g_ss180_x, g_ss180_y, g_ss180_z = make_oblique_gradients(g_ss180, ug_ss)

    # Readout gradient & ADC (ADC samples the flat top only)
    readoutTime = 6.4e-3
    g_ro = make_trapezoid(channel='x', system=system, flat_area=kWidth_ro, flat_time=readoutTime)
    g_ro_x, g_ro_y, g_ro_z = make_oblique_gradients(g_ro, ug_fe)
    adc = make_adc(num_samples=Nf, system=system, duration=g_ro.flat_time, delay=g_ro.rise_time)

    # Readout pre-winder
    g_ro_pre = make_trapezoid(channel='x', system=system, area=g_ro.area / 2, duration=2e-3)
    g_ro_pre_x, g_ro_pre_y, g_ro_pre_z = make_oblique_gradients(g_ro_pre, ug_fe)

    # Slice refocusing gradient
    g_ss_reph = make_trapezoid(channel='z', system=system, area=-g_ss.area / 2, duration=2e-3)
    g_ss_reph_x, g_ss_reph_y, g_ss_reph_z = make_oblique_gradients(g_ss_reph, ug_ss)

    # Delays: place the 180 at TE/2 and center the echo at TE
    delayTE1 = (TE - 2 * max(calc_duration(g_ss_reph), calc_duration(g_ro_pre)) - calc_duration(g_ss) - calc_duration(
        g_ss180)) / 2
    delayTE2 = (TE - calc_duration(g_ro) - calc_duration(g_ss180)) / 2
    delayTE3 = TR - TE - (calc_duration(g_ss) + calc_duration(g_ro)) / 2

    delay1 = make_delay(delayTE1)
    delay2 = make_delay(delayTE2)
    delay3 = make_delay(delayTE3)

    # Construct sequence; default to a single slice at isocenter
    if slice_locs is None:
        locs = [0]
    else:
        locs = slice_locs

    for u in range(len(locs)):
        # Frequency offsets move the excited slice away from isocenter
        rf180.freq_offset = g_ss180.amplitude * locs[u]
        rf.freq_offset = g_ss.amplitude * locs[u]
        for i in range(Np):
            seq.add_block(rf, g_ss_x, g_ss_y, g_ss_z)  # 90-deg pulse
            g_pe = make_trapezoid(channel='y', system=system, area=-(Np / 2 - i) * delta_k_pe,
                                  duration=2e-3)  # Phase encoding gradient
            g_pe_x, g_pe_y, g_pe_z = make_oblique_gradients(g_pe, ug_pe)

            pre_grads_list = [g_ro_pre_x, g_ro_pre_y, g_ro_pre_z,
                              g_ss_reph_x, g_ss_reph_y, g_ss_reph_z,
                              g_pe_x, g_pe_y, g_pe_z]
            gtx, gty, gtz = combine_trap_grad_xyz(pre_grads_list, system, 2e-3)

            seq.add_block(gtx, gty, gtz)  # Combined ro rewinder, phase encoding, and slice refocusing
            seq.add_block(delay1)  # Delay 1: until 180-deg pulse
            seq.add_block(rf180, g_ss180_x, g_ss180_y, g_ss180_z)  # 180 deg pulse for SE
            seq.add_block(delay2)  # Delay 2: until readout
            seq.add_block(g_ro_x, g_ro_y, g_ro_z, adc)  # Readout!
            seq.add_block(delay3)  # Delay 3: until next repetition

    if write:
        # Bug fix: fa is documented as float, so "{:d}" raised ValueError; "{:.0f}"
        # accepts both int and float flip angles. fov_name handles list-valued FOVs
        # (the original multiplied the list itself by 1000, breaking the format call).
        seq.write("se_fov{:.0f}mm_Nf{:d}_Np{:d}_TE{:.0f}ms_TR{:.0f}ms_FA{:.0f}deg.seq".format(
            fov_name * 1000, Nf, Np, TE * 1000, TR * 1000, fa))

    print('Spin echo sequence (oblique) constructed')
    return seq
+
+
+# TODO multi-shot epi needs to be tested on scanner! : )
def make_pulseq_epi_oblique(fov,n,thk,fa,tr,te,enc='xyz',slice_locs=None,echo_type="se",n_shots=1,seg_type='blocked',write=False):
    """Makes an Echo Planar Imaging (EPI) sequence in any plane

        2D oblique multi-slice EPI pulse sequence with Cartesian encoding
        Oblique means that each of slice-selection, phase encoding, and frequency encoding
        can point in any specified direction

        Parameters
        ----------
        fov : array_like
            Isotropic field-of-view, or length-2 list [fov_readout, fov_phase], in meters
        n : array_like
            Isotropic matrix size, or length-2 list [n_readout, n_phase]
        thk : float
            Slice thickness in meters
        fa : float
            Flip angle in degrees
        tr : float
            Repetition time in seconds
        te : float
            Echo time in seconds
        enc : str or array_like, optional
            Spatial encoding directions
            1st - readout; 2nd - phase encoding; 3rd - slice select
            - Use str with any permutation of x, y, and z to obtain orthogonal slices
            e.g. The default 'xyz' means axial(z) slice with readout in x and phase encoding in y
            - Use list to indicate vectors in the encoding directions for oblique slices
            They should be perpendicular to each other, but not necessarily unit vectors
            e.g. [(2,1,0),(-1,2,0),(0,0,1)] rotates the two in-plane encoding directions for an axial slice
        slice_locs : array_like, optional
            Slice locations from isocenter in meters
            Default is None which means a single slice at the center
        echo_type : str, optional {'se','gre'}
            Type of echo generated
            se (default) - spin echo (an 180 deg pulse is used)
            gre - gradient echo
        n_shots : int, optional
            Number of shots used to encode each slice; default is 1
        seg_type : str, optional {'blocked','interleaved'}
            Method to divide up k-space in the case of n_shots > 1; default is 'blocked'
            'blocked' - each shot covers a rectangle, with no overlap between shots
            'interleaved' - each shot samples the full k-space but with wider phase steps

        write : bool, optional
            Whether to write seq into file; default is False

        Returns
        -------
        seq : Sequence
            Pulse sequence as a Pulseq object
        ro_dirs : numpy.ndarray
            List of 0s and 1s indicating direction of readout
            0 - left to right
            1 - right to left (needs to be reversed at recon)
        ro_order : numpy.ndarray
            Order in which to re-arrange the readout lines
            It is [] for blocked acquisition (retain original order)

        """
    # Multi-slice, multi-shot (>=1)
    # TE is set to be where the trajectory crosses the center of k-space

    # System options
    system = Opts(max_grad=32, grad_unit='mT/m', max_slew=130,
                  slew_unit='T/m/s', rf_ringdown_time=30e-6,
                  rf_dead_time=100e-6, adc_dead_time=20e-6)
    seq = Sequence(system)

    # Robustness fix: fail fast on unsupported modes. The original crashed later
    # with NameError on undefined variables (pe_max_area / pe_scales).
    if echo_type not in ('se', 'gre'):
        raise ValueError("echo_type must be 'se' or 'gre'")
    if seg_type not in ('blocked', 'interleaved'):
        raise ValueError("seg_type must be 'blocked' or 'interleaved'")

    ug_fe, ug_pe, ug_ss = parse_enc(enc)

    # Sequence parameters
    Nf, Np = (n, n) if isinstance(n, int) else (n[0], n[1])
    # Robustness fix: accept int FOV as well as float (original tested float only)
    if isinstance(fov, (int, float)):
        delta_k_ro, delta_k_pe = 1 / fov, 1 / fov
        fov_name = fov  # readout FOV used in the output file name
    else:
        delta_k_ro, delta_k_pe = 1 / fov[0], 1 / fov[1]
        fov_name = fov[0]
    kWidth_ro = Nf * delta_k_ro
    TE, TR = te, tr
    flip = fa * pi / 180

    # RF Pulse (first)
    rf, g_ss, __ = make_sinc_pulse(flip_angle=flip, system=system, duration=2.5e-3, slice_thickness=thk,
                                   apodization=0.5, time_bw_product=4)
    g_ss_x, g_ss_y, g_ss_z = make_oblique_gradients(g_ss, ug_ss)

    # Readout gradients: positive lobe plus a negated copy for odd (reversed) lines
    dwell = 1e-5
    readoutTime = Nf * dwell
    g_ro_pos = make_trapezoid(channel='x', system=system, flat_area=kWidth_ro, flat_time=readoutTime)
    g_ro_pos_x, g_ro_pos_y, g_ro_pos_z = make_oblique_gradients(g_ro_pos, ug_fe)
    g_ro_neg = copy.deepcopy(g_ro_pos)
    modify_gradient(g_ro_neg, scale=-1)
    g_ro_neg_x, g_ro_neg_y, g_ro_neg_z = make_oblique_gradients(g_ro_neg, ug_fe)

    # TODO make sure delay is a multiple of gradient raster time
    adc = make_adc(num_samples=Nf, system=system, duration=g_ro_pos.flat_time, delay=g_ro_pos.rise_time)
    print("ADC delay: ", adc.delay)

    pre_time = 8e-4

    # 180 deg pulse for SE
    if echo_type == "se":
        # RF Pulse (180 deg for SE)
        flip180 = 180 * pi / 180
        rf180, g_ss180, __ = make_sinc_pulse(flip_angle=flip180, system=system, duration=2.5e-3, slice_thickness=thk,
                                             apodization=0.5, time_bw_product=4)
        g_ss180_x, g_ss180_y, g_ss180_z = make_oblique_gradients(g_ss180, ug_ss)

        # Slice-select direction spoilers; currently zeroed out (amplitude scaled
        # to 0) but kept in place so the timing calculations stay unchanged
        g_ss_spoil = make_trapezoid(channel='z', system=system, area=g_ss.area * 2, duration=3 * pre_time)
        modify_gradient(g_ss_spoil, 0)
        g_ss_spoil_x, g_ss_spoil_y, g_ss_spoil_z = make_oblique_gradients(g_ss_spoil, ug_ss)

    # Readout rewinder
    ro_pre_area = g_ro_neg.area / 2 if echo_type == 'gre' else g_ro_pos.area / 2
    g_ro_pre = make_trapezoid(channel='x', system=system, area=ro_pre_area, duration=pre_time)
    g_ro_pre_x, g_ro_pre_y, g_ro_pre_z = make_oblique_gradients(g_ro_pre, ug_fe)

    # Slice-selective rephasing
    g_ss_reph = make_trapezoid(channel='z', system=system, area=-g_ss.area / 2, duration=pre_time)
    g_ss_reph_x, g_ss_reph_y, g_ss_reph_z = make_oblique_gradients(g_ss_reph, ug_ss)

    # Phase encode rewinder (sign depends on where the trajectory starts)
    if echo_type == 'gre':
        pe_max_area = (Np / 2) * delta_k_pe
    else:  # 'se'
        pe_max_area = -(Np / 2) * delta_k_pe

    g_pe_max = make_trapezoid(channel='y', system=system, area=pe_max_area, duration=pre_time)

    # Phase encoding blips
    dur = ceil(2 * sqrt(delta_k_pe / system.max_slew) / 10e-6) * 10e-6
    g_blip = make_trapezoid(channel='y', system=system, area=delta_k_pe, duration=dur)

    # Delays: time from the start of the EPI train to the center of k-space
    duration_to_center = (Np / 2) * calc_duration(g_ro_pos) + (Np - 1) / 2 * calc_duration(g_blip)

    if echo_type == 'se':
        delayTE1 = TE / 2 - calc_duration(g_ss) / 2 - pre_time - calc_duration(g_ss_spoil) - calc_duration(rf180) / 2
        delayTE2 = TE / 2 - calc_duration(rf180) / 2 - calc_duration(g_ss_spoil) - duration_to_center
        delay1 = make_delay(delayTE1)
        delay2 = make_delay(delayTE2)
    else:  # 'gre'
        delayTE = TE - calc_duration(g_ss) / 2 - pre_time - duration_to_center
        delay12 = make_delay(delayTE)

    delayTR = TR - TE - calc_duration(rf) / 2 - duration_to_center
    delay3 = make_delay(delayTR)  # This might be different for each rep though. Fix later

#####################################################################################################
    # Multi-shot calculations
    ro_dirs = []
    ro_order = []

    # Find number of lines in each block
    if seg_type == 'blocked':
        # Number of lines in each full readout block
        nl = ceil(Np / n_shots)

        # Number of k-space lines per readout
        if Np % nl == 0:
            nlines_list = nl * np.ones(n_shots)
        else:
            nlines_list = nl * np.ones(n_shots - 1)
            nlines_list = np.append(nlines_list, Np % nl)

        pe_scales = 2 * np.append([0], np.cumsum(nlines_list)[:-1]) / Np - 1
        g_blip_x, g_blip_y, g_blip_z = make_oblique_gradients(g_blip, ug_pe)
        for nlines in nlines_list:
            ro_dirs = np.append(ro_dirs, ((-1) ** (np.arange(0, nlines) + 1) + 1) / 2)

    else:  # 'interleaved'
        # Minimum number of lines per readout
        nb = floor(Np / n_shots)

        # Number of k-space lines per readout
        nlines_list = np.ones(n_shots) * nb
        nlines_list[:Np % n_shots] += 1

        # Phase encoding scales (starts from -1; i.e. bottom left combined with pre-readout)
        pe_scales = 2 * np.arange(0, (Np - n_shots) / Np, 1 / Np)[0:n_shots] - 1
        print(pe_scales)

        # Larger blips: each shot steps over the lines of the other shots
        modify_gradient(g_blip, scale=n_shots)
        g_blip_x, g_blip_y, g_blip_z = make_oblique_gradients(g_blip, ug_pe)

        # Readout order for recon
        ro_order = np.zeros((nb + 1, n_shots))
        ro_inds = np.arange(Np)
        for k in range(n_shots):
            cs = int(nlines_list[k])
            ro_order[:cs, k] = ro_inds[:cs]
            ro_inds = np.delete(ro_inds, range(cs))
        ro_order = ro_order.flatten()[:Np].astype(int)

        np.save('readout_order_for_interleaving.npy', ro_order)
        print(ro_order)

        # Readout directions in original (interleaved) order
        for nlines in nlines_list:
            ro_dirs = np.append(ro_dirs, ((-1) ** (np.arange(0, nlines) + 1) + 1) / 2)

#####################################################################################################
    # Add blocks
    # Bug fix: slice_locs defaults to None (documented as a single central slice),
    # but the original called len(None) and crashed with TypeError; default to [0]
    # for consistency with the sibling make_pulseq_* functions.
    locs = [0] if slice_locs is None else slice_locs

    for u in range(len(locs)):  # For each slice
        # Offset rf to excite the slice at locs[u]
        rf.freq_offset = g_ss.amplitude * locs[u]
        for v in range(n_shots):
            # Find init. phase encode for this shot
            g_pe = copy.deepcopy(g_pe_max)
            modify_gradient(g_pe, pe_scales[v])
            g_pe_x, g_pe_y, g_pe_z = make_oblique_gradients(g_pe, ug_pe)
            # First RF
            seq.add_block(rf, g_ss_x, g_ss_y, g_ss_z)
            # Pre-winder gradients
            pre_grads_list = [g_ro_pre_x, g_ro_pre_y, g_ro_pre_z,
                              g_pe_x, g_pe_y, g_pe_z,
                              g_ss_reph_x, g_ss_reph_y, g_ss_reph_z]
            gtx, gty, gtz = combine_trap_grad_xyz(pre_grads_list, system, pre_time)
            seq.add_block(gtx, gty, gtz)

            # 180 deg pulse and spoilers, only for Spin Echo
            if echo_type == 'se':
                # First delay
                seq.add_block(delay1)
                # Second RF : 180 deg with (currently zeroed) spoilers on both sides
                seq.add_block(g_ss_spoil_x, g_ss_spoil_y, g_ss_spoil_z)
                seq.add_block(rf180, g_ss180_x, g_ss180_y, g_ss180_z)
                seq.add_block(g_ss_spoil_x, g_ss_spoil_y, g_ss_spoil_z)
                # Delay between rf180 and beginning of readout
                seq.add_block(delay2)
            else:  # 'gre': it's just a delay
                seq.add_block(delay12)

            # EPI readout with blips; readout polarity alternates every line
            for i in range(int(nlines_list[v])):
                if i % 2 == 0:
                    seq.add_block(g_ro_pos_x, g_ro_pos_y, g_ro_pos_z, adc)  # ro line in the positive direction
                else:
                    seq.add_block(g_ro_neg_x, g_ro_neg_y, g_ro_neg_z, adc)  # ro line backwards
                seq.add_block(g_blip_x, g_blip_y, g_blip_z)  # blip

            seq.add_block(delay3)

    # Display 1 TR
    #seq.plot(time_range=(0, TR))

    if write:
        # Bug fix: fa may be a float ("{:d}" raised ValueError) and fov may be a
        # two-element list; use {:.0f} and the readout FOV for the file name.
        seq.write("epi_{}_FOV{:.0f}mm_Nf{:d}_Np{:d}_TE{:.0f}ms_TR{:.0f}ms_FA{:.0f}deg_{:d}shots.seq"
                  .format(echo_type, fov_name * 1000, Nf, Np, TE * 1000, TR * 1000, fa, n_shots))

    print('EPI sequence (oblique) constructed')
    return seq, ro_dirs, ro_order
+
+
+
+
def parse_enc(enc):
    """Decode the ``enc`` argument into three encoding direction vectors.

    Parameters
    ----------
    enc : str or array_like
        Either a permutation of 'x', 'y', 'z' (orthogonal encoding) or a
        sequence of three direction vectors (oblique encoding)

    Returns
    -------
    ug_fe : numpy.ndarray
        Length-3 vector of readout direction
    ug_pe : numpy.ndarray
        Length-3 vector of phase encoding direction
    ug_ss : numpy.ndarray
        Length-3 vector of slice selecting direction

    """
    if isinstance(enc, str):
        # Map each axis letter onto the corresponding cartesian unit vector
        axes = {'x': (1, 0, 0), 'y': (0, 1, 0), 'z': (0, 0, 1)}
        ug_fe, ug_pe, ug_ss = axes[enc[0]], axes[enc[1]], axes[enc[2]]
    else:
        # Normalize the user-supplied vectors so scaling stays consistent
        ug_fe, ug_pe, ug_ss = (
            np.array(v) / np.linalg.norm(np.array(v)) for v in (enc[0], enc[1], enc[2])
        )

    print('ug_fe: ', ug_fe)
    print('ug_pe: ', ug_pe)
    print('ug_ss: ', ug_ss)

    return ug_fe, ug_pe, ug_ss
+
+
def make_oblique_gradients(gradient,unit_grad):
    """Split one orthogonal gradient into an oblique (Gx, Gy, Gz) triple.

    Each output is a deep copy of the input gradient, scaled by the matching
    component of the unit direction vector and moved onto that axis.

    Parameters
    ----------
    gradient : Gradient
        Pulseq gradient object
    unit_grad: array_like
        Length-3 unit vector indicating direction of resulting oblique gradient

    Returns
    -------
    ngx, ngy, ngz : Gradient
        Oblique gradients in x, y, and z directions

    """
    projections = []
    for axis, component in zip(('x', 'y', 'z'), unit_grad):
        g = copy.deepcopy(gradient)
        modify_gradient(g, component, axis)
        projections.append(g)
    return projections[0], projections[1], projections[2]
+
def modify_gradient(gradient,scale,channel=None):
    """Helper function to modify the strength and channel of an existing gradient

    The gradient object is mutated in place.

    Parameters
    ----------
    gradient : Gradient
        Pulseq gradient object to be modified
    scale : float
        Scalar to multiply the gradient strength by
    channel : str, optional {None, 'x','y','z'}
        Channel to switch gradient into
        Default is None which keeps the original channel

    """
    gradient.amplitude *= scale
    gradient.area *= scale
    # Trapezoids carry an extra flat_area attribute that must stay consistent
    if gradient.type == 'trap':
        gradient.flat_area *= scale
    # Idiom fix: compare against None with `is not None` rather than `!=`
    if channel is not None:
        gradient.channel = channel
+
+
def combine_trap_grad_xyz(gradients,system,dur):
    """Merge several gradients into one trapezoid per axis with equivalent area.

    Only the per-axis areas are preserved; the output waveforms are always
    trapezoids regardless of the input shapes.

    Parameters
    ----------
    gradients : list
        List of gradients to be combined; there can be any number of x, y, or z gradients
    system : Opts
        Pulseq object that indicates system constraints for gradient parameters
    dur : float
        Duration of the output oblique gradients

    Returns
    -------
    gtx, gty, gtz : Gradient
        Oblique pulseq gradients with equivalent areas to all input gradients combined

    """
    # Accumulate the total area deposited on each physical axis
    areas = {'x': 0, 'y': 0, 'z': 0}
    for g in gradients:
        if g.channel in areas:
            areas[g.channel] += g.area

    gtx = make_trapezoid(channel='x', system=system, area=areas['x'], duration=dur)
    gty = make_trapezoid(channel='y', system=system, area=areas['y'], duration=dur)
    gtz = make_trapezoid(channel='z', system=system, area=areas['z'], duration=dur)

    return gtx, gty, gtz
+
+

File diff suppressed because it is too large
+ 2223 - 0
LF_scanner/py2jemris/py2jemris_demo.ipynb


+ 180 - 0
LF_scanner/py2jemris/recon_jemris.py

@@ -0,0 +1,180 @@
# Converts the JEMRIS simulation outputs (signals.h5 files) into data arrays, or saves them as .npy or .mat files
+# Gehua Tong
+# March 06, 2020
+
+
+import h5py
+import numpy as np
+import matplotlib.pyplot as plt
+
def recon_jemris(file, dims):
    """Read a JEMRIS signals.h5 file and run a Cartesian reconstruction.

    Inputs
    ------
    file : str
        Path to signals.h5
    dims : array_like
        Dimensions for reconstruction
        [Nro], [Nro, Nline], or [Nro, Nline, Nslice]

    Returns
    -------
    kspace : np.ndarray
        Complex k-space
    imspace : np.ndarray
        Complex image space
    images : np.ndarray
        Real, channel-combined images

    """
    mxy, _m_vec, _times = read_jemris_output(file)
    kspace, imspace = recon_jemris_output(mxy, dims)
    # TODO save as png (use previous code!)
    images = save_recon_images(imspace)
    return kspace, imspace, images
+
+
def read_jemris_output(file):
    """Read and parse JEMRIS's signals.h5 output.

    Inputs
    ------
    file : str
        Path to signals.h5

    Returns
    -------
    Mxy_out : np.ndarray
        Complex transverse magnetization sampled during readout,
        shaped (total # readouts) x (# channels)
    M_vec_out : np.ndarray
        Magnetization vectors (Mx, My, Mz) sampled during readout,
        shaped (total # readouts) x 3 x (# channels)
    times_out : np.ndarray
        Timing vector for all readout points

    """
    # Open the HDF5 container written by the JEMRIS simulator
    f = h5py.File(file, 'r')
    signal = f['signal']
    channels = signal['channels']
    keys = list(channels.keys())

    # Allocate outputs from the first channel's sample count
    n_ch = len(keys)
    n_ro = channels[keys[0]].shape[0]
    M_vec_out = np.zeros((n_ro, 3, n_ch))
    Mxy_out = np.zeros((n_ro, n_ch), dtype=complex)
    times_out = np.array(signal['times'])

    # Copy each channel; Mx + i*My forms the complex transverse signal
    for ch, key in enumerate(keys):
        channel_data = np.array(channels[key])
        M_vec_out[:, :, ch] = channel_data
        Mxy_out[:, ch] = channel_data[:, 0] + 1j * channel_data[:, 1]

    return Mxy_out, M_vec_out, times_out
+
+
def recon_jemris_output(Mxy_out, dims):
    """Cartesian reconstruction of JEMRIS simulation output
    #  (No EPI/interleave reordering)

    Inputs
    ------
    Mxy_out : np.ndarray
        Complex Nt x Nch array where Nt is the total number of data points and Nch is the number of channels

    dims : array_like
        [Nro], [Nro, Nline], or [Nro, Nline, Nslice]

    Returns
    -------
    kspace : np.ndarray
        Complex k-space matrix, shaped (Nro, Nline, Nslice, Nch)
    imspace : np.ndarray
        Complex image space matrix, same shape as kspace

    Raises
    ------
    ValueError
        If dims does not have 1-3 entries or does not match the sample count

    """
    Nt, Nch = Mxy_out.shape
    print(Nt)

    # Validate dims length before indexing into it
    ld = len(dims)
    if ld < 1 or ld > 3:
        raise ValueError("dims should have 1-3 numbers : Nro, (Nline), and (Nslice)")
    if Nt != np.prod(dims):
        raise ValueError("The dimensions provided do not match the total number of samples.")

    # Missing trailing dims default to singleton axes
    Nro = dims[0]
    Nline = dims[1] if ld >= 2 else 1
    Nslice = dims[2] if ld == 3 else 1

    kspace = np.zeros((Nro, Nline, Nslice, Nch), dtype=complex)
    imspace = np.zeros((Nro, Nline, Nslice, Nch), dtype=complex)

    # Bug fix: removed the stray `np.reshape(Mxy_out, (Nro, Nline, Nslice))` here;
    # its result was discarded and it raised ValueError for any multi-channel
    # input (Nt*Nch elements cannot be reshaped into Nt).

    # Column-major (order='F') reshape: readout varies fastest, matching the
    # acquisition order of the per-channel reshape below
    for ch in range(Nch):
        kspace[:, :, :, ch] = np.reshape(Mxy_out[:, ch], (Nro, Nline, Nslice), order='F')
        for sl in range(Nslice):
            imspace[:, :, sl, ch] = np.fft.fftshift(np.fft.ifft2(kspace[:, :, sl, ch]))

    return kspace, imspace
+
+
def save_recon_images(imspace, method='sum_squares'):
    """Combine the channel dimension of a complex image stack into real images.

    (Future, for GUI use: add options to save as separate image files / mat /
    etc. in a directory.)

    Inputs
    ------
    imspace : np.ndarray
        Complex image space. The last dimension must be # Channels.
    method : str, optional
        Method used for combining channels
        Either 'sum_squares' (default, sum of squares) or 'sum_abs' (sum of absolute values)

    Returns
    -------
    images : np.ndarray
        Real, channel_combined image matrix

    """
    magnitudes = np.absolute(imspace)
    if method == 'sum_squares':
        return np.sum(magnitudes ** 2, axis=-1)
    if method == 'sum_abs':
        return np.sum(magnitudes, axis=-1)
    raise ValueError("Method not recognized. Must be either sum_squares or sum_abs")
+
+
if __name__ == '__main__':
    # Quick manual check: reconstruct a 15x15 single-slice simulation output
    # and display k-space magnitude next to the channel-combined image.
    Mxy_out, M_vec_out, times_out = read_jemris_output('sim/test0405/signals.h5')
    kk, im = recon_jemris_output(Mxy_out, dims=[15,15])
    images = save_recon_images(im)


    plt.figure(1)
    plt.subplot(121)
    plt.imshow(np.absolute(kk[:,:,0,0]))  # first slice, first channel
    plt.title("k-space")
    plt.gray()
    plt.subplot(122)
    print(images)
    plt.imshow(np.squeeze(images[:,:,0]))
    plt.title("Image space")
    plt.gray()
    plt.show()

+ 42 - 0
LF_scanner/py2jemris/record_seq2xml_times.py

@@ -0,0 +1,42 @@
+# Records how long seq2xml takes to convert SPGR sequences at N = 16, 32, 64, and 128
+
+from seq2xml import seq2xml
+import timeit
+from pypulseq.Sequence.sequence import Sequence
+from scipy.io import savemat, loadmat
+import numpy as np
+from datetime import datetime
+from pulseq_jemris_simulator import simulate_pulseq_jemris, recon_jemris
+
def tbt_seq2xml(n):
    """Timing target: convert one stored SPGR .seq of matrix size n to JEMRIS XML."""
    path = f'sim/ismrm_abstract/spgr_var_N/spgr_gspoil_N{n}_Ns1_TE10ms_TR50ms_FA30deg_acq_112020.seq'
    sequence = Sequence()
    sequence.read(path)
    seq2xml(sequence, seq_name=f'spgr{n}', out_folder=f'sim/ismrm_abstract/spgr_var_N/spgr{n}')
+
def tbt_sim_pipeline(n):
    # Timing target: one full simulate-and-reconstruct pass for an SPGR sequence
    # of matrix size n, saving the results to a .mat file.
    # SPGR
    phantom_info = {'fov': 0.25, 'N': n, 'type': 'cylindrical', 'dim': 2, 'dir': 'z', 'loc': 0}
    sps = f'sim/ismrm_abstract/spgr_var_N/spgr_gspoil_N{n}_Ns1_TE10ms_TR50ms_FA30deg_acq_112020.seq'
    # NOTE(review): sim_name uses Windows-style backslashes while the other paths
    # here use forward slashes — presumably deliberate for the JEMRIS call on
    # Windows, but verify it resolves on the target platform.
    sim_name = f'ismrm_abstract\\spgr_var_N\\spgr{n}'

    # Use cylindrical phantom to time
    # Simulate
    simulate_pulseq_jemris(seq_path=sps, phantom_info=phantom_info, sim_name=sim_name, coil_fov=0.25)
    kk, im, images = recon_jemris(file='sim/' + sim_name + '/signals.h5', dims=[n, n])
    savemat('sim/' + sim_name + '/utest_pulseq_sim_output.mat', {'images': images, 'kspace': kk, 'imspace': im})
+
+
if __name__ == '__main__':
    # Earlier timing run for the conversion step only, kept for reference:
    #for n in [8]:
    #    print(f'Timing seq2xml for n = {n}')
    #    ttken = timeit.timeit('tbt_seq2xml(N)', setup=f'N = {n}; from __main__ import tbt_seq2xml',number=10)
    #    print(f'Avg. time over 10 reps is {ttken}')



    # Time the full simulate-and-reconstruct pipeline (q repetitions at size n);
    # timeit reports the TOTAL time for all q calls
    q = 1
    n = 8
    ttken = timeit.timeit('tbt_sim_pipeline(n)',setup=f'n={n}; from __main__ import tbt_sim_pipeline',number=q)
    print(f'Avg. sim pipeline time over {q} reps for n = {n} is {ttken} seconds.')

+ 5 - 0
LF_scanner/py2jemris/requirements.txt

@@ -0,0 +1,5 @@
+h5py~=2.10.0
+matplotlib~=3.3.1
+numpy~=1.19.1
+scipy~=1.5.2
+pypulseq~=1.2.0.post3

BIN
LF_scanner/py2jemris/rf_1.h5


BIN
LF_scanner/py2jemris/rf_10.h5


BIN
LF_scanner/py2jemris/rf_11.h5


BIN
LF_scanner/py2jemris/rf_12.h5


BIN
LF_scanner/py2jemris/rf_13.h5


BIN
LF_scanner/py2jemris/rf_14.h5


BIN
LF_scanner/py2jemris/rf_15.h5


BIN
LF_scanner/py2jemris/rf_16.h5


BIN
LF_scanner/py2jemris/rf_17.h5


BIN
LF_scanner/py2jemris/rf_18.h5


BIN
LF_scanner/py2jemris/rf_19.h5


BIN
LF_scanner/py2jemris/rf_2.h5


BIN
LF_scanner/py2jemris/rf_20.h5


BIN
LF_scanner/py2jemris/rf_21.h5


BIN
LF_scanner/py2jemris/rf_22.h5


BIN
LF_scanner/py2jemris/rf_23.h5


BIN
LF_scanner/py2jemris/rf_24.h5


BIN
LF_scanner/py2jemris/rf_25.h5


BIN
LF_scanner/py2jemris/rf_3.h5


BIN
LF_scanner/py2jemris/rf_4.h5


BIN
LF_scanner/py2jemris/rf_5.h5


BIN
LF_scanner/py2jemris/rf_6.h5


BIN
LF_scanner/py2jemris/rf_7.h5


BIN
LF_scanner/py2jemris/rf_8.h5


BIN
LF_scanner/py2jemris/rf_9.h5


BIN
LF_scanner/py2jemris/sample.h5


+ 307 - 0
LF_scanner/py2jemris/seq2xml.py

@@ -0,0 +1,307 @@
+# seq2xml.py : converts Pulseq (.seq) files into JEMRIS (.xml) sequences
+# Gehua Tong
+# March 2020
+
+from LF_scanner.pypulseq.Sequence.sequence import Sequence
+from LF_scanner.pypulseq.calc_duration import calc_duration
+import xml.etree.ElementTree as ET
+import h5py
+import numpy as np
+from math import pi
+
+
+# Notes
+# This is for generating an .xml file for input into JEMRIS simulator, from a Pulseq .seq file
+# The opposite philosophies make the .xml encoding suboptimal for storage
+# (because seq files consists of flattened-out Blocks while the JEMRIS format minimizes repetition using loops
+#  and consists of many cross-referencing of parameters)
+
+# Consider: for virtual scanner, have scripts that generate .xml and .seq at the same time (looped vs. flattened)
+# (but isn't JEMRIS already doing that? JEMRIS does have an "output to pulseq" functionality)
+# though then, having a Python interface instead of a MATLAB one is helpful in the open-source aspect
+
+
+# Unit conversion constants (comment with units before & after)
+rf_const = 2 * pi / 1000  # from Pulseq[Hz]=[1/s] to JEMRIS[rad/ms] rf magnitude conversion constant
+g_const = 2 * pi / 1e6  # from Pulseq [Hz/m] to JEMRIS [(rad/ms)/mm] gradient conversion constant
+slew_const = g_const / 1000  # from Pulseq [Hz/(m*s)] to JEMRIS [(rad/ms)/(mm*ms)]
+ga_const = 2 * pi / 1000  # from Pulseq[1/m] to JEMRIS [2*pi/mm] gradient area conversion constant
+sec2ms = 1000  # time conversion constant: seconds -> milliseconds
+rad2deg = 180/pi  # radians -> degrees (used below for JEMRIS InitialPhase)
+freq_const = 2 * pi / 1000 # From Hz to rad/ms
+
+def seq2xml(seq, seq_name, out_folder):
+    """
+    # Takes a Pulseq sequence and converts it into .xml format for JEMRIS
+    # All RF and gradient shapes are stored as .h5 files
+
+    Inputs
+    ------
+    seq : pypulseq.Sequence.sequence.Sequence
+    seq_name : name of output .xml file
+    out_folder : str
+        Path to output folder for .xml file
+
+    Returns
+    -------
+    seq_tree : xml.etree.ElementTree
+        Tree object used for generating the sequence .xml file
+    seq_path : str
+        Path to stored .xml sequence
+    """
+
+    # Parameters is the root of the xml
+    root = ET.Element("Parameters")
+    # Add gradient limits (seem to be the only parameters shared by both formats)
+    # TODO check units
+    root.set("GradMaxAmpl", str(seq.system.max_grad*g_const))
+    root.set("GradSlewRate", str(seq.system.max_slew*slew_const))
+
+    # ConcatSequence is the element for the sequence itself;
+    # Allows addition of multiple AtomicSequence
+    C0 = ET.SubElement(root, "ConcatSequence")
+
+    # Use helper functions to save all RF and only arbitrary gradient info
+    # (each returns a dict: event ID -> relative .h5 file name)
+    rf_shapes_path_dict = save_rf_library_info(seq, out_folder)
+    grad_shapes_path_dict = save_grad_library_info(seq, out_folder)
+    #print(grad_shapes_path_dict)
+    #///////////////////////////////////////////////////////////////////////////
+
+
+    # Running counters used only to generate unique XML element names
+    # (R1..., G1..., D1..., S1...)
+    rf_name_ind = 1
+    grad_name_ind = 1
+    delay_name_ind = 1
+    adc_name_ind = 1
+
+
+    ##### Main loop! #####
+    # Go through all blocks and add in events; each block is one AtomicSequence
+    for block_ind in range(1,len(seq.block_events)+1):  # Pulseq block IDs are 1-based
+        blk = seq.get_block(block_ind).__dict__
+        exists_adc = 'adc' in blk.keys()
+        adc_already_added = False
+        # Note: "EmptyPulse" class seems to allow variably spaced ADC sampling
+        # Distinguish between delay and others
+        # Question: in pulseq, does delay happen together with other events?
+        #           (for now, we assume delay always happens by itself)
+        # About name of atomic sequences: not adding names for now;
+        # (Likely it will cause no problems because there is no cross-referencing)
+        C_block = ET.SubElement(C0, "ATOMICSEQUENCE")
+        C_block.set("Name", f'C{block_ind}')
+        for key in blk.keys():
+            # Case of RF pulse
+            if key == 'rf':
+                rf = blk['rf']
+                if not (rf is None):
+                    rf_atom = ET.SubElement(C_block, "EXTERNALRFPULSE")
+
+                    rf_atom.set("Name", f'R{rf_name_ind}')
+                    rf_name_ind += 1
+
+                    rf_atom.set("InitialDelay", str(rf.delay*sec2ms))
+                    rf_atom.set("InitialPhase", str(rf.phase_offset*rad2deg))
+                    rf_atom.set("Frequency", str(rf.freq_offset*freq_const))
+                    # Find ID of this rf event
+                    rf_id = seq.block_events[block_ind][1]
+                    rf_atom.set("Filename", rf_shapes_path_dict[rf_id])
+                    rf_atom.set("Scale","1")
+                    # NOTE(review): value is "0" but the trailing comment says
+                    # "Do interpolate" — confirm against JEMRIS docs whether
+                    # 0 enables or disables interpolation.
+                    rf_atom.set("Interpolate", "0") # Do interpolate
+
+            # Column index of each gradient axis in a seq.block_events row
+            gnames_map = {'gx':2, 'gy':3, 'gz':4}
+            if key in ['gx', 'gy', 'gz']:
+                g = blk[key]
+                if not (g is None):
+                    if g.type == "trap":
+                        # Zero-amplitude trapezoids are skipped entirely
+                        if g.amplitude != 0:
+                            g_atom = ET.SubElement(C_block, "TRAPGRADPULSE")
+                            g_atom.set("Name", f'G{grad_name_ind}')
+                            grad_name_ind += 1
+                            if g.flat_time > 0:
+                                # 1. Case where flat_time is nonzero
+                                # Second, fix FlatTopArea and FlatTopTime
+                                g_atom.set("FlatTopArea", str(g.flat_area*ga_const))
+                                g_atom.set("FlatTopTime", str(g.flat_time*sec2ms))
+
+                                # Last, set axis and delay
+                            else:
+                                # 2. Case of triangular pulse (i.e. no flat part)
+                                g_atom.set("MaxAmpl", str(np.absolute(g.amplitude*g_const))) # limit amplitude
+                                g_atom.set("Area", str(0.5*(g.rise_time + g.fall_time)*g.amplitude*ga_const))
+
+                            # Third, limit duration by limiting slew rate
+                            g_atom.set("SlewRate", str((g.amplitude * g_const) / (g.fall_time * sec2ms)))
+                            g_atom.set("Asymmetric", str(g.fall_time / g.rise_time))
+                            g_atom.set("Axis", key.upper())
+                            g_atom.set("InitialDelay", str(g.delay*sec2ms))
+
+
+                        # Add ADC if it exists and then "mark as complete"
+                    elif g.type == "grad":
+                        # Set arbitrary grad parameters
+                        # Need to load h5 file again, just like in RF
+                        g_id = seq.block_events[block_ind][gnames_map[key]]
+                        g_atom = ET.SubElement(C_block, "EXTERNALGRADPULSE")
+                        g_atom.set("Name", f'G{grad_name_ind}')
+                        grad_name_ind += 1
+
+                        g_atom.set("Axis", key.upper())
+                        g_atom.set("Filename", grad_shapes_path_dict[g_id])
+                        g_atom.set("Scale","1")
+                        g_atom.set("Interpolate","0")
+                        g_atom.set("InitialDelay",str(g.delay*sec2ms))
+
+                    else:
+                        print(f'Gradient type "{g.type}" indicated')
+                        raise ValueError("Gradient's type should be either trap or grad")
+
+                    # ADC is emitted as a zero-area TRAPGRADPULSE carrying the
+                    # "ADCs" attribute, attached alongside the first gradient
+                    # processed in this block (see suggestions below).
+                    if exists_adc and not adc_already_added:
+                        adc = blk['adc']
+                        if not (adc is None):
+                            dwell = adc.dwell*sec2ms
+                            adc_delay = adc.delay*sec2ms
+                            Nro = adc.num_samples
+
+                            gzero_adc = ET.SubElement(C_block, "TRAPGRADPULSE")
+                            gzero_adc.set("Name", f'S{adc_name_ind}')
+                            adc_name_ind += 1
+
+                            gzero_adc.set("ADCs", str(Nro))
+                            gzero_adc.set("FlatTopTime", str(dwell*Nro))
+                            gzero_adc.set("FlatTopArea","0")
+                            gzero_adc.set("InitialDelay", str(adc_delay))
+
+                            adc_already_added = True
+                            # Now, it always attach ADC to the first gradient found among keys()
+                            # This might be tricky
+                            # suggestion 1: check the duration of gradient?
+                            # suggestion 2: just do any gradient and hope it works
+                            # suggestion 3: read the JEMRIS documentation/try on GUI
+
+            if key == 'delay':
+                delay_dur = blk['delay'].delay
+                # NOTE(review): the delay element is appended to the top-level
+                # ConcatSequence (C0), not to this block's ATOMICSEQUENCE, so an
+                # empty ATOMICSEQUENCE is still emitted for delay-only blocks.
+                delay_atom = ET.SubElement(C0, "DELAYATOMICSEQUENCE")
+
+                delay_atom.set("Name",f'D{delay_name_ind}')
+                delay_name_ind += 1
+
+                delay_atom.set("Delay",str(delay_dur*sec2ms))
+                delay_atom.set("DelayType","B2E")
+
+    # Output it!
+    seq_tree = ET.ElementTree(root)
+
+    seq_path = out_folder + '/' + seq_name + '.xml'
+    seq_tree.write(seq_path)
+
+    return seq_tree, seq_path
+
+def save_rf_library_info(seq, out_folder):
+    """
+    Helper function that stores distinct RF waveforms for seq2xml
+
+    Writes one HDF5 file per distinct RF event ID ('rf_<id>.h5') into
+    out_folder and returns a dict mapping rf_id -> relative file name.
+    """
+    # RF library
+    rf_shapes_path_dict = {}
+    for rf_id in list(seq.rf_library.data.keys()): # for each RF ID
+        # JEMRIS wants:
+        # "Filename":  A HDF5-file with a single dataset "extpulse"
+        # of size N x 3 where the 1st column holds the time points,
+        # and 2nd and 3rd column hold amplitudes and phases, respectively.
+        # Phase units should be radians.
+        # Time is assumed to increase and start at zero.
+        # The last time point defines the length of the pulse.
+        file_path_partial = f'rf_{int(rf_id)}.h5'
+        file_path_full = out_folder + '/' + file_path_partial
+        # De-compress using inbuilt PyPulseq method
+        rf = seq.rf_from_lib_data(seq.rf_library.data[rf_id])
+        # Only extract time, magnitude, and phase
+        # We leave initial phase and freq offset to the main conversion loop)
+        times = rf.t
+        magnitude = np.absolute(rf.signal)
+        phase = np.angle(rf.signal)
+
+        N = len(magnitude)
+        # Create file ('a' mode: reuse an existing file, replacing its dataset)
+        f = h5py.File(file_path_full, 'a')
+        if "extpulse" in f.keys():
+            del f["extpulse"]
+
+        #f.create_dataset("extpulse", (N,3), dtype='f')
+        # NOTE(review): dataset is created as (3, N) although the quoted spec
+        # above says N x 3 — confirm which layout JEMRIS actually reads.
+        f.create_dataset("extpulse",(3,N),dtype='f')
+
+
+        # Shift time axis so the pulse starts at t = 0, as JEMRIS expects
+        times = times - times[0]
+        f["extpulse"][0,:] = times*sec2ms#*sec2ms
+        f["extpulse"][1,:] = magnitude*rf_const
+        f["extpulse"][2,:] = phase#"Phase should be radians"
+        f.close()
+        rf_shapes_path_dict[rf_id] = file_path_partial
+
+    return rf_shapes_path_dict
+
+
+# Helper function
+def save_grad_library_info(seq, out_folder):
+    """
+    Helper function that stores distinct gradients for seq2xml
+
+    Writes one HDF5 file per distinct arbitrary-gradient event ID
+    ('grad_<id>.h5') into out_folder and returns a dict mapping
+    grad_id -> relative file name. Trapezoid events are skipped.
+    """
+
+    #file_paths = [out_folder + f'grad_{int(grad_id)}.h5' for grad_id in range(1,N_grad_id+1)]
+    grad_shapes_path_dict = {}
+    processed_g_inds = []
+
+    # Scan every block; block_events rows hold [rf, gx, gy, gz, adc, ...] IDs
+    for nb in range(1,len(seq.block_events)+1):
+        gx_ind, gy_ind, gz_ind = seq.block_events[nb][2:5]
+        for axis_ind, g_ind in enumerate([gx_ind, gy_ind, gz_ind]):
+            # Only save a gradient file if ...(a) it has non-zero index
+            #                                 (b) it is type 'grad', not 'trap'
+            #      s                       and (c) its index has not been processed
+            # NOTE(review): (b) is detected by len(library entry) == 3 — assumes
+            # arbitrary gradients have 3-element entries; confirm for this
+            # pypulseq version.
+            if g_ind != 0 and len(seq.grad_library.data[g_ind]) == 3 \
+                and g_ind not in processed_g_inds:
+                print(f'Adding Gradient Number {g_ind}')
+                this_block = seq.get_block(nb)
+                file_path_partial = f'grad_{int(g_ind)}.h5'
+                file_path_full = out_folder + '/' + file_path_partial
+
+                #TODO make it work for x/y/z
+                if axis_ind == 0:
+                    t_points = this_block.gx.t
+                    g_shape = this_block.gx.waveform
+                elif axis_ind == 1:
+                    t_points = this_block.gy.t
+                    g_shape = this_block.gy.waveform
+                elif axis_ind == 2:
+                    t_points = this_block.gz.t
+                    g_shape = this_block.gz.waveform
+
+                N = len(t_points)
+                # Create file ('a' mode: reuse an existing file, replacing its dataset)
+                f = h5py.File(file_path_full, 'a')
+                if "extpulse" in f.keys():
+                    del f["extpulse"]
+                # Row 0: time points [ms]; row 1: gradient amplitude [(rad/ms)/mm]
+                f.create_dataset("extpulse", (2,N), dtype='f')
+                f["extpulse"][0,:] = t_points * sec2ms
+                f["extpulse"][1,:] = g_shape * g_const
+                f.close()
+                grad_shapes_path_dict[g_ind] = file_path_partial
+                processed_g_inds.append(g_ind)
+
+    return grad_shapes_path_dict
+
+
+
+
+
+
+
+if __name__ == '__main__':
+    # Smoke test: convert one hard-coded .seq file to JEMRIS XML.
+    print('')
+    seq = Sequence()
+    seq.read('sim/test0504/gre32.seq')
+    seq2xml(seq, seq_name='gre32_twice', out_folder='sim/test0504')
+#    seq.read('seq_files/spgr_gspoil_N16_Ns1_TE5ms_TR10ms_FA30deg.seq')
+    #seq.read('benchmark_seq2xml/gre_jemris.seq')
+#    seq.read('try_seq2xml/spgr_gspoil_N15_Ns1_TE5ms_TR10ms_FA30deg.seq')
+    #seq.read('orc_test/seq_2020-02-26_ORC_54_9_384_1.seq')
+    #stree = seq2xml(seq, seq_name="ORC-Marina", out_folder='orc_test')
+
+

+ 318 - 0
LF_scanner/py2jemris/seq2xml_fixed_delay.py

@@ -0,0 +1,318 @@
+# seq2xml.py : converts Pulseq (.seq) files into JEMRIS (.xml) sequences
+# Gehua Tong
+# March 2020
+
+# from pypulseq.Sequence.sequence import Sequence
+# from pypulseq.calc_duration import calc_duration
+import pypulseq as pp
+import xml.etree.ElementTree as ET
+import h5py
+import numpy as np
+from math import pi
+
+def save_rf_library_info(seq, out_folder):
+    """
+    Helper function that stores distinct RF waveforms for seq2xml
+
+    Writes one HDF5 file per distinct RF event ID ('rf_<id>.h5') into
+    out_folder and returns a dict mapping rf_id -> relative file name.
+    Duplicate of the helper in seq2xml.py; relies on module-level
+    constants (sec2ms, rf_const) that are defined further down in this
+    file — resolved at call time, after they are bound.
+    """
+    # RF library
+    rf_shapes_path_dict = {}
+    for rf_id in list(seq.rf_library.data.keys()): # for each RF ID
+        # JEMRIS wants:
+        # "Filename":  A HDF5-file with a single dataset "extpulse"
+        # of size N x 3 where the 1st column holds the time points,
+        # and 2nd and 3rd column hold amplitudes and phases, respectively.
+        # Phase units should be radians.
+        # Time is assumed to increase and start at zero.
+        # The last time point defines the length of the pulse.
+        file_path_partial = f'rf_{int(rf_id)}.h5'
+        file_path_full = out_folder + '/' + file_path_partial
+        # De-compress using inbuilt PyPulseq method
+        rf = seq.rf_from_lib_data(seq.rf_library.data[rf_id])
+        # Only extract time, magnitude, and phase
+        # We leave initial phase and freq offset to the main conversion loop)
+        times = rf.t
+        magnitude = np.absolute(rf.signal)
+        phase = np.angle(rf.signal)
+
+        N = len(magnitude)
+        # Create file ('a' mode: reuse an existing file, replacing its dataset)
+        f = h5py.File(file_path_full, 'a')
+        if "extpulse" in f.keys():
+            del f["extpulse"]
+
+        #f.create_dataset("extpulse", (N,3), dtype='f')
+        # NOTE(review): dataset is created as (3, N) although the quoted spec
+        # above says N x 3 — confirm which layout JEMRIS actually reads.
+        f.create_dataset("extpulse",(3,N),dtype='f')
+
+
+        # Shift time axis so the pulse starts at t = 0, as JEMRIS expects
+        times = times - times[0]
+        f["extpulse"][0,:] = times*sec2ms#*sec2ms
+        f["extpulse"][1,:] = magnitude*rf_const
+        f["extpulse"][2,:] = phase#"Phase should be radians"
+        f.close()
+        rf_shapes_path_dict[rf_id] = file_path_partial
+
+    return rf_shapes_path_dict
+
+
+# Helper function
+def save_grad_library_info(seq, out_folder):
+    """
+    Helper function that stores distinct gradients for seq2xml
+
+    Writes one HDF5 file per distinct arbitrary-gradient event ID
+    ('grad_<id>.h5') into out_folder and returns a dict mapping
+    grad_id -> relative file name. Trapezoid events are skipped.
+    Duplicate of the helper in seq2xml.py; relies on module-level
+    constants (sec2ms, g_const) defined further down in this file.
+    """
+
+    #file_paths = [out_folder + f'grad_{int(grad_id)}.h5' for grad_id in range(1,N_grad_id+1)]
+    grad_shapes_path_dict = {}
+    processed_g_inds = []
+
+    # Scan every block; block_events rows hold [rf, gx, gy, gz, adc, ...] IDs
+    for nb in range(1,len(seq.block_events)+1):
+        gx_ind, gy_ind, gz_ind = seq.block_events[nb][2:5]
+        for axis_ind, g_ind in enumerate([gx_ind, gy_ind, gz_ind]):
+            # Only save a gradient file if ...(a) it has non-zero index
+            #                                 (b) it is type 'grad', not 'trap'
+            #      s                       and (c) its index has not been processed
+            # NOTE(review): (b) is detected by len(library entry) == 3 — assumes
+            # arbitrary gradients have 3-element entries; confirm for this
+            # pypulseq version.
+            if g_ind != 0 and len(seq.grad_library.data[g_ind]) == 3 \
+                and g_ind not in processed_g_inds:
+                print(f'Adding Gradient Number {g_ind}')
+                this_block = seq.get_block(nb)
+                file_path_partial = f'grad_{int(g_ind)}.h5'
+                file_path_full = out_folder + '/' + file_path_partial
+
+                #TODO make it work for x/y/z
+                if axis_ind == 0:
+                    t_points = this_block.gx.t
+                    g_shape = this_block.gx.waveform
+                elif axis_ind == 1:
+                    t_points = this_block.gy.t
+                    g_shape = this_block.gy.waveform
+                elif axis_ind == 2:
+                    t_points = this_block.gz.t
+                    g_shape = this_block.gz.waveform
+
+                N = len(t_points)
+                # Create file ('a' mode: reuse an existing file, replacing its dataset)
+                f = h5py.File(file_path_full, 'a')
+                if "extpulse" in f.keys():
+                    del f["extpulse"]
+                # Row 0: time points [ms]; row 1: gradient amplitude [(rad/ms)/mm]
+                f.create_dataset("extpulse", (2,N), dtype='f')
+                f["extpulse"][0,:] = t_points * sec2ms
+                f["extpulse"][1,:] = g_shape * g_const
+                f.close()
+                grad_shapes_path_dict[g_ind] = file_path_partial
+                processed_g_inds.append(g_ind)
+
+    return grad_shapes_path_dict
+
+# Notes
+# This is for generating an .xml file for input into JEMRIS simulator, from a Pulseq .seq file
+# The opposite philosophies make the .xml encoding suboptimal for storage
+# (because seq files consists of flattened-out Blocks while the JEMRIS format minimizes repetition using loops
+#  and consists of many cross-referencing of parameters)
+
+# Consider: for virtual scanner, have scripts that generate .xml and .seq at the same time (looped vs. flattened)
+# (but isn't JEMRIS already doing that? JEMRIS does have an "output to pulseq" functionality)
+# though then, having a Python interface instead of a MATLAB one is helpful in the open-source aspect
+
+
+# Unit conversion constants (comment with units before & after)
+rf_const = 2 * pi / 1000  # from Pulseq[Hz]=[1/s] to JEMRIS[rad/ms] rf magnitude conversion constant
+g_const = 2 * pi / 1e6  # from Pulseq [Hz/m] to JEMRIS [(rad/ms)/mm] gradient conversion constant
+slew_const = g_const / 1000  # from Pulseq [Hz/(m*s)] to JEMRIS [(rad/ms)/(mm*ms)]
+ga_const = 2 * pi / 1000  # from Pulseq[1/m] to JEMRIS [2*pi/mm] gradient area conversion constant
+sec2ms = 1000  # time conversion constant: seconds -> milliseconds
+rad2deg = 180/pi  # radians -> degrees (used below for JEMRIS InitialPhase)
+freq_const = 2 * pi / 1000 # From Hz to rad/ms
+
+#def seq2xml(seq, seq_name, out_folder):
+# NOTE(review): this file is the function from seq2xml.py unrolled into a flat
+# script (the def above is commented out); running the module performs the
+# conversion of the hard-coded .seq file immediately on import.
+"""
+# Takes a Pulseq sequence and converts it into .xml format for JEMRIS
+# All RF and gradient shapes are stored as .h5 files
+
+Inputs
+------
+seq : pypulseq.Sequence.sequence.Sequence
+seq_name : name of output .xml file
+out_folder : str
+    Path to output folder for .xml file
+
+Returns
+-------
+seq_tree : xml.etree.ElementTree
+    Tree object used for generating the sequence .xml file
+seq_path : str
+    Path to stored .xml sequence
+"""
+
+# Hard-coded absolute input/output paths (machine-specific)
+seq = pp.Sequence()
+seq.read('C:\\MRI_seq\\pypulseq\\seq_examples\\new_scripts\\epi_se_pypulseq.seq')
+seq_name='epi_se_pypulseq.seq_fixed_delay'
+out_folder='C:\\MRI_seq\\pypulseq\\seq_examples\\new_scripts'
+
+#seq.read('C:\\MRI_seq\\new_MRI_pulse_seq\\t1_SE\\t1_SE_matrx32x32.seq')
+#seq_name='t1_SE_matrx32x32_fixed_delay'
+#out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t1_SE'
+
+# Parameters is the root of the xml
+root = ET.Element("Parameters")
+# Add gradient limits (seem to be the only parameters shared by both formats)
+# TODO check units
+root.set("GradMaxAmpl", str(seq.system.max_grad*g_const))
+root.set("GradSlewRate", str(seq.system.max_slew*slew_const))
+
+# ConcatSequence is the element for the sequence itself;
+# Allows addition of multiple AtomicSequence
+C0 = ET.SubElement(root, "ConcatSequence")
+
+# Use helper functions to save all RF and only arbitrary gradient info
+# (each returns a dict: event ID -> relative .h5 file name)
+rf_shapes_path_dict = save_rf_library_info(seq, out_folder)
+grad_shapes_path_dict = save_grad_library_info(seq, out_folder)
+#print(grad_shapes_path_dict)
+#///////////////////////////////////////////////////////////////////////////
+
+
+# Running counters used only to generate unique XML element names
+rf_name_ind = 1
+grad_name_ind = 1
+delay_name_ind = 1
+adc_name_ind = 1
+
+
+##### Main loop! #####
+# Go through all blocks and add in events; each block is one AtomicSequence
+for block_ind in range(1,len(seq.block_events)+1):  # Pulseq block IDs are 1-based
+#for block_ind in range(3,4):
+    blk = seq.get_block(block_ind).__dict__
+    exists_adc = 'adc' in blk.keys()
+    adc_already_added = False
+    # Note: "EmptyPulse" class seems to allow variably spaced ADC sampling
+    # Distinguish between delay and others
+    # Question: in pulseq, does delay happen together with other events?
+    #           (for now, we assume delay always happens by itself)
+    # About name of atomic sequences: not adding names for now;
+    # (Likely it will cause no problems because there is no cross-referencing)
+    C_block = ET.SubElement(C0, "ATOMICSEQUENCE")
+    C_block.set("Name", f'C{block_ind}')
+    for key in blk.keys():
+        # Case of RF pulse
+        # NOTE(review): `type(x) != type(None)` is used throughout this file;
+        # idiomatic Python would be `x is not None` (behavior is the same here).
+        if key == 'rf':
+            rf = blk['rf']
+            if type(rf) != type(None):
+                rf_atom = ET.SubElement(C_block, "EXTERNALRFPULSE")
+    
+                rf_atom.set("Name", f'R{rf_name_ind}')
+                rf_name_ind += 1
+
+                rf_atom.set("InitialDelay", str(rf.delay*sec2ms))
+                rf_atom.set("InitialPhase", str(rf.phase_offset*rad2deg))
+                rf_atom.set("Frequency", str(rf.freq_offset*freq_const))
+                # Find ID of this rf event
+                rf_id = seq.block_events[block_ind][1]
+                rf_atom.set("Filename", rf_shapes_path_dict[rf_id])
+                rf_atom.set("Scale","1")
+                # NOTE(review): value is "0" but the trailing comment says
+                # "Do interpolate" — confirm JEMRIS semantics.
+                rf_atom.set("Interpolate", "0") # Do interpolate
+
+        # Column index of each gradient axis in a seq.block_events row
+        gnames_map = {'gx':2, 'gy':3, 'gz':4}
+        if key in ['gx', 'gy', 'gz']:
+            g = blk[key]
+            if type(g) != type(None): 
+                if g.type == "trap":
+                    # Zero-amplitude trapezoids are skipped entirely
+                    if g.amplitude != 0:
+                        g_atom = ET.SubElement(C_block, "TRAPGRADPULSE")
+                        g_atom.set("Name", f'G{grad_name_ind}')
+                        grad_name_ind += 1
+                        if g.flat_time > 0:
+                            # 1. Case where flat_time is nonzero
+                            # Second, fix FlatTopArea and FlatTopTime
+                            g_atom.set("FlatTopArea", str(g.flat_area*ga_const))
+                            g_atom.set("FlatTopTime", str(g.flat_time*sec2ms))
+    
+                            # Last, set axis and delay
+                        else:
+                            # 2. Case of triangular pulse (i.e. no flat part)
+                            g_atom.set("MaxAmpl", str(np.absolute(g.amplitude*g_const))) # limit amplitude
+                            g_atom.set("Area", str(0.5*(g.rise_time + g.fall_time)*g.amplitude*ga_const))
+    
+                        # Third, limit duration by limiting slew rate
+                        g_atom.set("SlewRate", str((g.amplitude * g_const) / (g.fall_time * sec2ms)))
+                        g_atom.set("Asymmetric", str(g.fall_time / g.rise_time))
+                        g_atom.set("Axis", key.upper())
+                        g_atom.set("InitialDelay", str(g.delay*sec2ms))
+    
+    
+                    # Add ADC if it exists and then "mark as complete"
+                elif g.type == "grad":
+                    # Set arbitrary grad parameters
+                    # Need to load h5 file again, just like in RF
+                    g_id = seq.block_events[block_ind][gnames_map[key]]
+                    g_atom = ET.SubElement(C_block, "EXTERNALGRADPULSE")
+                    g_atom.set("Name", f'G{grad_name_ind}')
+                    grad_name_ind += 1
+    
+                    g_atom.set("Axis", key.upper())
+                    g_atom.set("Filename", grad_shapes_path_dict[g_id])
+                    g_atom.set("Scale","1")
+                    g_atom.set("Interpolate","0")
+                    g_atom.set("InitialDelay",str(g.delay*sec2ms))
+    
+                else:
+                    print(f'Gradient type "{g.type}" indicated')
+                    raise ValueError("Gradient's type should be either trap or grad")
+    
+                # ADC is emitted as a zero-area TRAPGRADPULSE carrying the
+                # "ADCs" attribute, attached alongside the first gradient
+                # processed in this block (see suggestions below).
+                if exists_adc and not adc_already_added:
+                    adc = blk['adc']
+                    if type(adc) != type(None): 
+                        dwell = adc.dwell*sec2ms
+                        adc_delay = adc.delay*sec2ms
+                        Nro = adc.num_samples
+        
+                        gzero_adc = ET.SubElement(C_block, "TRAPGRADPULSE")
+                        gzero_adc.set("Name", f'S{adc_name_ind}')
+                        adc_name_ind += 1
+        
+                        gzero_adc.set("ADCs", str(Nro))
+                        gzero_adc.set("FlatTopTime", str(dwell*Nro))
+                        gzero_adc.set("FlatTopArea","0")
+                        gzero_adc.set("InitialDelay", str(adc_delay))
+        
+                        adc_already_added = True
+                    # Now, it always attach ADC to the first gradient found among keys()
+                    # This might be tricky
+                    # suggestion 1: check the duration of gradient?
+                    # suggestion 2: just do any gradient and hope it works
+                    # suggestion 3: read the JEMRIS documentation/try on GUI
+
+    # "Fixed delay" variant: instead of a 'delay' event (removed in newer
+    # pypulseq), a block with no rf/gradient/adc events is treated as a pure
+    # delay of length blk['block_duration']. The delay element is appended to
+    # the top-level ConcatSequence (C0), not to this block's ATOMICSEQUENCE.
+    if type(blk['gx']) == type(None) and type(blk['gy']) == type(None) and type(blk['gz']) == type(None) and type(blk['rf']) == type(None) and type(blk['adc']) == type(None) :
+        delay_dur = blk['block_duration']
+        delay_atom = ET.SubElement(C0, "DELAYATOMICSEQUENCE")
+
+        delay_atom.set("Name",f'D{delay_name_ind}')
+        delay_name_ind += 1
+
+        delay_atom.set("Delay",str(delay_dur*sec2ms))
+        delay_atom.set("DelayType","B2E")
+
+# Output it!
+# Serialize the element tree to <out_folder>/<seq_name>.xml
+seq_tree = ET.ElementTree(root)
+
+seq_path = out_folder + '/' + seq_name + '.xml'
+seq_tree.write(seq_path)
+
+#return seq_tree, seq_path
+
+
+
+
+
+
+
+
+# if __name__ == '__main__':
+#     print('')
+#     seq = pp.Sequence()
+#     seq.read('gre_pypulseq_1slice_for_sim.seq')
+#     seq2xml(seq, seq_name='gre_test1', out_folder='test')
+#    seq.read('seq_files/spgr_gspoil_N16_Ns1_TE5ms_TR10ms_FA30deg.seq')
+    #seq.read('benchmark_seq2xml/gre_jemris.seq')
+#    seq.read('try_seq2xml/spgr_gspoil_N15_Ns1_TE5ms_TR10ms_FA30deg.seq')
+    #seq.read('orc_test/seq_2020-02-26_ORC_54_9_384_1.seq')
+    #stree = seq2xml(seq, seq_name="ORC-Marina", out_folder='orc_test')
+

+ 40 - 0
LF_scanner/py2jemris/sim/8chheadcyl.xml

@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="utf-8"?>
+<CoilArray>
+   <BIOTSAVARTLOOP Azimuth="0" Dim="2" Extent="256" Name="C1" Points="64" Polar="90" Radius="100"
+                   XPos="256"
+                   YPos="0"
+                   ZPos="0"/>
+   <BIOTSAVARTLOOP Azimuth="45" Dim="2" Extent="256" Name="C2" Points="64" Polar="90" Radius="100"
+                   XPos="181.019"
+                   YPos="181.019"
+                   ZPos="0"/>
+   <BIOTSAVARTLOOP Azimuth="90" Dim="2" Extent="256" Name="C3" Points="64" Polar="90" Radius="100"
+                   XPos="0"
+                   YPos="256"
+                   ZPos="0"/>
+   <BIOTSAVARTLOOP Azimuth="135" Dim="2" Extent="256" Name="C4" Points="64" Polar="90"
+                   Radius="100"
+                   XPos="-181.019"
+                   YPos="181.019"
+                   ZPos="0"/>
+   <BIOTSAVARTLOOP Azimuth="180" Dim="2" Extent="256" Name="C5" Points="64" Polar="90"
+                   Radius="100"
+                   XPos="-256"
+                   YPos="0"
+                   ZPos="0"/>
+   <BIOTSAVARTLOOP Azimuth="225" Dim="2" Extent="256" Name="C6" Points="64" Polar="90"
+                   Radius="100"
+                   XPos="-181.019"
+                   YPos="-181.019"
+                   ZPos="0"/>
+   <BIOTSAVARTLOOP Azimuth="270" Dim="2" Extent="256" Name="C7" Points="64" Polar="90"
+                   Radius="100"
+                   XPos="0"
+                   YPos="-256"
+                   ZPos="0"/>
+   <BIOTSAVARTLOOP Azimuth="315" Dim="2" Extent="256" Name="C8" Points="64" Polar="90"
+                   Radius="100"
+                   XPos="181.019"
+                   YPos="-181.019"
+                   ZPos="0"/>
+</CoilArray>

+ 0 - 0
LF_scanner/py2jemris/sim/__init__.py


+ 22 - 0
LF_scanner/py2jemris/sim/epi.xml

@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Parameters FOVx="256" FOVy="256" GradMaxAmpl="2" GradSlewRate="10" Name="P" Nx="64" Ny="64" TE="50" TR="100">
+   <ConcatSequence Name="C1">
+      <AtomicSequence Name="A1">
+         <HARDRFPULSE Axis="RF" Duration="0.1" FlipAngle="90" Name="P1"/>
+      </AtomicSequence>
+      <AtomicSequence Name="A2">
+         <TrapGradPulse Area="-0.5*abs(A)" Axis="GX" Name="P2" Observe="A=P4.Area"/>
+         <TrapGradPulse Area="KMY" Axis="GY" Name="P3" Observe="KMY=P.KMAXy"/>
+      </AtomicSequence>
+      <DelayAtomicSequence Delay="TE" DelayType="C2C" Name="D1" Observe="TE=P.TE" StartSeq="A1" StopSeq="C2"/>
+      <ConcatSequence Name="C2" Observe="NY=P.Ny" Repetitions="NY">
+         <AtomicSequence Name="A3">
+            <TrapGradPulse ADCs="NX" Axis="GX" FlatTopArea="2*KMX*(-1)^C" FlatTopTime="1" Name="P4" Observe="KMX=P.KMAXx, C=C2.Counter, NX=P.Nx"/>
+         </AtomicSequence>
+         <AtomicSequence Name="A4">
+            <TrapGradPulse Area="ite(1+C,R,0,-DKY)" Axis="GY" Name="P5" Observe="DKY=P.DKy, C=C2.Counter, R=C2.Repetitions"/>
+         </AtomicSequence>
+      </ConcatSequence>
+      <DelayAtomicSequence Delay="TR" DelayType="B2E" Name="D2" Observe="TR=P.TR" StartSeq="A1"/>
+   </ConcatSequence>
+</Parameters>

+ 23 - 0
LF_scanner/py2jemris/sim/gre.xml

@@ -0,0 +1,23 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Parameters FOVx="128" FOVy="128" FOVz="1" Name="P" Nx="32" Ny="32" Nz="1" TE="8" TR="50">
+   <ConcatSequence Name="R">
+      <ConcatSequence Name="C" Observe="NY=P.Ny" Repetitions="NY">
+         <ATOMICSEQUENCE Name="A1">
+            <HARDRFPULSE Axis="RF" Duration="0.1" FlipAngle="20" InitialPhase="C*(C+1)*50" Name="P1" Observe="C=C.Counter"/>
+         </ATOMICSEQUENCE>
+         <DELAYATOMICSEQUENCE Delay="TE" DelayType="C2C" Name="D1" Observe="TE=P.TE" StartSeq="A1" StopSeq="A3"/>
+         <ATOMICSEQUENCE Name="A2">
+            <TRAPGRADPULSE Area="-A/2" Axis="GX" Name="P2" Observe="A=P4.Area"/>
+            <TRAPGRADPULSE Area="-KMY+C*DKY" Axis="GY" Name="P3" Observe="KMY=P.KMAXy, C=C.Counter, DKY=P.DKy"/>
+         </ATOMICSEQUENCE>
+         <ATOMICSEQUENCE Name="A3">
+            <TRAPGRADPULSE ADCs="NX" Axis="GX" FlatTopArea="2*KMX" FlatTopTime="4" Name="P4" Observe="KMX=P.KMAXx, NX=P.Nx" PhaseLock="1"/>
+         </ATOMICSEQUENCE>
+         <ATOMICSEQUENCE Name="A4">
+            <TRAPGRADPULSE Area="1.5*A" Axis="GX" Name="P6" Observe="A=P4.Area"/>
+            <TRAPGRADPULSE Area="-A" Axis="GY" Name="P7" Observe="A=P3.Area"/>
+         </ATOMICSEQUENCE>
+         <DELAYATOMICSEQUENCE Delay="TR" DelayType="B2E" Name="D2" Observe="TR=P.TR" StartSeq="A1"/>
+      </ConcatSequence>
+   </ConcatSequence>
+</Parameters>

BIN
LF_scanner/py2jemris/sim/ismrm_abstract/spgr_64_v2/phantom_bottles.mat


BIN
LF_scanner/py2jemris/sim/sample.h5


+ 31 - 0
LF_scanner/py2jemris/sim/tse.xml

@@ -0,0 +1,31 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Parameters FOVx="64" FOVy="64" FOVz="1" Name="P" Nx="32" Ny="32" Nz="1" TE="15" TR="150">
+   <ConcatSequence Name="TSE">
+      <ConcatSequence Name="O" Observe="NY=P.Ny, R=I.Repetitions" Repetitions="NY/R">
+         <ATOMICSEQUENCE Name="A1">
+            <HARDRFPULSE Axis="RF" Duration="0.1" FlipAngle="90" Name="P1"/>
+         </ATOMICSEQUENCE>
+         <ATOMICSEQUENCE Name="A2">
+            <TRAPGRADPULSE Area="0.5*A" Axis="GX" Name="P2" Observe="A=P5.Area"/>
+         </ATOMICSEQUENCE>
+         <DELAYATOMICSEQUENCE Delay="TE/2" DelayType="B2E" Name="D1" Observe="TE=P.TE" StartSeq="A1"/>
+         <CONCATSEQUENCE Name="I" Repetitions="4">
+            <ATOMICSEQUENCE Name="A3">
+               <HARDRFPULSE Axis="RF" Duration="0.1" FlipAngle="180" InitialPhase="90" Name="P3" Refocusing="1"/>
+            </ATOMICSEQUENCE>
+            <ATOMICSEQUENCE Name="A4">
+               <TRAPGRADPULSE Area="-KMY+DKY*(CI+RI*CO)" Axis="GY" Name="P4" Observe="KMY=P.KMAXy, DKY=P.DKy, CI=I.Counter, RI=I.Repetitions, CO=O.Counter"/>
+            </ATOMICSEQUENCE>
+            <DELAYATOMICSEQUENCE Delay="TE/2" DelayType="C2C" Name="D2" Observe="TE=P.TE" StartSeq="A3" StopSeq="A5"/>
+            <ATOMICSEQUENCE Name="A5">
+               <TRAPGRADPULSE ADCs="NX" Axis="GX" FlatTopArea="2*KMX" FlatTopTime="4" Name="P5" Observe="KMX=P.KMAXx, NX=P.Nx"/>
+            </ATOMICSEQUENCE>
+            <ATOMICSEQUENCE Name="A6">
+               <TRAPGRADPULSE Area="-A" Axis="GY" Name="P6" Observe="A=P4.Area"/>
+            </ATOMICSEQUENCE>
+            <DELAYATOMICSEQUENCE Delay="TE" DelayType="B2E" Name="D3" Observe="TE=P.TE" StartSeq="A3"/>
+         </CONCATSEQUENCE>
+         <DELAYATOMICSEQUENCE Delay="TR" DelayType="B2E" Name="D4" Observe="TR=P.TR" StartSeq="A1"/>
+      </ConcatSequence>
+   </ConcatSequence>
+</Parameters>

+ 4 - 0
LF_scanner/py2jemris/sim/uniform.xml

@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="utf-8"?>
+<CoilArray>
+  <IdealCoil/>
+</CoilArray>

BIN
LF_scanner/py2jemris/sim/utest_outputs/cylindrical.h5


BIN
LF_scanner/py2jemris/sim/utest_outputs/data32_orig.mat


+ 28 - 0
LF_scanner/py2jemris/sim/utest_outputs/gre32.xml

@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Parameters FOVx="256" FOVy="256" FOVz="1" Name="P" Nx="32" Ny="32" Nz="1" TE="10" TR="100">
+   <ConcatSequence Name="R">
+      <ConcatSequence Name="C" Observe="NY=P.Ny" Repetitions="NY">
+         <ATOMICSEQUENCE Name="A1">
+            <HARDRFPULSE Axis="RF" Duration="0.1" FlipAngle="20" InitialPhase="0" Name="P1"
+                         Observe="C=C.Counter"/>
+         </ATOMICSEQUENCE>
+         <DELAYATOMICSEQUENCE Delay="TE" DelayType="C2C" Name="D1" Observe="TE=P.TE" StartSeq="A1"
+                              StopSeq="A3"/>
+         <ATOMICSEQUENCE Name="A2">
+            <TRAPGRADPULSE Area="-A/2" Axis="GX" Name="P2" Observe="A=P4.Area"/>
+            <TRAPGRADPULSE Area="-KMY+C*DKY" Axis="GY" Name="P3"
+                           Observe="KMY=P.KMAXy, C=C.Counter, DKY=P.DKy"/>
+         </ATOMICSEQUENCE>
+         <ATOMICSEQUENCE Name="A3">
+            <TRAPGRADPULSE ADCs="NX" Axis="GX" FlatTopArea="2*KMX" FlatTopTime="4" Name="P4"
+                           Observe="KMX=P.KMAXx, NX=P.Nx"
+                           PhaseLock="0"/>
+         </ATOMICSEQUENCE>
+         <ATOMICSEQUENCE Name="A4">
+            <TRAPGRADPULSE Area="1.5*A" Axis="GX" Name="P6" Observe="A=P4.Area"/>
+            <TRAPGRADPULSE Area="-A" Axis="GY" Name="P7" Observe="A=P3.Area"/>
+         </ATOMICSEQUENCE>
+         <DELAYATOMICSEQUENCE Delay="TR" DelayType="B2E" Name="D2" Observe="TR=P.TR" StartSeq="A1"/>
+      </ConcatSequence>
+   </ConcatSequence>
+</Parameters>

BIN
LF_scanner/py2jemris/sim/utest_outputs/signals.h5


+ 1 - 0
LF_scanner/py2jemris/sim/utest_outputs/simu.xml

@@ -0,0 +1 @@
+<simulate name="JEMRIS"><sample name="cylindrical" uri="cylindrical.h5" /><TXcoilarray uri="uniform.xml" /><RXcoilarray uri="uniform.xml" /><parameter ConcomitantFields="0" EvolutionPrefix="evol" EvolutionSteps="0" RandomNoise="0" /><sequence name="gre32" uri="gre32.xml" /><model name="Bloch" type="CVODE" /></simulate>

+ 4 - 0
LF_scanner/py2jemris/sim/utest_outputs/uniform.xml

@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="utf-8"?>
+<CoilArray>
+  <IdealCoil/>
+</CoilArray>

+ 53 - 0
LF_scanner/py2jemris/sim2xml.py

@@ -0,0 +1,53 @@
+import xml.etree.ElementTree as ET
+from io import BytesIO
+
+def sim2xml(sim_name="simu", seq="example.xml", phantom="sample.h5", Tx="uniform.xml", Rx="uniform.xml",
+            seq_name="Sequence", sample_name="Sample", out_folder_name = None):
+
+    root = ET.Element("simulate")
+    root.set("name", "JEMRIS")
+
+    sample = ET.SubElement(root, "sample")
+    sample.set("name", sample_name)
+    sample.set("uri", phantom)
+
+    TXcoilarray = ET.SubElement(root, "TXcoilarray")
+    TXcoilarray.set("uri", Tx)
+
+    RXcoilarray = ET.SubElement(root, "RXcoilarray")
+    RXcoilarray.set("uri", Rx)
+
+    parameter = ET.SubElement(root, "parameter")
+    parameter.set("RandomNoise", "0")
+    parameter.set("EvolutionSteps", "0")
+    parameter.set("EvolutionPrefix", "evol")
+    parameter.set("ConcomitantFields", "0")
+
+    sequence = ET.SubElement(root, "sequence")
+    sequence.set("name", seq_name)
+    sequence.set("uri", seq)
+
+    model = ET.SubElement(root, "model")
+    model.set("name", "Bloch")
+    model.set("type", "CVODE")
+
+    sim_tree = ET.ElementTree(root)
+    sim_out_path = out_folder_name + '/' + sim_name + '.xml'
+    sim_tree.write(sim_out_path)
+
+    return sim_out_path
+
+
+
+
+# Fig 1. Draw diagram of what py2jemris consists of
+# Fig 2. SDC Debug progress
+
+
+
+
+
+
+if __name__ == '__main__':
+    sim2xml(seq="gre_jemris_seq2xml.xml", phantom="sample.h5", Tx="uniform.xml", Rx="uniform.xml",
+            seq_name="Sequence", sample_name="Sample", out_folder_name="try_seq2xml")

+ 181 - 0
LF_scanner/py2jemris/sim_jemris.py

@@ -0,0 +1,181 @@
+# Caller script for executing a simulation with JEMRIS (prior installation required)
+# Gehua Tong, April 2020
+
+
+
+
+from seq2xml import seq2xml
+from sim2xml import sim2xml
+from recon_jemris import read_jemris_output
+from coil2xml import coil2xml
+import subprocess
+import tkinter as tk
+from tkinter.filedialog import askopenfilename
+
+#from virtualscanner.core import constants
+import h5py
+import os
+
+# Paths
+#PY2JEMRIS_SIM_PATH = constants.SERVER_SIM_BLOCH_PY2JEMRIS_PATH / 'sim'
+from scipy.io import savemat
+import time
+
+def ask_for_sim_files():
+    """Helper function for sim_jemris;
+       Asks the user for simulation files through file system selection
+
+    Returns
+    -------
+    files_list : list
+        A dictionary indicating paths to the files required to construct simu.xml
+
+    """
+    files_list = {}
+    names = ['seq_xml', 'pht_h5', 'tx_xml', 'rx_xml']
+    prompt_list = ['sequence file (.xml)', 'phantom file (.h5)','Tx file (.xml)', 'Rx file (.xml)']
+
+    for u in range(len(prompt_list)-1):
+        print(f"Pick your {prompt_list[u]}.")
+        tk.Tk().withdraw()
+        filename = askopenfilename()
+        files_list[names[u]] = filename
+
+    return files_list
+
+
+
+def run_jemris(working_folder = None):
+    """Runs JEMRIS simulation on system command line
+       Assumes that the working folder contains all required files and
+               that JEMRIS is installed and added to PATH on the operating system
+       Simply, the command "jemris simu.xml" is run and the path to signals.h5 is returned
+
+    Inputs
+    ------
+    working_folder : str
+        Working folder where the simulation is performed
+
+    Returns
+    -------
+    signal path : str or pathlib Path object
+        Path to JEMRIS simulation output data file (this file is always called signals.h5)
+
+    """
+    print("Simulating using JEMRIS ...")
+    # Always run from the py2jemris/sim directory
+    if working_folder is None:
+        working_folder = 'sim'
+    original_wd = os.getcwd()
+    os.chdir(working_folder)
+    print(os.system('dir'))
+    out = os.system('jemris simu.xml')
+    print(out)
+    os.chdir(original_wd)
+    # Find signal.h5
+    if isinstance(working_folder, str):
+        signal_path = working_folder + '/signals.h5'
+    else:
+        signal_path = working_folder / 'signals.h5' # Return the absolute signal path here
+
+
+    return signal_path
+
+def sim_jemris(list_sim_files=None, working_folder=None):
+    """Runs a JEMRIS MR simulation using given .xml and .h5 files
+              based on custom file inputs. Returns complex signal data.
+    Inputs
+    ------
+    list_sim_files : dict
+        Dictionary of paths to relevant simulation files
+    working_folder : str
+        Working folder where the simulation is performed
+
+
+    Returns
+    -------
+    output : dict
+        Complex signal data with 3 fields
+        'Mxy' : Complex representation of transverse magnetization
+        'M_vec' : 3D representation of magnetization (Mx, My, Mz)
+        'T' : Timing of readout points
+
+    """
+
+    # Use interactive option if there is no dictionary input
+    all_files_exist = False
+    while not all_files_exist:
+        try:
+            seq_xml = list_sim_files['seq_xml']
+            pht_h5 = list_sim_files['pht_h5']
+            tx_xml = list_sim_files['tx_xml']
+            rx_xml = list_sim_files['rx_xml']
+
+            all_files_exist = True
+        except:
+            list_sim_files = ask_for_sim_files()
+
+    # Extract sequence and phantom name
+    seq_name = seq_xml[seq_xml.rfind('/')+1:seq_xml.rfind('.xml')]
+    pht_name = pht_h5[pht_h5.rfind('/')+1:pht_h5.rfind('.h5')]
+
+    # Make simu.xml
+    sim2xml(sim_name='simu', seq=seq_xml, phantom=pht_h5, Tx=tx_xml, Rx=rx_xml,
+                        seq_name=seq_name, sample_name=pht_name, out_folder_name=str(working_folder))
+    # Run JEMRIS on command line
+    signal_path = run_jemris(working_folder)
+    print(signal_path)
+
+    file_discovered = False
+    print((os.path.abspath(signal_path)))
+    while not file_discovered:
+        file_discovered = os.path.exists(os.path.abspath(signal_path))
+        print(file_discovered)
+        time.sleep(2)
+
+    Mxy_out, M_vec_out, times_out = read_jemris_output(signal_path)
+    output = {'Mxy': Mxy_out, "M_vec": M_vec_out, 'T': times_out}
+
+    return output
+
+
+
+from recon_jemris import *
+
+
+
+if __name__ == '__main__':
+
+    # JEMRIS seq.h5
+    #T   = h5read('seq.h5','/seqdiag/T');           % temporal sampling points
+    #RXP = h5read('seq.h5','/seqdiag/RXP');         % RF Receiver phase; unit: radians; if negative, the TPOI was not an ADC
+    #TXM = h5read('seq.h5','/seqdiag/TXM');         % RF Transmitter magnitude
+    #TXP = h5read('seq.h5','/seqdiag/TXP');         % RF Transmitter phase; unit: radians
+    #GX  = h5read('seq.h5','/seqdiag/GX');          % physical X-Gradient
+    #GY  = h5read('seq.h5','/seqdiag/GY');          % physical Y-Gradient
+    #GZ  = h5read('seq.h5','/seqdiag/GZ');          % physical Z-Gradient
+
+
+    #['seq_xml', 'pht_h5', 'tx_xml', 'rx_xml', 'working_path'
+    #output = sim_jemris()
+  #  print(output)
+   # sim2xml(seq="gre.xml", phantom="sample.h5", Tx="uniform.xml", Rx="uniform.xml",
+  #        seq_name="Sequence", sample_name="Sample", out_folder_name="sim")
+
+
+    # "Sim test" April 17 for seq2xml
+
+    # First, sim using original gre
+    list_sim_orig = {'seq_xml': 'gre32.xml', 'pht_h5': 'cylindrical.h5', 'tx_xml':'uniform.xml',
+                       'rx_xml': 'uniform.xml'}
+    out = sim_jemris(list_sim_orig, working_folder = 'sim/test0504')
+    savemat('sim/test0504/data32_orig.mat',out)
+
+
+
+    # Second, use twice converted (.xml output of seq2xml)
+    list_sim_twice = {'seq_xml': 'gre32_twice.xml', 'pht_h5': 'cylindrical.h5', 'tx_xml':'uniform.xml',
+                      'rx_xml': 'uniform.xml'}
+   # out = sim_jemris(list_sim_twice, working_folder = 'sim/test0504')
+   # savemat('sim/test0504/data32_twice.mat',out)
+

+ 37 - 0
LF_scanner/py2jemris/sim_py2jemris_ismrm2021.py

@@ -0,0 +1,37 @@
+import os
+from pulseq_jemris_simulator import simulate_pulseq_jemris, recon_jemris
+from scipy.io import savemat, loadmat
+import numpy as np
+from datetime import datetime
+
+
+# SPGR
+# Create phantom
+n = 16
+#phantom_info = {'fov': 0.25, 'N': n, 'type': 'cylindrical', 'dim': 2, 'dir': 'z', 'loc': 0}
+
+
+
+sps = 'sim/ismrm_abstract/spgr_16_pht/spgr_gspoil_N16_Ns1_TE10ms_TR50ms_FA30deg_acq_111920.seq'
+sim_name = 'ismrm_abstract\\spgr_16_pht'
+phtmaps = loadmat('sim/ismrm_abstract/spgr_16_pht/ph2bottles16.mat')
+FOV = 0.25
+N = 16
+dr = FOV/N
+t1map = np.zeros((N,N,1))
+t1map[:,:,0] = 1e-3 * phtmaps['T1map16'] # Original is in ms; convert to seconds
+t2map = np.zeros((N,N,1))
+t2map[:,:,0] = 1e-3 * phtmaps['T2map16'] # Original is in ms; convert to seconds
+pdmap = np.zeros((N,N,1))
+pdmap[:,:,0] = phtmaps['PDmap16']
+
+phantom_info = {'T1': t1map, 'T2': t2map, 'PD': pdmap,
+                'dr': dr, 'fov': FOV, 'N': N, 'type': 'custom', 'dim': 2, 'dir': 'z', 'loc': 0}
+
+
+# Simulate
+print('Starting at: ', datetime.now())
+simulate_pulseq_jemris(seq_path=sps, phantom_info=phantom_info, sim_name=sim_name, coil_fov=0.25)
+kk, im, images = recon_jemris(file='sim/' + sim_name + '/signals.h5', dims=[n, n])
+savemat('sim/' + sim_name + '/utest_pulseq_sim_output.mat', {'images': images, 'kspace': kk, 'imspace': im})
+print('Ending at: ', datetime.now())

+ 38 - 0
LF_scanner/py2jemris/sim_seq_validation.py

@@ -0,0 +1,38 @@
+import os
+from pulseq_jemris_simulator import simulate_pulseq_jemris, recon_jemris
+from scipy.io import savemat, loadmat
+
+
+# IRSE
+n = 32
+phantom_info = {'fov': 0.25, 'N': n, 'type': 'cylindrical', 'dim': 2, 'dir': 'z', 'loc': 0}
+sps = 'sim/seq_validation/irse_32/irse32.seq'
+sim_name = 'seq_validation\\irse_32'
+
+#Simulate
+simulate_pulseq_jemris(seq_path=sps, phantom_info=phantom_info, sim_name=sim_name, coil_fov=0.25)
+kk, im, images = recon_jemris(file='sim/' + sim_name + '/signals.h5', dims=[n, n])
+savemat('sim/' + sim_name + '/utest_pulseq_sim_output.mat', {'images': images, 'kspace': kk, 'imspace': im})
+
+
+# #
+# # TSE
+# n = 32
+# phantom_info = {'fov': 0.25, 'N': n, 'type': 'cylindrical', 'dim': 2, 'dir': 'z', 'loc': -0.08}
+# sps = 'sim/seq_validation/tse_32/tse32.seq'
+# sim_name = 'seq_validation\\tse_32'
+# # Make sequence
+# simulate_pulseq_jemris(seq_path=sps, phantom_info=phantom_info, sim_name=sim_name, coil_fov=0.25)
+# kk, im, images = recon_jemris(file='sim/' + sim_name + '/signals.h5', dims=[n, n])
+# savemat('sim/' + sim_name + '/TSE-T2PLANE-utest_pulseq_sim_output.mat', {'images': images, 'kspace': kk, 'imspace': im})
+#
+
+# ## DWI
+# n = 32
+# phantom_info = {'fov':0.25, 'N':n, 'type': 'cylindrical', 'dim': 2, 'dir': 'z', 'loc': -0.08}
+# sps = 'sim/seq_validation/dwi_32/dwi32.seq'
+# sim_name = 'seq_validation\\tse_32'
+#
+# simulate_pulseq_jemris(seq_path=sps, phantom_info=phantom_info, sim_name=sim_name, coil_fov=0.25)
+# kk, im, images = recon_jemris(file='sim/'+sim_name+'/signals.h5',dims=[n,n])
+# savemat('sim/'+ sim_name + '/dwi_pulseq_sim_output.mat',{'images':images, 'kspace':kk, 'imspace':im})

+ 181 - 0
LF_scanner/py2jemris/utest_py2jemris_script.py

@@ -0,0 +1,181 @@
+# Demonstrates usage of py2jemris functionalities
+# May be used for quick testing
+# Gehua Tong
+# May 18, 2020
+
+from coil2xml import coil2xml
+from seq2xml import seq2xml
+from sim_jemris import sim_jemris
+from pulseq_jemris_simulator import simulate_pulseq_jemris, create_and_save_phantom
+from recon_jemris import recon_jemris
+import phantom as pht
+from pulseq_library import make_pulseq_irse, make_pulseq_se_oblique
+
+import numpy as np
+import matplotlib.pyplot as plt
+from pypulseq.Sequence.sequence import Sequence
+from scipy.io import loadmat, savemat
+
+
+#from virtualscanner.core.constants import SERVER_SIM_BLOCH_PY2JEMRIS_PATH
+
+import os
+import h5py
+
+
+utest_path = 'sim/utest_outputs'
+sim_path = 'sim'
+
+
+def utest_coil2xml():
+    # Example on using coil2xml
+    # Generate coil using B1 maps and plot
+    # 4 channels with different B1 maps
+
+    b1 = np.ones((32,32))
+    XY = np.meshgrid(np.linspace(0,1,32), np.linspace(0,1,32))
+    X = XY[0]
+    Y = XY[1]
+
+    # Define coil sensitivity maps (complex arrays, in general)
+    b1_ch1 = np.sqrt(X**2 + Y**2)
+    b1_ch2 = np.rot90(b1_ch1)
+    b1_ch3 = np.rot90(b1_ch2)
+    b1_ch4 = np.rot90(b1_ch3)
+
+    coil2xml(b1maps=[b1_ch1, b1_ch2, b1_ch3, b1_ch4], fov=200, name='test_coil', out_folder=utest_path)
+
+    # Generate sensmaps.h5 using JEMRIS command
+    os.chdir(utest_path)
+    print(os.system('dir'))
+    out = os.system('jemris test_coil.xml')
+    os.chdir('../..')
+    print(out)
+
+
+    # Load sensmaps.h5 and plot coil
+    a = h5py.File(utest_path + '/sensmaps.h5', 'r')
+    maps_magnitude = a['maps/magnitude']
+    maps_phase = a['maps/phase']
+    plt.figure(1)
+    plt.title('Coil sensitivity maps')
+    for u in range(4):
+        plt.subplot(2,4,u+1)
+        plt.gray()
+        plt.imshow(maps_magnitude[f'0{u}'])
+        plt.title(f'Magnitude Ch #{u+1}')
+        plt.subplot(2,4,u+5)
+        plt.gray()
+        plt.imshow(maps_phase[f'0{u}'])
+        plt.title(f'Phase Ch #{u+1}')
+    plt.show()
+    return
+
+def utest_seq2xml():
+    # Make a sequence
+    seq = make_pulseq_irse(fov=0.256, n=16, thk=0.01, fa=15, tr=150, te=30, ti=10,
+                           enc='xyz', slice_locs=None, write=False)
+
+    # Convert to .xml format
+    seq2xml(seq, seq_name='irse16_pulseq', out_folder=utest_path)
+
+
+
+    # Use JEMRIS to generate sequence diagrams from .xml sequence
+    os.chdir(utest_path)
+    print(os.system('dir'))
+    out = os.system(f'jemris -x -d id=1 -f irse16_pulseq irse16_pulseq.xml')
+    print(out)
+    os.chdir('../..')
+
+    # Read sequence diagram and plot
+    data = h5py.File(utest_path + '/irse16_pulseq.h5','r')
+    diag = data['seqdiag']
+
+    t = diag['T']
+    gx = diag['GX']
+    gy = diag['GY']
+    gz = diag['GZ']
+    rxp = diag['RXP']
+    txm = diag['TXM']
+    txp = diag['TXP']
+
+    ylist = [txm, txp, gx, gy, gz, rxp]
+    title_list = ['RF Tx magnitude', 'RF Tx phase', 'Gx', 'Gy', 'Gz', 'RF Rx phase']
+    styles = ['r-', 'g-', 'k-', 'k-', 'k-', 'bx']
+    plt.figure(1)
+    for v in range(6):
+        plt.subplot(6,1,v+1)
+        plt.plot(t, ylist[v], styles[v])
+        plt.title(title_list[v])
+        plt.xlabel('Time')
+
+    plt.show()
+
+
+    return
+
+def utest_sim_jemris():
+    # Copy helping files in
+    os.chdir(sim_path)
+    out = os.system('copy uniform.xml utest_outputs')
+    print(out)
+    os.chdir('..')
+
+    utest_phantom_output_h5()
+
+    list_sim_orig = {'seq_xml': 'gre32.xml', 'pht_h5': 'cylindrical.h5', 'tx_xml':'uniform.xml',
+                       'rx_xml': 'uniform.xml'}
+    out = sim_jemris(list_sim_orig, working_folder = utest_path)
+    os.chdir(utest_path)
+    savemat('data32_orig.mat',out)
+    print('Data is saved in py2jemris/sim/utest_outputs/data32_orig.mat')
+    os.chdir('../..')
+    return
+
+def utest_pulseq_sim():
+    # TODO this !
+    # Demonstrates simulation pipeline using pulseq inputs
+
+    # Define the same phantom
+    phantom_info = {'fov': 0.256, 'N': 15, 'type': 'cylindrical', 'dim': 2, 'dir': 'z', 'loc': 0}
+    sps =  'sim/utest_outputs/se_fov256mm_Nf15_Np15_TE50ms_TR200ms_FA90deg.seq'
+    sim_name = 'utest_outputs'
+    # Make sequence
+    os.chdir(utest_path)
+    make_pulseq_se_oblique(fov=0.256,n=15, thk=0.005, tr=0.2, te=0.05, fa=90,
+                              enc='xyz', slice_locs=[0], write=True)
+
+
+    os.chdir('../..')
+    simulate_pulseq_jemris(seq_path=sps, phantom_info=phantom_info, sim_name=sim_name,
+                           coil_fov=0.256)
+
+    kk, im, images = recon_jemris(file='sim/' + sim_name + '/signals.h5', dims=[15, 15])
+
+    savemat('sim/' + sim_name + '/utest_pulseq_sim_output.mat', {'images': images, 'kspace': kk, 'imspace': im})
+    print('Simulation result is in py2jemris/sim/utest_outputs/utest_pulseq_sim_output.mat')
+
+    # Plot results
+    plt.figure(1)
+    plt.gray()
+    plt.imshow(np.squeeze(images))
+    plt.show()
+
+    return
+
+def utest_phantom_output_h5():
+    # Creates a virtual scanner phantom and saves it as an .h5 file (per JEMRIS standard)
+    phantom_info = {'fov': 0.256, 'N': 32, 'type': 'cylindrical', 'dim': 2, 'dir': 'z', 'loc': 0}
+    create_and_save_phantom(phantom_info, out_folder=utest_path)
+    return
+
+
+if __name__ == '__main__':
+
+    # Run all "utests"
+    #utest_coil2xml() # Converts B1 map into .h5 and .xml files for JEMRIS
+    #utest_phantom_output_h5() # Makes a virtual scanner phantom and converts it into .h5 format for JEMRIS
+    #utest_seq2xml() # Makes a pypulseq sequence and converts it into .xml and .h5 files for JEMRIS
+    #utest_sim_jemris() # Calls JEMRIS on command line using pre-made files
+    utest_pulseq_sim() # Calls pipeline (INPUT: seq + phantom info + FOV ; OUTPUT: complex image space & k-space, images)

BIN
LF_scanner/pypulseq/SAR/QGlobal.mat


+ 325 - 0
LF_scanner/pypulseq/SAR/SAR_calc.py

@@ -0,0 +1,325 @@
+# Copyright of the Board of Trustees of Columbia University in the City of New York
+from pathlib import Path
+from typing import Tuple
+from typing import Union
+
+import matplotlib.pyplot as plt
+import numpy as np
+import numpy.matlib
+import scipy.io as sio
+from scipy import interpolate
+
+from LF_scanner.pypulseq.Sequence.sequence import Sequence
+from LF_scanner.pypulseq.calc_duration import calc_duration
+
+
+def _calc_SAR(Q: np.ndarray, I: np.ndarray) -> np.ndarray:
+    """
+    Compute the SAR output for a given Q matrix and I current values.
+
+    Parameters
+    ----------
+    Q : numpy.ndarray
+        Q matrix. Refer Graesslin, Ingmar, et al. "A specific absorption rate prediction concept for parallel
+        transmission MR." Magnetic resonance in medicine 68.5 (2012): 1664-1674.
+    I : numpy.ndarray
+        I matrix, capturing the current (in Amps) on each of the transmit channels. Refer Graesslin, Ingmar, et al. "A
+        specific absorption rate prediction concept for parallel transmission MR." Magnetic resonance in medicine
+        68.5 (2012): 1664-1674.
+
+    Returns
+    -------
+    SAR : numpy.ndarray
+       Contains the SAR value for a particular Q matrix
+    """
+
+    if len(I.shape) == 1:  # Just to fit the multi-transmit case for now, TODO
+        I = np.tile(I, (Q.shape[0], 1))  # Nc x Nt
+
+    I_fact = np.divide(np.matmul(I, np.conjugate(I).T), I.shape[1])
+    SAR_temp = np.multiply(Q, I_fact)
+    SAR = np.abs(np.sum(SAR_temp[:]))
+
+    return SAR
+
+
+def _load_Q() -> Tuple[np.ndarray, np.ndarray]:
+    """
+    Load Q matrix that is precomputed based on the VHM model for 8 channels. Refer Graesslin, Ingmar, et al. "A
+    specific absorption rate prediction concept for parallel transmission MR." Magnetic resonance in medicine 68.5
+    (2012): 1664-1674.
+
+    Returns
+    -------
+    Qtmf, Qhmf : numpy.ndarray
+        Contains the Q-matrix of global SAR values for body-mass and head-mass respectively.
+    """
+    # Load relevant Q matrices computed from the model - this code will be integrated later - starting from E fields
+    path_Q = str(Path(__file__).parent / "QGlobal.mat")
+    Q = sio.loadmat(path_Q)
+    Q = Q["Q"]
+    val = Q[0, 0]
+
+    Qtmf = val["Qtmf"]
+    Qhmf = val["Qhmf"]
+    return Qtmf, Qhmf
+
+
+def _SAR_from_seq(
+    seq: Sequence, Qtmf: np.ndarray, Qhmf: np.ndarray
+) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
+    """
+    Compute global whole body and head only SAR values for the given `seq` object.
+
+    Parameters
+    ----------
+    seq : Sequence
+        Sequence object to calculate for which SAR values will be calculated.
+    Qtmf : numpy.ndarray
+        Q-matrix of global SAR values for body-mass.
+    Qhmf : numpy.ndarray
+        Q-matrix of global SAR values for head-mass.
+
+    Returns
+    -------
+    SAR_wbg : numpy.ndarray
+        SAR values for body-mass.
+    SAR_hg : numpy.ndarray
+        SAR values for head-mass.
+    t : numpy.ndarray
+        Corresponding time points.
+    """
+    # Identify RF blocks and compute SAR - 10 seconds must be less than twice and 6 minutes must be less than
+    # 4 (WB) and 3.2 (head-20)
+    block_events = seq.block_events
+    num_events = len(block_events)
+    t = np.zeros(num_events)
+    SAR_wbg = np.zeros(t.shape)
+    SAR_hg = np.zeros(t.shape)
+    t_prev = 0
+
+    for block_counter in block_events:
+        block = seq.get_block(block_counter)
+        block_dur = calc_duration(block)
+        t[block_counter - 1] = t_prev + block_dur
+        t_prev = t[block_counter - 1]
+        if hasattr(block, "rf"):  # has rf
+            rf = block.rf
+            signal = rf.signal
+            # This rf could be parallel transmit as well
+            SAR_wbg[block_counter] = _calc_SAR(Qtmf, signal)
+            SAR_hg[block_counter] = _calc_SAR(Qhmf, signal)
+
+    return SAR_wbg, SAR_hg, t
+
+
+def _SAR_interp(SAR: np.ndarray, t: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
+    """
+    Interpolate SAR values for one second resolution.
+
+    Parameters
+    ----------
+    SAR : numpy.ndarray
+        SAR values
+    t : numpy.ndarray
+        Current time points.
+
+    Returns
+    -------
+    SAR_interp : numpy.ndarray
+        Interpolated values of SAR for a temporal resolution of 1 second.
+    t_sec : numpy.ndarray
+        Time points at 1 second resolution.
+    """
+    t_sec = np.arange(1, np.floor(t[-1]) + 1, 1)
+    f = interpolate.interp1d(t, SAR)
+    SAR_interp = f(t_sec)
+    return SAR_interp, t_sec
+
+
+def _SAR_lims_check(
+    SARwbg_lim_s, SARhg_lim_s, tsec
+) -> Tuple[
+    np.ndarray,
+    np.ndarray,
+    np.ndarray,
+    np.ndarray,
+    np.ndarray,
+    np.ndarray,
+    np.ndarray,
+    np.ndarray,
+]:
+    """
+    Check for SAR violations as compared to IEC 10 second and 6 minute averages;
+    returns SAR values that are interpolated for the fixed IEC time intervals.
+
+    Parameters
+    ----------
+    SARwbg_lim_s : numpy.ndarray
+    SARhg_lim_s : numpy.ndarray
+    tsec : numpy.ndarray
+
+    Returns
+    -------
+    SAR_wbg_tensec : numpy.ndarray
+    SAR_wbg_sixmin : numpy.ndarray
+    SAR_hg_tensec : numpy.ndarray
+    SAR_hg_sixmin : numpy.ndarray
+    SAR_wbg_sixmin_peak : numpy.ndarray
+    SAR_hg_sixmin_peak : numpy.ndarray
+    SAR_wbg_tensec_peak : numpy.ndarray
+    SAR_hg_tensec_peak : numpy.ndarray
+    """
+    if tsec[-1] > 10:
+        six_min_threshold_wbg = 4
+        ten_sec_threshold_wbg = 8
+
+        six_min_threshold_hg = 3.2
+        ten_sec_threshold_hg = 6.4
+
+        SAR_wbg_lim_app = np.concatenate(
+            (np.zeros(5), SARwbg_lim_s, np.zeros(5)), axis=0
+        )
+        SAR_hg_lim_app = np.concatenate((np.zeros(5), SARhg_lim_s, np.zeros(5)), axis=0)
+
+        SAR_wbg_tensec = _do_sw_sar(SAR_wbg_lim_app, tsec, 10)  # < 2  SARmax
+        SAR_hg_tensec = _do_sw_sar(SAR_hg_lim_app, tsec, 10)  # < 2 SARmax
+        SAR_wbg_tensec_peak = np.round(np.max(SAR_wbg_tensec), 2)
+        SAR_hg_tensec_peak = np.round(np.max(SAR_hg_tensec), 2)
+
+        if (np.max(SAR_wbg_tensec) > ten_sec_threshold_wbg) or (
+            np.max(SAR_hg_tensec) > ten_sec_threshold_hg
+        ):
+            print("Pulse exceeding 10 second Global SAR limits, increase TR")
+        SAR_wbg_sixmin = "NA"
+        SAR_hg_sixmin = "NA"
+        SAR_wbg_sixmin_peak = "NA"
+        SAR_hg_sixmin_peak = "NA"
+
+        if tsec[-1] > 600:
+            SAR_wbg_lim_app = np.concatenate(
+                (np.zeros(300), SARwbg_lim_s, np.zeros(300)), axis=0
+            )
+            SAR_hg_lim_app = np.concatenate(
+                (np.zeros(300), SARhg_lim_s, np.zeros(300)), axis=0
+            )
+
+            SAR_hg_sixmin = _do_sw_sar(SAR_hg_lim_app, tsec, 600)
+            SAR_wbg_sixmin = _do_sw_sar(SAR_wbg_lim_app, tsec, 600)
+            SAR_wbg_sixmin_peak = np.round(np.max(SAR_wbg_sixmin), 2)
+            SAR_hg_sixmin_peak = np.round(np.max(SAR_hg_sixmin), 2)
+
+            if (np.max(SAR_hg_sixmin) > six_min_threshold_wbg) or (
+                np.max(SAR_hg_sixmin) > six_min_threshold_hg
+            ):
+                print("Pulse exceeding 10 second Global SAR limits, increase TR")
+    else:
+        print("Need at least 10 seconds worth of sequence to calculate SAR")
+        SAR_wbg_tensec = "NA"
+        SAR_wbg_sixmin = "NA"
+        SAR_hg_tensec = "NA"
+        SAR_hg_sixmin = "NA"
+        SAR_wbg_sixmin_peak = "NA"
+        SAR_hg_sixmin_peak = "NA"
+        SAR_wbg_tensec_peak = "NA"
+        SAR_hg_tensec_peak = "NA"
+
+    return (
+        SAR_wbg_tensec,
+        SAR_wbg_sixmin,
+        SAR_hg_tensec,
+        SAR_hg_sixmin,
+        SAR_wbg_sixmin_peak,
+        SAR_hg_sixmin_peak,
+        SAR_wbg_tensec_peak,
+        SAR_hg_tensec_peak,
+    )
+
+
+def _do_sw_sar(SAR: np.ndarray, tsec: np.ndarray, t: np.ndarray) -> np.ndarray:
+    """
+    Compute a sliding window average of SAR values.
+
+    Parameters
+    ----------
+    SAR : numpy.ndarray
+        SAR values.
+    tsec : numpy.ndarray
+        Corresponding time points at 1 second resolution.
+    t : numpy.ndarray
+        Corresponding time points.
+
+    Returns
+    -------
+    SAR_time_avg : numpy.ndarray
+        Sliding window time average of SAR values.
+    """
+    SAR_time_avg = np.zeros(len(tsec) + int(t))
+    for instant in range(
+        int(t / 2), int(t / 2) + (int(tsec[-1]))
+    ):  # better to go from  -sw / 2: sw / 2
+        SAR_time_avg[instant] = (
+            sum(SAR[range(instant - int(t / 2), instant + int(t / 2) - 1)]) / t
+        )
+    SAR_time_avg = SAR_time_avg[int(t / 2) : int(t / 2) + (int(tsec[-1]))]
+    return SAR_time_avg
+
+
+def calc_SAR(file: Union[str, Path, Sequence]) -> None:
+    """
+    Compute Global SAR values on the `.seq` object for head and whole body over the specified time averages.
+
+    Parameters
+    ----------
+    file : str, Path or Sequence
+        `.seq` file for which global SAR values will be computed. Can be path to `.seq` file as `str` or `Path`, or the
+        `Sequence` object itself.
+
+    Raises
+    ------
+    ValueError
+        If `file` is a `str` or `Path` to the `.seq` file and this file does not exist on disk.
+    """
+    if isinstance(file, (str, Path)):
+        if isinstance(file, str):
+            file = Path(file)
+
+        if file.exists() and file.is_file():
+            seq_obj = Sequence()
+            seq_obj.read(str(file))
+            seq_obj = seq_obj
+        else:
+            raise ValueError("Seq file does not exist.")
+    else:
+        seq_obj = file
+
+    Q_tmf, Q_hmf = _load_Q()
+    SAR_wbg, SAR_hg, t = _SAR_from_seq(seq_obj, Q_tmf, Q_hmf)
+    SARwbg_lim, tsec = _SAR_interp(SAR_wbg, t)
+    SARhg_lim, tsec = _SAR_interp(SAR_hg, t)
+    (
+        SAR_wbg_tensec,
+        SAR_wbg_sixmin,
+        SAR_hg_tensec,
+        SAR_hg_sixmin,
+        SAR_wbg_sixmin_peak,
+        SAR_hg_sixmin_peak,
+        SAR_wbg_tensec_peak,
+        SAR_hg_tensec_peak,
+    ) = _SAR_lims_check(SARwbg_lim, SARhg_lim, tsec)
+
+    # Plot 10 sec average SAR
+    if tsec[-1] > 10:
+        plt.plot(tsec, SAR_wbg_tensec, "x-", label="Whole Body: 10sec")
+        plt.plot(tsec, SAR_hg_tensec, ".-", label="Head only: 10sec")
+
+        # plt.plot(t, SARwbg, label='Whole Body - instant')
+        # plt.plot(t, SARhg, label='Whole Body - instant')
+
+        plt.xlabel("Time (s)")
+        plt.ylabel("SAR (W/kg)")
+        plt.title("Global SAR  - Mass Normalized -  Whole body and head only")
+
+        plt.legend()
+        plt.grid(True)
+        plt.show()

+ 0 - 0
LF_scanner/pypulseq/SAR/__init__.py


+ 0 - 0
LF_scanner/pypulseq/Sequence/__init__.py


+ 637 - 0
LF_scanner/pypulseq/Sequence/block.py

@@ -0,0 +1,637 @@
+from types import SimpleNamespace
+from typing import Tuple, List, Union
+
+import numpy as np
+
+from LF_scanner.pypulseq.block_to_events import block_to_events
+from LF_scanner.pypulseq.compress_shape import compress_shape
+from LF_scanner.pypulseq.decompress_shape import decompress_shape
+from LF_scanner.pypulseq.event_lib import EventLibrary
+from LF_scanner.pypulseq.supported_labels_rf_use import get_supported_labels
+
+
def set_block(self, block_index: int, *args: SimpleNamespace) -> None:
    """
    Replace block at index with new block provided as block structure, add sequence block, or create a new block
    from events and store at position specified by index. The block or events are provided in uncompressed form and
    will be stored in the compressed, non-redundant internal libraries.

    See also:
    - `pypulseq.Sequence.sequence.Sequence.get_block()`
    - `pypulseq.Sequence.sequence.Sequence.add_block()`

    Parameters
    ----------
    block_index : int
        Index at which block is replaced.
    args : SimpleNamespace
        Block or events to be replaced/added or created at `block_index`.

    Raises
    ------
    ValueError
        If trigger event that is passed is of unsupported control event type.
        If delay is set for a gradient event that starts with a non-zero amplitude.
    RuntimeError
        If two consecutive gradients do not have the same amplitude at the connection point.
        If the first gradient in the first block does not start with 0.
        If a gradient that doesn't end at zero is not aligned to the block boundary.
    """
    events = block_to_events(*args)
    # Event-ID slots per block: [0]=delay, [1]=RF, [2..4]=gx/gy/gz, [5]=ADC, [6]=extensions
    # (layout mirrors what get_block() reads back)
    self.block_events[block_index] = np.zeros(7, dtype=np.int32)
    duration = 0

    check_g = {}  # Maps gradient channel number -> start/stop (time, amplitude) pairs for the boundary checks below
    extensions = []

    for event in events:
        if not isinstance(event, float):  # If event is not a block duration
            if event.type == "rf":
                # Reuse an already-registered event when it carries an ID
                if hasattr(event, "id"):
                    rf_id = event.id
                else:
                    rf_id, _ = register_rf_event(self, event)

                self.block_events[block_index][1] = rf_id
                duration = max(
                    duration, event.shape_dur + event.delay + event.ringdown_time
                )
            elif event.type == "grad":
                channel_num = ["x", "y", "z"].index(event.channel)
                idx = 2 + channel_num

                # Snap first/last sample times onto the gradient raster
                # (the 1e-10 guards against floating-point rounding at raster edges)
                grad_start = (
                    event.delay
                    + np.floor(event.tt[0] / self.grad_raster_time + 1e-10)
                    * self.grad_raster_time
                )
                grad_duration = (
                    event.delay
                    + np.ceil(event.tt[-1] / self.grad_raster_time - 1e-10)
                    * self.grad_raster_time
                )

                # Remember boundary values for the continuity checks after the loop
                check_g[channel_num] = SimpleNamespace()
                check_g[channel_num].idx = idx
                check_g[channel_num].start = np.array((grad_start, event.first))
                check_g[channel_num].stop = np.array((grad_duration, event.last))

                if hasattr(event, "id"):
                    grad_id = event.id
                else:
                    grad_id, _ = register_grad_event(self, event)

                self.block_events[block_index][idx] = grad_id
                duration = np.max([duration, grad_duration])
            elif event.type == "trap":
                channel_num = ["x", "y", "z"].index(event.channel)
                idx = 2 + channel_num

                # Trapezoids always start and end at zero amplitude
                check_g[channel_num] = SimpleNamespace()
                check_g[channel_num].idx = idx
                check_g[channel_num].start = np.array((0, 0))
                check_g[channel_num].stop = np.array(
                    (
                        event.delay
                        + event.rise_time
                        + event.fall_time
                        + event.flat_time,
                        0,
                    )
                )

                if hasattr(event, "id"):
                    trap_id = event.id
                else:
                    trap_id = register_grad_event(self, event)

                self.block_events[block_index][idx] = trap_id
                duration = np.max(
                    [
                        duration,
                        event.delay
                        + event.rise_time
                        + event.flat_time
                        + event.fall_time,
                    ]
                )
            elif event.type == "adc":
                if hasattr(event, "id"):
                    adc_id = event.id
                else:
                    adc_id = register_adc_event(self, event)

                self.block_events[block_index][5] = adc_id
                duration = np.max(
                    [
                        duration,
                        event.delay + event.num_samples * event.dwell + event.dead_time,
                    ]
                )
            elif event.type == "delay":
                duration = np.max([duration, event.delay])
            elif event.type in ["output", "trigger"]:
                if hasattr(event, "id"):
                    event_id = event.id
                else:
                    event_id = register_control_event(self, event)

                # Control events are stored as extensions, not as dedicated slots
                ext = {"type": self.get_extension_type_ID("TRIGGERS"), "ref": event_id}
                extensions.append(ext)
                duration = np.max([duration, event.delay + event.duration])
            elif event.type in ["labelset", "labelinc"]:
                if hasattr(event, "id"):
                    label_id = event.id
                else:
                    label_id = register_label_event(self, event)

                ext = {
                    "type": self.get_extension_type_ID(event.type.upper()),
                    "ref": label_id,
                }
                extensions.append(ext)

    # =========
    # ADD EXTENSIONS
    # =========
    if len(extensions) > 0:
        """
        Add extensions now... but it's tricky actually we need to check whether the exactly the same list of extensions
        already exists, otherwise we have to create a new one... ooops, we have a potential problem with the key
        mapping then... The trick is that we rely on the sorting of the extension IDs and then we can always find the
        last one in the list by setting the reference to the next to 0 and then proceed with the other elements.
        """
        sort_idx = np.argsort([e["ref"] for e in extensions])
        extensions = np.take(extensions, sort_idx)
        all_found = True
        extension_id = 0
        # First pass: check whether the exact linked chain (type, ref, next) already exists
        for i in range(len(extensions)):
            data = [extensions[i]["type"], extensions[i]["ref"], extension_id]
            extension_id, found = self.extensions_library.find(data)
            all_found = all_found and found
            if not found:
                break

        if not all_found:
            # Add the list
            extension_id = 0
            for i in range(len(extensions)):
                data = [extensions[i]["type"], extensions[i]["ref"], extension_id]
                extension_id, found = self.extensions_library.find(data)
                if not found:
                    self.extensions_library.insert(extension_id, data)

        # Now we add the ID
        self.block_events[block_index][6] = extension_id

    # =========
    # PERFORM GRADIENT CHECKS
    # =========
    for grad_to_check in check_g.values():

        # A starting amplitude larger than one raster step of maximum slew
        # can only be legal if it continues the previous block's gradient
        if (
            abs(grad_to_check.start[1])
            > self.system.max_slew * self.system.grad_raster_time
        ):
            if grad_to_check.start[0] != 0:
                raise ValueError(
                    "No delay allowed for gradients which start with a non-zero amplitude"
                )

            # NOTE(review): block indices appear to be 1-based here (index 1 has
            # no predecessor) -- confirm against the Sequence container
            if block_index > 1:
                prev_id = self.block_events[block_index - 1][grad_to_check.idx]
                if prev_id != 0:
                    prev_lib = self.grad_library.get(prev_id)
                    prev_data = prev_lib["data"]
                    prev_type = prev_lib["type"]
                    # 't' = trapezoid (always ends at zero), so a non-zero continuation is impossible
                    if prev_type == "t":
                        raise RuntimeError(
                            "Two consecutive gradients need to have the same amplitude at the connection point"
                        )
                    elif prev_type == "g":
                        last = prev_data[5]  # 'last' amplitude of the previous arbitrary gradient
                        if (
                            abs(last - grad_to_check.start[1])
                            > self.system.max_slew * self.system.grad_raster_time
                        ):
                            raise RuntimeError(
                                "Two consecutive gradients need to have the same amplitude at the connection point"
                            )
            else:
                raise RuntimeError(
                    "First gradient in the the first block has to start at 0."
                )

        # A gradient that ends at non-zero amplitude must end exactly at the block boundary
        if (
            grad_to_check.stop[1] > self.system.max_slew * self.system.grad_raster_time
            and abs(grad_to_check.stop[0] - duration) > 1e-7
        ):
            raise RuntimeError(
                "A gradient that doesn't end at zero needs to be aligned to the block boundary."
            )

    self.block_durations[block_index] = float(duration)
+
+
def get_block(self, block_index: int) -> SimpleNamespace:
    """
    Returns PyPulseq block at `block_index` position in `self.block_events`.

    The block is created from the sequence data with all events and shapes decompressed.

    Parameters
    ----------
    block_index : int
        Index of PyPulseq block to be retrieved from `self.block_events`.

    Returns
    -------
    block : SimpleNamespace
        PyPulseq block at 'block_index' position in `self.block_events`.

    Raises
    ------
    ValueError
        If a trigger event of an unsupported control type is encountered.
    RuntimeError
        If an extension of an unknown extension type ID is encountered.
    """

    block = SimpleNamespace()
    # Initialize the standard event attributes to None
    attrs = ["block_duration", "rf", "gx", "gy", "gz", "adc"]
    values = [None] * len(attrs)
    for att, val in zip(attrs, values):
        setattr(block, att, val)
    # Event-ID slots: [0]=delay, [1]=RF, [2..4]=gx/gy/gz, [5]=ADC, [6]=extensions
    event_ind = self.block_events[block_index]

    if event_ind[0] > 0:  # Delay
        delay = SimpleNamespace()
        delay.type = "delay"
        delay.delay = self.delay_library.data[event_ind[0]][0]
        block.delay = delay

    if event_ind[1] > 0:  # RF
        if len(self.rf_library.type) >= event_ind[1]:
            block.rf = self.rf_from_lib_data(
                self.rf_library.data[event_ind[1]], self.rf_library.type[event_ind[1]]
            )
        else:
            block.rf = self.rf_from_lib_data(
                self.rf_library.data[event_ind[1]]
            )  # Undefined type/use

    # Gradients
    grad_channels = ["gx", "gy", "gz"]
    for i in range(len(grad_channels)):
        if event_ind[2 + i] > 0:
            grad, compressed = SimpleNamespace(), SimpleNamespace()
            # Library type 't' denotes a trapezoid, 'g' an arbitrary waveform
            grad_type = self.grad_library.type[event_ind[2 + i]]
            lib_data = self.grad_library.data[event_ind[2 + i]]
            grad.type = "trap" if grad_type == "t" else "grad"
            grad.channel = grad_channels[i][1]  # 'x', 'y' or 'z'
            if grad.type == "grad":
                # Arbitrary gradient: lib_data = [amplitude, shape_id, time_id, delay, (first, last)]
                amplitude = lib_data[0]
                shape_id = lib_data[1]
                time_id = lib_data[2]
                delay = lib_data[3]
                shape_data = self.shape_library.data[shape_id]
                compressed.num_samples = shape_data[0]
                compressed.data = shape_data[1:]
                g = decompress_shape(compressed)
                grad.waveform = amplitude * g

                if time_id == 0:
                    # No explicit time shape: samples sit at raster-cell centers
                    grad.tt = (np.arange(1, len(g) + 1) - 0.5) * self.grad_raster_time
                    t_end = len(g) * self.grad_raster_time
                else:
                    # Explicit time shape, stored in units of the gradient raster
                    t_shape_data = self.shape_library.data[time_id]
                    compressed.num_samples = t_shape_data[0]
                    compressed.data = t_shape_data[1:]
                    grad.tt = decompress_shape(compressed) * self.grad_raster_time

                    assert len(grad.waveform) == len(grad.tt)
                    t_end = grad.tt[-1]

                grad.shape_id = shape_id
                grad.time_id = time_id
                grad.delay = delay
                grad.shape_dur = t_end
                if len(lib_data) > 5:
                    grad.first = lib_data[4]
                    grad.last = lib_data[5]
            else:
                # Trapezoid: lib_data = [amplitude, rise_time, flat_time, fall_time, delay]
                grad.amplitude = lib_data[0]
                grad.rise_time = lib_data[1]
                grad.flat_time = lib_data[2]
                grad.fall_time = lib_data[3]
                grad.delay = lib_data[4]
                # Total area includes the triangular ramp contributions
                grad.area = grad.amplitude * (
                    grad.flat_time + grad.rise_time / 2 + grad.fall_time / 2
                )
                grad.flat_area = grad.amplitude * grad.flat_time

            setattr(block, grad_channels[i], grad)

    # ADC
    if event_ind[5] > 0:
        lib_data = self.adc_library.data[event_ind[5]]
        if len(lib_data) < 6:
            # Older entries may lack the trailing dead_time field; default it to 0
            lib_data = np.append(lib_data, 0)

        adc = SimpleNamespace()
        (
            adc.num_samples,
            adc.dwell,
            adc.delay,
            adc.freq_offset,
            adc.phase_offset,
            adc.dead_time,
        ) = [lib_data[x] for x in range(6)]
        adc.num_samples = int(adc.num_samples)
        adc.type = "adc"
        block.adc = adc

    # Triggers
    if event_ind[6] > 0:
        # We have extensions - triggers, labels, etc.
        # Extensions form a linked list: each entry points to the next via its third field (0 terminates)
        next_ext_id = event_ind[6]
        while next_ext_id != 0:
            ext_data = self.extensions_library.data[next_ext_id]
            # Format: ext_type, ext_id, next_ext_id
            ext_type = self.get_extension_type_string(ext_data[0])

            if ext_type == "TRIGGERS":
                trigger_types = ["output", "trigger"]
                data = self.trigger_library.data[ext_data[1]]
                trigger = SimpleNamespace()
                trigger.type = trigger_types[int(data[0]) - 1]
                # data[0] == 1 -> 'output' channels, data[0] == 2 -> 'trigger' channels
                if data[0] == 1:
                    trigger_channels = ["osc0", "osc1", "ext1"]
                    trigger.channel = trigger_channels[int(data[1]) - 1]
                elif data[0] == 2:
                    trigger_channels = ["physio1", "physio2"]
                    trigger.channel = trigger_channels[int(data[1]) - 1]
                else:
                    raise ValueError("Unsupported trigger event type")

                trigger.delay = data[2]
                trigger.duration = data[3]
                # Allow for multiple triggers per block
                if hasattr(block, "trigger"):
                    block.trigger[len(block.trigger)] = trigger
                else:
                    block.trigger = {0: trigger}
            elif ext_type in ["LABELSET", "LABELINC"]:
                label = SimpleNamespace()
                label.type = ext_type.lower()
                supported_labels = get_supported_labels()
                if ext_type == "LABELSET":
                    data = self.label_set_library.data[ext_data[1]]
                else:
                    data = self.label_inc_library.data[ext_data[1]]

                # data = [value, 1-based index into the supported-label list]
                label.label = supported_labels[int(data[1] - 1)]
                label.value = data[0]
                # Allow for multiple labels per block
                if hasattr(block, "label"):
                    block.label[len(block.label)] = label
                else:
                    block.label = {0: label}
            else:
                raise RuntimeError(f"Unknown extension ID {ext_data[0]}")

            next_ext_id = ext_data[2]

    block.block_duration = self.block_durations[block_index]

    return block
+
+
def register_adc_event(self, event: SimpleNamespace) -> int:
    """
    Register an ADC event in the ADC event library.

    Parameters
    ----------
    event : SimpleNamespace
        ADC event to be registered.

    Returns
    -------
    int
        ID of registered ADC event.
    """
    # The stored delay is clamped to at least the dead time -- presumably so
    # sampling never starts before the ADC dead time has elapsed (confirm
    # against the Pulseq specification).
    data = np.array(
        [
            event.num_samples,
            event.dwell,
            max(event.delay, event.dead_time),
            event.freq_offset,
            event.phase_offset,
            event.dead_time,
        ]
    )
    # Deduplicating insert: reuses an existing identical entry if present
    adc_id, _ = self.adc_library.find_or_insert(new_data=data)

    return adc_id
+
+
def register_control_event(self, event: SimpleNamespace) -> int:
    """
    Register a control (trigger/output) event in the trigger event library.

    Parameters
    ----------
    event : SimpleNamespace
        Control event to be registered; `event.type` is 'output' or 'trigger'.

    Returns
    -------
    int
        ID of registered control event.
    """
    type_idx = ["output", "trigger"].index(event.type)
    # Channel names supported by the Siemens interpreter (as of mid-2019):
    # 'output' events use oscilloscope/external channels, 'trigger' events
    # use the physio channels.
    channels_by_type = (["osc0", "osc1", "ext1"], ["physio1", "physio2"])
    channel_idx = channels_by_type[type_idx].index(event.channel)

    # Stored as 1-based codes, followed by timing
    lib_data = [type_idx + 1, channel_idx + 1, event.delay, event.duration]
    control_id, _ = self.trigger_library.find_or_insert(new_data=lib_data)
    return control_id
+
+
def register_grad_event(
    self, event: SimpleNamespace
) -> Union[int, Tuple[int, List[int]]]:
    """
    Register a gradient event ('grad' arbitrary waveform or 'trap' trapezoid) in the gradient library.
    For arbitrary gradients, the normalized waveform and (if non-regular) time shapes are compressed
    and stored in the shape library.

    Parameters
    ----------
    event : SimpleNamespace
        Gradient event to be registered.

    Returns
    -------
    int, [int, ...]
        For gradient events: ID of registered gradient event, list of shape IDs
    int
        For trapezoid gradient events: ID of registered gradient event

    Raises
    ------
    ValueError
        If `event.type` is neither 'grad' nor 'trap'.
    """
    may_exist = True  # Stays True only while every referenced shape was already in the shape library
    if event.type == "grad":
        # Store a signed peak amplitude; sign is taken from the first non-zero sample
        amplitude = np.abs(event.waveform).max()
        if amplitude > 0:
            fnz = event.waveform[np.nonzero(event.waveform)[0][0]]
            amplitude *= (
                np.sign(fnz) if fnz != 0 else 1
            )  # Workaround for np.sign(0) = 0

        if hasattr(event, "shape_IDs"):
            shape_IDs = event.shape_IDs
        else:
            shape_IDs = [0, 0]  # [waveform shape ID, time shape ID]
            # Normalize the waveform to unit amplitude before compression
            if amplitude != 0:
                g = event.waveform / amplitude
            else:
                g = event.waveform
            c_shape = compress_shape(g)
            s_data = np.insert(c_shape.data, 0, c_shape.num_samples)
            shape_IDs[0], found = self.shape_library.find_or_insert(s_data)
            may_exist = may_exist & found
            # Time shape is stored in units of the gradient raster
            c_time = compress_shape(event.tt / self.grad_raster_time)

            # This specific compressed pattern is what regular (uniform-raster) timing
            # compresses to; in that case the time shape is omitted (ID stays 0)
            if not (
                len(c_time.data) == 4
                and np.all(c_time.data == [0.5, 1, 1, c_time.num_samples - 3])
            ):
                t_data = np.insert(c_time.data, 0, c_time.num_samples)
                shape_IDs[1], found = self.shape_library.find_or_insert(t_data)
                may_exist = may_exist & found

        data = [amplitude, *shape_IDs, event.delay, event.first, event.last]
    elif event.type == "trap":
        data = np.array(
            [
                event.amplitude,
                event.rise_time,
                event.flat_time,
                event.fall_time,
                event.delay,
            ]
        )
    else:
        raise ValueError("Unknown gradient type passed to register_grad_event()")

    if may_exist:
        # All referenced shapes already existed, so the gradient event itself may too
        grad_id, _ = self.grad_library.find_or_insert(
            new_data=data, data_type=event.type[0]
        )
    else:
        grad_id = self.grad_library.insert(0, data, event.type[0])

    if event.type == "grad":
        return grad_id, shape_IDs
    elif event.type == "trap":
        return grad_id
+
+
def register_label_event(self, event: SimpleNamespace) -> int:
    """
    Register a label event ('labelset' or 'labelinc') in the matching label library.

    Parameters
    ----------
    event : SimpleNamespace
        Label event to be registered.

    Returns
    -------
    int
        ID of registered label event.

    Raises
    ------
    ValueError
        If `event.type` is neither 'labelset' nor 'labelinc'.
    """
    # Labels are stored as (value, 1-based index into the supported-label list)
    label_index = get_supported_labels().index(event.label) + 1
    lib_data = [event.value, label_index]

    if event.type == "labelset":
        registered_id, _ = self.label_set_library.find_or_insert(new_data=lib_data)
        return registered_id
    if event.type == "labelinc":
        registered_id, _ = self.label_inc_library.find_or_insert(new_data=lib_data)
        return registered_id
    raise ValueError("Unsupported label type passed to register_label_event()")
+
+
def register_rf_event(self, event: SimpleNamespace) -> Tuple[int, List[int]]:
    """
    Register an RF event in the RF event library. The magnitude, phase and (if non-regular)
    time shapes are compressed and stored in the shape library.

    Parameters
    ----------
    event : SimpleNamespace
        RF event to be registered.

    Returns
    -------
    int, [int, ...]
        ID of registered RF event, list of shape IDs (magnitude, phase, time)
    """
    # Split the complex signal into a peak amplitude and a unit-magnitude shape
    mag = np.abs(event.signal)
    amplitude = np.max(mag)
    mag /= amplitude
    # Following line of code is a workaround for numpy's divide functions returning NaN when mathematical
    # edge cases are encountered (eg. divide by 0)
    mag[np.isnan(mag)] = 0
    # Phase is stored normalized to [0, 1) turns rather than radians
    phase = np.angle(event.signal)
    phase[phase < 0] += 2 * np.pi
    phase /= 2 * np.pi
    may_exist = True  # Stays True only while every referenced shape was already in the shape library

    if hasattr(event, "shape_IDs"):
        shape_IDs = event.shape_IDs
    else:
        shape_IDs = [0, 0, 0]  # [magnitude, phase, time] shape IDs

        mag_shape = compress_shape(mag)
        data = np.insert(mag_shape.data, 0, mag_shape.num_samples)
        shape_IDs[0], found = self.shape_library.find_or_insert(data)
        may_exist = may_exist & found

        phase_shape = compress_shape(phase)
        data = np.insert(phase_shape.data, 0, phase_shape.num_samples)
        shape_IDs[1], found = self.shape_library.find_or_insert(data)
        may_exist = may_exist & found

        time_shape = compress_shape(
            event.t / self.rf_raster_time
        )  # Time shape is stored in units of RF raster
        # This specific compressed pattern is what regular (uniform-raster) timing
        # compresses to; in that case the time shape is omitted (ID stays 0)
        if len(time_shape.data) == 4 and np.all(
            time_shape.data == [0.5, 1, 1, time_shape.num_samples - 3]
        ):
            shape_IDs[2] = 0
        else:
            data = [time_shape.num_samples, *time_shape.data]
            shape_IDs[2], found = self.shape_library.find_or_insert(data)
            may_exist = may_exist & found

    use = "u"  # Undefined
    if hasattr(event, "use"):
        # Only the first letter of the recognized use string is stored as the library data type
        if event.use in [
            "excitation",
            "refocusing",
            "inversion",
            "saturation",
            "preparation",
        ]:
            use = event.use[0]
        else:
            use = "u"

    data = np.array(
        [amplitude, *shape_IDs, event.delay, event.freq_offset, event.phase_offset]
    )

    if may_exist:
        # All referenced shapes already existed, so the RF event itself may too
        rf_id, _ = self.rf_library.find_or_insert(new_data=data, data_type=use)
    else:
        rf_id = self.rf_library.insert(key_id=0, new_data=data, data_type=use)

    return rf_id, shape_IDs

+ 179 - 0
LF_scanner/pypulseq/Sequence/calc_grad_spectrum.py

@@ -0,0 +1,179 @@
+from typing import Tuple, List, Union
+
+import numpy as np
+from scipy.signal import spectrogram
+from matplotlib import pyplot as plt
+
+
+def calculate_gradient_spectrum(
+        obj,
+        max_frequency: float = 2000,
+        window_width: float = 0.05,
+        frequency_oversampling: float = 3,
+        time_range: Union[List[float], None] = None,
+        plot: bool = True,
+        combine_mode: str = 'max',
+        use_derivative: bool = False,
+        acoustic_resonances: List[dict] = [],
+) -> Tuple[List[np.ndarray], np.ndarray, np.ndarray, np.ndarray]:
+    """
+    Calculates the gradient spectrum of the sequence. Returns a spectrogram
+    for each gradient channel, as well as a root-sum-squares combined
+    spectrogram.
+    
+    Works by splitting the sequence into windows that are 'window_width'
+    long and calculating the fourier transform of each window. Windows
+    overlap 50% with the previous and next window. When 'combine_mode' is
+    not 'none', all windows are combined into one spectrogram.
+
+    Parameters
+    ----------
+    max_frequency : float, optional
+        Maximum frequency to include in spectrograms. The default is 2000.
+    window_width : float, optional
+        Window width (in seconds). The default is 0.05.
+    frequency_oversampling : float, optional
+        Oversampling in the frequency dimension, higher values make
+        smoother spectrograms. The default is 3.
+    time_range : List[float], optional
+        Time range over which to calculate the spectrograms as a list of
+        two timepoints (in seconds) (e.g. [1, 1.5])
+        The default is None.
+    plot : bool, optional
+        Whether to plot the spectograms. The default is True.
+    combine_mode : str, optional
+        How to combine all windows into one spectrogram, options:
+            'max', 'mean', 'rss' (root-sum-of-squares), 'none' (no combination)
+        The default is 'max'.
+    use_derivative : bool, optional
+        Whether the use the derivative of the gradient waveforms instead of the
+        gradient waveforms for the gradient spectrum calculations. The default
+        is False
+    acoustic_resonances : List[dict], optional
+        Acoustic resonances as a list of dictionaries with 'frequency' and
+        'bandwidth' elements. Only used when plot==True. The default is [].
+
+    Returns
+    -------
+    spectrograms : List[np.ndarray]
+        List of spectrograms per gradient channel.
+    spectrogram_rss : np.ndarray
+        Root-sum-of-squares combined spectrogram over all gradient channels.
+    frequencies : np.ndarray
+        Frequency axis of the spectrograms.
+    times : np.ndarray
+        Time axis of the spectrograms (only relevant when combine_mode == 'none').
+
+    """
+    dt = obj.system.grad_raster_time # time raster
+    nwin = round(window_width / dt)
+    nfft = round(frequency_oversampling*nwin)
+
+    # Get gradients as piecewise-polynomials
+    gw_pp = obj.get_gradients(time_range=time_range)
+    ng = len(gw_pp)
+    max_t = max(g.x[-1] for g in gw_pp if g is not None)
+    
+    # Determine sampling points
+    if time_range == None:
+        nt = int(np.ceil(max_t/dt))
+        t = (np.arange(nt) + 0.5)*dt
+    else:
+        tmax = min(time_range[1], max_t) - max(time_range[0], 0)
+        nt = int(np.ceil(tmax/dt))
+        t = max(time_range[0], 0) + (np.arange(nt) + 0.5)*dt
+    
+    # Sample gradients
+    gw = np.zeros((ng,t.shape[0]))
+    for i in range(ng):
+        if gw_pp[i] != None:
+            gw[i] = gw_pp[i](t)
+    
+    if use_derivative:
+        gw = np.diff(gw, axis=1)
+    
+    # Calculate spectrogram for each gradient channel
+    spectrograms: List[np.ndarray] = []
+    spectrogram_rss = 0
+    
+    for i in range(ng):
+        # Use scipy to calculate the spectrograms
+        freq, times, sxx = spectrogram(gw[i],
+                                       fs=1/dt,
+                                       mode='magnitude',
+                                       nperseg=nwin,
+                                       noverlap=nwin//2,
+                                       nfft=nfft,
+                                       detrend='constant',
+                                       window=('tukey', 1))
+        mask = freq<max_frequency
+        
+        # Accumulate spectrum for all gradient channels
+        spectrogram_rss += sxx[mask]**2
+        
+        # Combine spectrogram over time axis
+        if combine_mode == 'max':
+            s = sxx[mask].max(axis=1)
+        elif combine_mode == 'mean':
+            s = sxx[mask].mean(axis=1)
+        elif combine_mode == 'rss':
+            s = np.sqrt((sxx[mask]**2).sum(axis=1))
+        elif combine_mode == 'none':
+            s = sxx[mask]
+        else:
+            raise ValueError(f'Unknown value for combine_mode: {combine_mode}, must be one of [max, mean, rss, none]')
+        
+        frequencies = freq[mask]
+        spectrograms.append(s)
+    
+    # Root-sum-of-squares combined spectrogram for all gradient channels
+    spectrogram_rss = np.sqrt(spectrogram_rss)
+    if combine_mode == 'max':
+        spectrogram_rss = spectrogram_rss.max(axis=1)
+    elif combine_mode == 'mean':
+        spectrogram_rss = spectrogram_rss.mean(axis=1)
+    elif combine_mode == 'rss':
+        spectrogram_rss = np.sqrt((spectrogram_rss**2).sum(axis=1))
+    
+    # Plot spectrograms and acoustic resonances if specified
+    if plot:
+        if combine_mode != 'none':
+            plt.figure()
+            plt.xlabel('Frequency (Hz)')
+            # According to spectrogram documentation y unit is (Hz/m)^2 / Hz = Hz/m^2, is this meaningful?
+            for s in spectrograms:
+                plt.plot(frequencies, s)
+            plt.plot(frequencies, spectrogram_rss)
+            plt.legend(['x', 'y', 'z', 'rss'])
+    
+            for res in acoustic_resonances:
+                plt.axvline(res['frequency'], color='k', linestyle='-')
+                plt.axvline(res['frequency'] - res['bandwidth']/2, color='k', linestyle='--')
+                plt.axvline(res['frequency'] + res['bandwidth']/2, color='k', linestyle='--')
+        else:
+            for s, c in zip(spectrograms, ['X', 'Y', 'Z']):
+                plt.figure()
+                plt.title(f'Spectrum {c}')
+                plt.xlabel('Time (s)')
+                plt.ylabel('Frequency (Hz)')
+                plt.imshow(abs(s[::-1]), extent=(times[0], times[-1], frequencies[0], frequencies[-1]),
+                           aspect=(times[-1]-times[0])/(frequencies[-1]-frequencies[0]))
+                
+                for res in acoustic_resonances:
+                    plt.axhline(res['frequency'], color='r', linestyle='-')
+                    plt.axhline(res['frequency'] - res['bandwidth']/2, color='r', linestyle='--')
+                    plt.axhline(res['frequency'] + res['bandwidth']/2, color='r', linestyle='--')
+            
+            plt.figure()
+            plt.title('Total spectrum')
+            plt.xlabel('Time (s)')
+            plt.ylabel('Frequency (Hz)')
+            plt.imshow(abs(spectrogram_rss[::-1]), extent=(times[0], times[-1], frequencies[0], frequencies[-1]),
+                       aspect=(times[-1]-times[0])/(frequencies[-1]-frequencies[0]))
+            
+            for res in acoustic_resonances:
+                plt.axhline(res['frequency'], color='r', linestyle='-')
+                plt.axhline(res['frequency'] - res['bandwidth']/2, color='r', linestyle='--')
+                plt.axhline(res['frequency'] + res['bandwidth']/2, color='r', linestyle='--')
+                
+    return spectrograms, spectrogram_rss, frequencies, times

+ 102 - 0
LF_scanner/pypulseq/Sequence/calc_pns.py

@@ -0,0 +1,102 @@
+import math
+from types import SimpleNamespace
+from typing import Tuple, List
+
+import matplotlib.pyplot as plt
+import LF_scanner.pypulseq as pp
+import numpy as np
+
+from LF_scanner.pypulseq import Sequence
+from LF_scanner.pypulseq.utils.safe_pns_prediction import safe_gwf_to_pns, safe_plot
+
+from LF_scanner.pypulseq.utils.siemens.readasc import readasc
+from LF_scanner.pypulseq.utils.siemens.asc_to_hw import asc_to_hw
+
+
def calc_pns(
        obj: Sequence,
        hardware: SimpleNamespace,
        time_range: List[float] = None,
        do_plots: bool = True
        ) -> Tuple[bool, np.ndarray, np.ndarray, np.ndarray]:
    """
    Calculate PNS using the SAFE model implementation by Szczepankiewicz and Witzel.
    See http://github.com/filip-szczepankiewicz/safe_pns_prediction

    Returns PNS levels due to the respective axes (normalized to 1 and not to 100%).

    Parameters
    ----------
    obj : Sequence
        Sequence object for which the PNS levels are calculated.
    hardware : SimpleNamespace or str
        Hardware specifications. See safe_example_hw() from
        the safe_pns_prediction package. Alternatively a text file
        in the .asc format (Siemens) can be passed, e.g. for Prisma
        it is MP_GPA_K2309_2250V_951A_AS82.asc (we leave it as an
        exercise to the interested user to find where these files
        can be acquired from)
    time_range : List[float], optional
        Two-element [t_start, t_end] restricting the calculation to a part
        of the sequence. The default is None (whole sequence).
    do_plots : bool, optional
        Plot the results from the PNS calculations. The default is True.

    Returns
    -------
    ok : bool
        Boolean flag indicating whether peak PNS is within acceptable limits
    pns_norm : numpy.ndarray [N]
        PNS norm over all gradient channels, normalized to 1
    pns_components : numpy.ndarray [Nx3]
        PNS levels per gradient channel
    t_pns : numpy.ndarray [N]
        Time axis for the pns_norm and pns_components arrays
    """
    dt = obj.grad_raster_time
    # Get gradients as piecewise-polynomials
    gw_pp = obj.get_gradients(time_range=time_range)
    ng = len(gw_pp)
    # Subtract a tiny epsilon so the last sample stays strictly inside the support
    max_t = max(g.x[-1] for g in gw_pp if g is not None) - 1e-10

    # Determine sampling points (centers of the gradient raster intervals)
    if time_range is None:
        nt = int(np.ceil(max_t / dt))
        t = (np.arange(nt) + 0.5) * dt
    else:
        tmax = min(time_range[1], max_t) - max(time_range[0], 0)
        nt = int(np.ceil(tmax / dt))
        t = max(time_range[0], 0) + (np.arange(nt) + 0.5) * dt

    # Sample gradients; channels without a waveform stay zero
    gw = np.zeros((t.shape[0], ng))
    for i in range(ng):
        if gw_pp[i] is not None:
            gw[:, i] = gw_pp[i](t)

    if do_plots:
        plt.figure()
        for i in range(ng):
            if gw_pp[i] is not None:
                plt.plot(gw_pp[i].x[1:-1], gw_pp[i].c[1, :-1])
        plt.title('gradient wave form, in Hz/m')

    if isinstance(hardware, str):
        # This loads the hardware parameters from the provided .asc text file
        asc, _ = readasc(hardware)
        hardware = asc_to_hw(asc)

    # Use Szczepankiewicz' and Witzel's implementation. The RF vector is unused
    # in the called code but it is zero-padded and exported ...
    pns_comp, res = safe_gwf_to_pns(gw / obj.system.gamma, np.nan * np.ones(t.shape[0]), obj.grad_raster_time, hardware)

    # Use the exported RF vector to detect and undo the zero-padding
    pns_comp = 0.01 * pns_comp[~np.isfinite(res.rf[1:]), :]

    # Calculate pns_norm and the final ok/not-ok flag
    pns_norm = np.sqrt((pns_comp ** 2).sum(axis=1))
    ok = bool(np.all(pns_norm < 1))

    if do_plots:
        # Plot results
        plt.figure()
        safe_plot(pns_comp * 100, obj.grad_raster_time)

    return ok, pns_norm, pns_comp, t

+ 247 - 0
LF_scanner/pypulseq/Sequence/ext_test_report.py

@@ -0,0 +1,247 @@
+import numpy as np
+
+from LF_scanner.pypulseq import eps
+from LF_scanner.pypulseq.convert import convert
+
+
def ext_test_report(self) -> str:
    """
    Analyze the sequence and return a text report.

    The report lists event counts, sequence duration, estimated TE/TR, flip
    angles, k-space encoding properties (dimensionality, spatial resolution,
    repetitions, cartesian-ness), a timing-check result and gradient /
    slew-rate extrema.

    Returns
    -------
    report : str
        Multi-line, human-readable sequence report.
    """
    # Find RF pulses and list flip angles
    flip_angles_deg = []
    for k in self.rf_library.keys:
        lib_data = self.rf_library.data[k]
        if len(self.rf_library.type) >= k:
            rf = self.rf_from_lib_data(lib_data, self.rf_library.type[k])
        else:
            rf = self.rf_from_lib_data(lib_data)
        # Flip angle equals the integral of the RF envelope (cycles -> degrees)
        flip_angles_deg.append(
            np.abs(np.sum(rf.signal[:-1] * (rf.t[1:] - rf.t[:-1]))) * 360
        )

    flip_angles_deg = np.unique(flip_angles_deg)

    # Calculate TE, TR
    duration, num_blocks, event_count = self.duration()

    k_traj_adc, k_traj, t_excitation, t_refocusing, t_adc = self.calculate_kspacePP()

    # The echo is assumed to be the ADC sample closest to the k-space center
    k_abs_adc = np.sqrt(np.sum(np.square(k_traj_adc), axis=0))
    k_abs_echo, index_echo = np.min(k_abs_adc), np.argmin(k_abs_adc)
    t_echo = t_adc[index_echo]
    if k_abs_echo > eps:
        i2check = []
        # Check if ADC k-space trajectory has elements left and right to index_echo.
        # Bounds fixed: index 0 has a valid left neighbor only from index 1 on,
        # and the right neighbor must not run past the last sample.
        if index_echo > 0:
            i2check.append(index_echo - 1)
        if index_echo < len(k_abs_adc) - 1:
            i2check.append(index_echo + 1)

        for i2 in i2check:
            v_i_to_0 = k_traj_adc[:, index_echo]
            v_i_to_t = k_traj_adc[:, i2] - k_traj_adc[:, index_echo]
            # Project v_i_to_0 onto v_i_to_t
            p_vit = np.matmul(v_i_to_0, v_i_to_t) / np.square(np.linalg.norm(v_i_to_t))
            if p_vit > 0:
                # We have found a bracket for the echo and the proportionality coefficient is p_vit
                t_echo = t_adc[index_echo] * (1 - p_vit) + t_adc[i2] * p_vit

    if len(t_excitation) != 0:
        t_ex_tmp = t_excitation[t_excitation < t_echo]
        TE = t_echo - t_ex_tmp[-1]
    else:
        TE = np.nan

    if len(t_excitation) < 2:
        TR = duration  # best guess for a single-shot sequence
    else:
        t_ex_tmp1 = t_excitation[t_excitation > t_echo]
        if len(t_ex_tmp1) == 0:
            TR = t_ex_tmp[-1] - t_ex_tmp[-2]
        else:
            TR = t_ex_tmp1[0] - t_ex_tmp[-1]

    # Check sequence dimensionality and spatial resolution
    k_extent = np.max(np.abs(k_traj_adc), axis=1)
    k_scale = np.max(k_extent)
    is_cartesian = False
    if k_scale != 0:
        k_bins = 4e6
        k_threshold = k_scale / k_bins

        # Detect unused dimensions and delete them
        if np.any(k_extent < k_threshold):
            k_traj_adc = np.delete(k_traj_adc, np.where(k_extent < k_threshold), axis=0)
            k_extent = np.delete(k_extent, np.where(k_extent < k_threshold), axis=0)

        # Bin the k-space trajectory to detect repetitions / slices
        k_len = k_traj_adc.shape[1]
        k_repeat = np.zeros(k_len)
        k_storage = np.zeros(k_len)
        k_storage_next = 0
        k_map = dict()
        for i in range(k_len):
            key_string = str(
                (k_bins + np.round(k_traj_adc[:, i] / k_threshold)).astype(np.int32)
            )
            k_storage_ind = k_map.get(key_string)
            if k_storage_ind is None:
                k_storage_ind = k_storage_next
                k_map[key_string] = k_storage_ind
                k_storage_next += 1
            k_storage[k_storage_ind] = k_storage[k_storage_ind] + 1
            k_repeat[i] = k_storage[k_storage_ind]

        repeats_max = np.max(k_storage[:k_storage_next])
        repeats_min = np.min(k_storage[:k_storage_next])
        repeats_median = np.median(k_storage[:k_storage_next])

        # Analyze the positions visited on the first repetition only
        k_traj_rep1 = k_traj_adc[:, k_repeat == 1]

        k_counters = np.zeros_like(k_traj_rep1)
        dims = k_traj_rep1.shape[0]
        # NOTE(review): k_map is shared across dimensions here, so integer bin
        # keys from one axis may collide with another axis' keys - confirm
        # against the reference implementation.
        k_map = dict()
        for j in range(dims):
            k_storage = np.zeros(k_len)
            k_storage_next = 1

            for i in range(k_traj_rep1.shape[1]):
                key = np.round(k_traj_rep1[j, i] / k_threshold).astype(np.int32)
                k_storage_ind = k_map.get(key)
                if k_storage_ind is None:  # tolerate rounding into a neighboring bin
                    k_storage_ind = k_map.get(key + 1)
                if k_storage_ind is None:
                    k_storage_ind = k_map.get(key - 1)
                if k_storage_ind is None:
                    k_storage_ind = k_storage_next
                    k_map[key] = k_storage_ind
                    k_storage_next += 1
                    k_storage[k_storage_ind] = k_traj_rep1[j, i]
                k_counters[j, i] = k_storage_ind

        unique_k_positions = np.max(k_counters, axis=1)
        # Cartesian if the visited positions form a full grid
        is_cartesian = np.prod(unique_k_positions) == k_traj_rep1.shape[1]
    else:
        # Keep this a 1-element array so len()/unpacking in the report works
        unique_k_positions = np.array([1])

    # gw_data = self.gradient_waveforms()
    waveforms_and_times = self.waveforms_and_times()
    gw_data = waveforms_and_times[0]
    # gws is a plain list because the channels may hold different numbers of
    # samples (np.zeros_like on a ragged list of arrays is not valid)
    gws = [None] * len(gw_data)
    ga = np.zeros(len(gw_data))
    gs = np.zeros(len(gw_data))

    common_time = np.unique(np.concatenate(gw_data, axis=1)[0])
    gw_ct = np.zeros((len(gw_data), len(common_time)))
    gs_ct = np.zeros((len(gw_data), len(common_time) - 1))
    for gc in range(len(gw_data)):
        if gw_data[gc].shape[1] > 0:
            # Slew rate as the finite difference of the amplitude samples
            gws[gc] = (gw_data[gc][1, 1:] - gw_data[gc][1, :-1]) / (
                gw_data[gc][0, 1:] - gw_data[gc][0, :-1]
            )

            # Interpolate to common time
            gw_ct[gc] = np.interp(
                x=common_time,
                xp=gw_data[gc][0, :],
                fp=gw_data[gc][1, :],
                left=0,
                right=0,
            )

            gs_ct[gc] = (gw_ct[gc][1:] - gw_ct[gc][:-1]) / (
                common_time[1:] - common_time[:-1]
            )

            # Max grad/slew per channel
            ga[gc] = np.max(np.abs(gw_data[gc][1]))
            gs[gc] = np.max(np.abs(gws[gc]))

    # Vector (absolute) maxima across all channels on the common time grid
    ga_abs = np.max(np.sqrt(np.sum(np.square(gw_ct), axis=0)))
    gs_abs = np.max(np.sqrt(np.sum(np.square(gs_ct), axis=0)))

    timing_ok, timing_error_report = self.check_timing()

    report = (
        f"Number of blocks: {num_blocks}\n"
        f"Number of events:\n"
        f"RF: {event_count[1]:6.0f}\n"
        f"Gx: {event_count[2]:6.0f}\n"
        f"Gy: {event_count[3]:6.0f}\n"
        f"Gz: {event_count[4]:6.0f}\n"
        f"ADC: {event_count[5]:6.0f}\n"
        f"Delay: {event_count[0]:6.0f}\n"
        f"Sequence duration: {duration:.6f} s\n"
        f"TE: {TE:.6f} s\n"
        f"TR: {TR:.6f} s\n"
    )
    report += (
        "Flip angle: "
        + ("{:.02f} " * len(flip_angles_deg)).format(*flip_angles_deg)
        + "deg\n"
    )
    report += (
        "Unique k-space positions (aka cols, rows, etc.): "
        + ("{:.0f} " * len(unique_k_positions)).format(*unique_k_positions)
        + "\n"
    )

    if np.any(unique_k_positions > 1):
        report += f"Dimensions: {len(k_extent)}\n"
        report += ("Spatial resolution: {:.02f} mm\n" * len(k_extent)).format(
            *(0.5 / k_extent * 1e3)
        )
        # Fixed broken f-string: '{repeats_min, repeats_max}' formatted a tuple
        report += f"Repetitions/slices/contrasts: {repeats_median}; range: [{repeats_min}, {repeats_max}]\n"

        if is_cartesian:
            report += "Cartesian encoding trajectory detected\n"
        else:
            report += "Non-cartesian/irregular encoding trajectory detected (eg: EPI, spiral, radial, etc.)\n"

    if timing_ok:
        report += "Event timing check passed successfully\n"
    else:
        report += (
            f"Event timing check failed. Error listing follows:\n {timing_error_report}"
        )

    ga_converted = convert(from_value=ga, from_unit="Hz/m", to_unit="mT/m")
    gs_converted = convert(from_value=gs, from_unit="Hz/m/s", to_unit="T/m/s")
    report += (
        "Max gradient: "
        + ("{:.0f} " * len(ga)).format(*ga)
        + "Hz/m == "
        + ("{:.02f} " * len(ga_converted)).format(*ga_converted)
        + "mT/m\n"
    )
    report += (
        "Max slew rate: "
        + ("{:.0f} " * len(gs)).format(*gs)
        + "Hz/m/s == "
        # Fixed: repeat count must match gs_converted (was len(ga_converted))
        + ("{:.02f} " * len(gs_converted)).format(*gs_converted)
        + "T/m/s\n"
    )

    ga_abs_converted = convert(from_value=ga_abs, from_unit="Hz/m", to_unit="mT/m")
    gs_abs_converted = convert(from_value=gs_abs, from_unit="Hz/m/s", to_unit="T/m/s")
    report += (
        f"Max absolute gradient: {ga_abs:.0f} Hz/m == {ga_abs_converted:.2f} mT/m\n"
    )
    report += (
        f"Max absolute slew rate: {gs_abs:g} Hz/m/s == {gs_abs_converted:.2f} T/m/s"
    )

    return report

+ 86 - 0
LF_scanner/pypulseq/Sequence/parula.py

@@ -0,0 +1,86 @@
+from matplotlib.colors import LinearSegmentedColormap
+
+
def main(N: int) -> LinearSegmentedColormap:
    """
    Returns a Parula colormap to be used with matplotlib's `cycler`. `cm_data` has values copied from MATLAB for
    `parula(64)`.

    Parameters
    ----------
    N : int
        Number of RGB quantization levels.

    Returns
    -------
    LinearSegmentedColormap
        Parula color map.
    """
    # 64 anchor colors (rows of [R, G, B], each component in 0..1) copied
    # verbatim from MATLAB's parula(64); matplotlib interpolates these
    # linearly to produce the requested N quantization levels.
    cm_data = [
        [0.2422, 0.1504, 0.6603],
        [0.25039048, 0.16499524, 0.70761429],
        [0.25777143, 0.18178095, 0.7511381],
        [0.26472857, 0.19775714, 0.79521429],
        [0.27064762, 0.21467619, 0.83637143],
        [0.27511429, 0.2342381, 0.87098571],
        [0.2783, 0.25587143, 0.89907143],
        [0.28033333, 0.27823333, 0.9221],
        [0.2813381, 0.30059524, 0.94137619],
        [0.28101429, 0.32275714, 0.95788571],
        [0.27946667, 0.34467143, 0.97167619],
        [0.27597143, 0.36668095, 0.98290476],
        [0.26991429, 0.3892, 0.9906],
        [0.26024286, 0.41232857, 0.99515714],
        [0.24403333, 0.43583333, 0.99883333],
        [0.22064286, 0.46025714, 0.99728571],
        [0.19633333, 0.48471905, 0.98915238],
        [0.18340476, 0.50737143, 0.97979524],
        [0.17864286, 0.52885714, 0.96815714],
        [0.1764381, 0.54990476, 0.95201905],
        [0.16874286, 0.5702619, 0.93587143],
        [0.154, 0.5902, 0.9218],
        [0.14602857, 0.60911905, 0.90785714],
        [0.13802381, 0.62762857, 0.89729048],
        [0.12481429, 0.64592857, 0.88834286],
        [0.11125238, 0.6635, 0.87631429],
        [0.09520952, 0.67982857, 0.85978095],
        [0.06887143, 0.69477143, 0.83935714],
        [0.02966667, 0.70816667, 0.81633333],
        [0.00357143, 0.72026667, 0.7917],
        [0.00665714, 0.73121429, 0.76601429],
        [0.04332857, 0.74109524, 0.73940952],
        [0.09639524, 0.75, 0.7120381],
        [0.14077143, 0.7584, 0.68415714],
        [0.1717, 0.7669619, 0.65544286],
        [0.19376667, 0.77576667, 0.6251],
        [0.21608571, 0.7843, 0.5923],
        [0.24695714, 0.79179524, 0.55674286],
        [0.29061429, 0.79729048, 0.51882857],
        [0.34064286, 0.8008, 0.47885714],
        [0.3909, 0.80287143, 0.43544762],
        [0.44562857, 0.80241905, 0.39091905],
        [0.5044, 0.7993, 0.348],
        [0.5615619, 0.79423333, 0.30448095],
        [0.61739524, 0.78761905, 0.2612381],
        [0.67198571, 0.77927143, 0.2227],
        [0.7242, 0.76984286, 0.19102857],
        [0.77383333, 0.75980476, 0.16460952],
        [0.82031429, 0.74981429, 0.15352857],
        [0.86343333, 0.7406, 0.15963333],
        [0.90354286, 0.73302857, 0.17741429],
        [0.93925714, 0.72878571, 0.20995714],
        [0.97275714, 0.72977143, 0.23944286],
        [0.99564762, 0.74337143, 0.23714762],
        [0.99698571, 0.76585714, 0.21994286],
        [0.99520476, 0.78925238, 0.2027619],
        [0.9892, 0.81356667, 0.18853333],
        [0.97862857, 0.83862857, 0.17655714],
        [0.96764762, 0.8639, 0.16429048],
        [0.96100952, 0.88901905, 0.15367619],
        [0.95967143, 0.91345714, 0.14225714],
        [0.96279524, 0.9373381, 0.12650952],
        [0.96911429, 0.96062857, 0.1063619],
        [0.9769, 0.9839, 0.0805],
    ]

    # from_list builds the colormap by linear interpolation over cm_data
    return LinearSegmentedColormap.from_list(name="parula", colors=cm_data, N=N)

+ 660 - 0
LF_scanner/pypulseq/Sequence/read_seq.py

@@ -0,0 +1,660 @@
+import re
+import warnings
+from pathlib import Path
+from types import SimpleNamespace
+from typing import Dict, Tuple, List
+
+import numpy as np
+
+from LF_scanner.pypulseq.calc_duration import calc_duration
+from LF_scanner.pypulseq.compress_shape import compress_shape
+from LF_scanner.pypulseq.decompress_shape import decompress_shape
+from LF_scanner.pypulseq.event_lib import EventLibrary
+from LF_scanner.pypulseq.supported_labels_rf_use import get_supported_labels
+
+
def read(self, path: str, detect_rf_use: bool = False, remove_duplicates: bool = True) -> None:
    """
    Load sequence from file - read the given filename and load sequence data into sequence object.

    See also `pypulseq.Sequence.write_seq.write()`.

    Parameters
    ----------
    path : str
        Path of sequence file to be read.
    detect_rf_use : bool, default=False
        Boolean flag to let the function infer the currently missing flags concerning the intended use of the RF pulses
        (excitation, refocusing, etc). These are important for the k-space trajectory calculation.
    remove_duplicates : bool, default=True
        Remove duplicate events from the sequence after reading.

    Raises
    ------
    FileNotFoundError
        If no sequence file is found at `path`.
    RuntimeError
        If incompatible sequence files are attempted to be loaded.
    ValueError
        If unexpected sections are encountered when loading a sequence file.
    """
    # open() itself raises FileNotFoundError with a useful message; the
    # previous try/except only re-raised the same exception type.
    input_file = open(path, "r")

    # Reset all event libraries before reading
    self.adc_library = EventLibrary()
    self.grad_library = EventLibrary()
    self.label_inc_library = EventLibrary()
    self.label_set_library = EventLibrary()
    self.rf_library = EventLibrary()
    self.shape_library = EventLibrary()
    self.trigger_library = EventLibrary()

    # Raster times default to the system values; [DEFINITIONS] may override them
    self.grad_raster_time = self.system.grad_raster_time
    self.rf_raster_time = self.system.rf_raster_time

    self.block_events = {}
    self.definitions = {}
    self.extension_string_idx = []
    self.extension_numeric_idx = []

    jemris_generated = False
    # Pre-initialize so the [BLOCKS] sanity check below cannot hit an
    # unbound name when the file lacks a [VERSION] section
    version_major = version_minor = version_revision = 0
    version_combined = 0

    # Load data from file, section by section
    while True:
        section = __skip_comments(input_file)
        if section == -1:  # EOF
            break
        if section == "[DEFINITIONS]":
            self.definitions = __read_definitions(input_file)

            # Gradient raster time (fixed: was assigned to the unused
            # attribute 'gradient_raster_time', silently dropping the value)
            if "GradientRasterTime" in self.definitions:
                self.grad_raster_time = self.definitions["GradientRasterTime"]

            # Radio frequency raster time
            if "RadiofrequencyRasterTime" in self.definitions:
                self.rf_raster_time = self.definitions["RadiofrequencyRasterTime"]

            # ADC raster time
            if "AdcRasterTime" in self.definitions:
                self.adc_raster_time = self.definitions["AdcRasterTime"]

            # Block duration raster
            if "BlockDurationRaster" in self.definitions:
                self.block_duration_raster = self.definitions["BlockDurationRaster"]
            else:
                warnings.warn(f"No BlockDurationRaster found in file. Using default of {self.block_duration_raster}.")

        elif section == "[JEMRIS]":
            jemris_generated = True
        elif section == "[SIGNATURE]":
            temp_sign_defs = __read_definitions(input_file)
            if "Type" in temp_sign_defs:
                self.signature_type = temp_sign_defs["Type"]
            if "Hash" in temp_sign_defs:
                self.signature_value = temp_sign_defs["Hash"]
                self.signature_file = "Text"
        elif section == "[VERSION]":
            version_major, version_minor, version_revision = __read_version(input_file)

            if version_major != self.version_major:
                raise RuntimeError(
                    f"Unsupported version_major: {version_major}. Expected: {self.version_major}"
                )

            version_combined = (
                    1000000 * version_major + 1000 * version_minor + version_revision
            )

            if version_combined < 1002000:
                raise RuntimeError(
                    f"Unsupported version {version_major}.{version_minor}.{version_revision}, only file "
                    f"format revision 1.2.0 and above are supported."
                )

            if version_combined < 1003001:
                # Older but still readable format: warn instead of raising, as
                # the message itself states the file can still be processed
                warnings.warn(
                    f"Loading older Pulseq format file (version "
                    f"{version_major}.{version_minor}.{version_revision}) some code may function not as "
                    f"expected"
                )
        elif section == "[BLOCKS]":
            if version_major == 0:
                raise RuntimeError(
                    "Pulseq file MUST include [VERSION] section prior to [BLOCKS] section"
                )
            result = __read_blocks(
                input_file,
                block_duration_raster=self.block_duration_raster,
                version_combined=version_combined,
            )
            self.block_events, self.block_durations, delay_ind_temp = result
        elif section == "[RF]":
            if jemris_generated:
                self.rf_library = __read_events(
                    input_file, (1, 1, 1, 1, 1), event_library=self.rf_library
                )
            else:
                if version_combined >= 1004000:  # 1.4.x format
                    self.rf_library = __read_events(
                        input_file,
                        (1, 1, 1, 1, 1e-6, 1, 1),
                        event_library=self.rf_library,
                    )
                else:  # 1.3.x and below
                    self.rf_library = __read_events(
                        input_file, (1, 1, 1, 1e-6, 1, 1), event_library=self.rf_library
                    )
        elif section == "[GRADIENTS]":
            if version_combined >= 1004000:  # 1.4.x format
                self.grad_library = __read_events(
                    input_file, (1, 1, 1, 1e-6), "g", self.grad_library
                )
            else:  # 1.3.x and below
                self.grad_library = __read_events(
                    input_file, (1, 1, 1e-6), "g", self.grad_library
                )
        elif section == "[TRAP]":
            if jemris_generated:
                self.grad_library = __read_events(
                    input_file, (1, 1e-6, 1e-6, 1e-6), "t", self.grad_library
                )
            else:
                self.grad_library = __read_events(
                    input_file, (1, 1e-6, 1e-6, 1e-6, 1e-6), "t", self.grad_library
                )
        elif section == "[ADC]":
            self.adc_library = __read_events(
                input_file, (1, 1e-9, 1e-6, 1, 1), event_library=self.adc_library, append=self.system.adc_dead_time
            )
        elif section == "[DELAYS]":
            if version_combined >= 1004000:
                raise RuntimeError(
                    "Pulseq file revision 1.4.0 and above MUST NOT contain [DELAYS] section"
                )
            temp_delay_library = __read_events(input_file, (1e-6,))
        elif section == "[SHAPES]":
            self.shape_library = __read_shapes(
                input_file, version_major == 1 and version_minor < 4
            )
        elif section == "[EXTENSIONS]":
            self.extensions_library = __read_events(input_file)
        else:
            if section[:18] == "extension TRIGGERS":
                extension_id = int(section[18:])
                self.set_extension_string_ID("TRIGGERS", extension_id)
                self.trigger_library = __read_events(
                    input_file, (1, 1, 1e-6, 1e-6), event_library=self.trigger_library
                )
            elif section[:18] == "extension LABELSET":
                extension_id = int(section[18:])
                self.set_extension_string_ID("LABELSET", extension_id)
                l1 = lambda s: int(s)
                l2 = lambda s: get_supported_labels().index(s) + 1
                self.label_set_library = __read_and_parse_events(input_file, l1, l2)
            elif section[:18] == "extension LABELINC":
                extension_id = int(section[18:])
                self.set_extension_string_ID("LABELINC", extension_id)
                l1 = lambda s: int(s)
                l2 = lambda s: get_supported_labels().index(s) + 1
                self.label_inc_library = __read_and_parse_events(input_file, l1, l2)
            else:
                raise ValueError(f"Unknown section code: {section}")

    input_file.close()  # Close file

    if version_combined < 1002000:
        raise ValueError(
            f"Unsupported version {version_combined}, only file format revision 1.2.0 (1002000) and above "
            f"are supported."
        )

    # Fix blocks, gradients and RF objects imported from older versions
    if version_combined < 1004000:
        # Scan through RF objects and insert the missing time-shape ID (=0)
        for i in self.rf_library.data:
            self.rf_library.update(i, None, (
                *self.rf_library.data[i][:3],
                0,
                *self.rf_library.data[i][3:]
            ))

        # Scan through the gradient objects and update 't'-s (trapezoids) and 'g'-s (free-shape gradients)
        for i in self.grad_library.data:
            if self.grad_library.type[i] == "t":
                # Zero-length ramps are not allowed in 1.4.x: borrow one
                # raster time from the flat top for each missing ramp
                if self.grad_library.data[i][1] == 0:
                    if (
                            abs(self.grad_library.data[i][0]) == 0
                            and self.grad_library.data[i][2] > 0
                    ):
                        d = self.grad_library.data[i]
                        self.grad_library.update(i, None,
                                                 (d[0], self.grad_raster_time, d[2] - self.grad_raster_time) + d[3:],
                                                 self.grad_library.type[i])

                if self.grad_library.data[i][3] == 0:
                    if (
                            abs(self.grad_library.data[i][0]) == 0
                            and self.grad_library.data[i][2] > 0
                    ):
                        d = self.grad_library.data[i]
                        self.grad_library.update(i, None,
                                                 d[:2] + (d[2] - self.grad_raster_time, self.grad_raster_time) + d[4:],
                                                 self.grad_library.type[i])

            if self.grad_library.type[i] == "g":
                # Insert the missing time-shape ID (=0); splat the old entry so
                # the stored data stays a flat tuple (previously this nested
                # tuples inside the tuple, unlike the RF fix-up above)
                self.grad_library.update(i, None, (
                    *self.grad_library.data[i][:2],
                    0,
                    *self.grad_library.data[i][2:],
                ), self.grad_library.type[i])

        # For versions prior to 1.4.0 block_durations have not been initialized
        self.block_durations = dict()
        # Scan through blocks and calculate durations
        for block_counter in self.block_events:
            # Insert delay as temporary block_duration
            self.block_durations[block_counter] = 0
            if delay_ind_temp[block_counter] > 0:
                self.block_durations[block_counter] = temp_delay_library.data[
                    delay_ind_temp[block_counter]
                ][0]

            block = self.get_block(block_counter)
            # Calculate actual block duration
            self.block_durations[block_counter] = calc_duration(block)

    # TODO: Is it possible to avoid expensive get_block calls here?
    grad_channels = ["gx", "gy", "gz"]
    grad_prev_last = np.zeros(len(grad_channels))
    for block_counter in self.block_events:
        block = self.get_block(block_counter)
        block_duration = block.block_duration
        # We also need to keep track of the event IDs because some PyPulseq files written by external software may contain
        # repeated entries so searching by content will fail
        event_idx = self.block_events[block_counter]
        # Update the objects by filling in the fields not contained in the PyPulseq file
        for j in range(len(grad_channels)):
            grad = getattr(block, grad_channels[j])
            if grad is None:
                grad_prev_last[j] = 0
                continue

            if grad.type == "grad":
                if grad.delay > 0:
                    grad_prev_last[j] = 0

                if hasattr(grad, "first"):
                    grad_prev_last[j] = grad.last
                    continue

                amplitude_ID = event_idx[j + 2]
                if amplitude_ID in event_idx[
                                   2:(j + 2)]:  # We did this update for the previous channels, don't do it again.
                    if self.use_block_cache:
                        # Update block cache in-place using the first/last values that should now be in the grad_library
                        grad.first = self.grad_library.data[amplitude_ID][4]
                        grad.last = self.grad_library.data[amplitude_ID][5]
                    continue

                grad.first = grad_prev_last[j]
                if grad.time_id != 0:
                    grad.last = grad.waveform[-1]
                    grad_duration = grad.delay + grad.tt[-1]
                else:
                    # Restore samples on the edges of the gradient raster intervals for that we need the first sample
                    # TODO: This code does not always restore reasonable values for grad.last
                    odd_step1 = [grad.first, *2 * grad.waveform]
                    odd_step2 = odd_step1 * (np.mod(range(len(odd_step1)), 2) * 2 - 1)
                    waveform_odd_rest = np.cumsum(odd_step2) * (
                            np.mod(len(odd_step2), 2) * 2 - 1
                    )
                    grad.last = waveform_odd_rest[-1]
                    grad_duration = (
                            grad.delay + len(grad.waveform) * self.grad_raster_time
                    )

                # Bookkeeping: gradients that end before the block does must end at zero
                grad_prev_last[j] = grad.last
                eps = np.finfo(np.float64).eps
                if grad_duration + eps < block_duration:
                    grad_prev_last[j] = 0

                amplitude = self.grad_library.data[amplitude_ID][0]
                new_data = (
                    amplitude,
                    grad.shape_id,
                    grad.time_id,
                    grad.delay,
                    grad.first,
                    grad.last,
                )
                self.grad_library.update_data(amplitude_ID, None, new_data, "g")

            else:
                grad_prev_last[j] = 0

    if detect_rf_use:
        # Find the RF pulses, list flip angles, and work around the current (rev 1.2.0) Pulseq file format limitation
        # that the RF pulse use is not stored in the file
        for k in self.rf_library.data:
            lib_data = self.rf_library.data[k]
            rf = self.rf_from_lib_data(lib_data)
            flip_deg = np.abs(np.sum(rf.signal[:-1] * (rf.t[1:] - rf.t[:-1]))) * 360
            offresonance_ppm = 1e6 * rf.freq_offset / self.system.B0 / self.system.gamma
            if (
                    flip_deg < 90.01
            ):  # Add 0.01 degree to account for rounding errors encountered in very short RF pulses
                self.rf_library.type[k] = "e"  # Excitation
            else:
                if (
                        rf.shape_dur > 6e-3 and -3.5 <= offresonance_ppm <= -3.4
                ):  # Approx -3.45
                    self.rf_library.type[k] = "s"  # Saturation (fat-sat)
                else:
                    self.rf_library.type[k] = "r"  # Refocusing
            self.rf_library.data[k] = lib_data

            # Clear block cache for all blocks that contain the modified RF event
            for block_counter, events in self.block_events.items():
                if events[1] == k:
                    del self.block_cache[block_counter]

    # When removing duplicates, remove and remap events in the sequence without
    # creating a copy.
    if remove_duplicates:
        self.remove_duplicates(in_place=True)
+
+
def __read_definitions(input_file) -> Dict[str, str]:
    """
    Read the [DEFINITIONS] section of a sequence file and return a map of key/value entries.

    Numeric values are returned as floats (a scalar for a single value, a
    `numpy.ndarray` for several); anything that does not parse as a number is
    kept as the raw string remainder of the line.

    Parameters
    ----------
    input_file : file object
        Sequence file.

    Returns
    -------
    definitions : dict
        Dict object containing key-value pairs of definitions.
    """
    definitions = dict()
    line = __skip_comments(input_file)
    # Stop at EOF (-1), a blank line, or the start of a comment/next section
    while line != -1 and not (line == "" or line[0] == "#"):
        tok = line.split(" ")
        try:
            # Numeric definition: np.array raises ValueError if any token is not a float
            value = np.array(tok[1:], dtype=float)
            if len(value) == 1:  # Avoid array structure for single elements
                value = value[0]
            definitions[tok[0]] = value
        except ValueError:
            # Non-numeric definition: keep the remainder of the line as a string
            definitions[tok[0]] = line[len(tok[0]) + 1:].strip()
        line = __strip_line(input_file)

    return definitions
+
+
def __read_version(input_file) -> Tuple[int, int, int]:
    """
    Read the [VERSION] section of a sequence file.

    Parameters
    ----------
    input_file : file object
        Sequence file.

    Returns
    -------
    tuple
        Major, minor and revision number.

    Raises
    ------
    RuntimeError
        If the section contains an entry other than major/minor/revision.
    """
    line = __strip_line(input_file)
    major, minor, revision = 0, 0, 0
    # Stop at EOF (-1), a blank line, or the start of a comment.
    # The explicit -1 check avoids a TypeError on `line[0]` at end of file.
    while line != -1 and line != "" and line[0] != "#":
        tok = line.split(" ")
        if tok[0] == "major":
            major = int(tok[1])
        elif tok[0] == "minor":
            minor = int(tok[1])
        elif tok[0] == "revision":
            # Tolerate suffixed revisions such as "x.y.zpostN": keep the leading digit
            if len(tok[1]) != 1:
                tok[1] = tok[1][0]
            revision = int(tok[1])
        else:
            raise RuntimeError(
                f"Unexpected entry in [VERSION] section: {line}"
            )
        line = __strip_line(input_file)

    return major, minor, revision
+
+
def __read_blocks(
        input_file, block_duration_raster: float, version_combined: int
) -> Tuple[Dict[int, np.ndarray], Dict[int, float], Dict[int, int]]:
    """
    Read the [BLOCKS] section of a sequence file and return the event table.

    Parameters
    ----------
    input_file : file
        Sequence file.
    block_duration_raster : float
        Raster used to convert stored block durations (file version >= 1.4.0)
        to seconds.
    version_combined : int
        Combined numeric file version (major/minor/revision).

    Returns
    -------
    event_table : dict
        Dict mapping Pulseq block ID to the block's event ID array.
    block_durations : dict
        Dict mapping block ID to block duration in seconds (file version >= 1.4.0).
    delay_idx : dict
        Dict mapping block ID to delay event ID (file versions < 1.4.0).
    """
    event_table = dict()
    block_durations = dict()
    delay_idx = dict()
    line = __strip_line(input_file)

    while line != "" and line != "#":
        block_events = np.fromstring(line, dtype=int, sep=" ")
        block_id = block_events[0]

        # Columns from index 2 are the event IDs; a leading 0 replaces the
        # legacy delay column, and pre-v1.2.1 files get a trailing 0 for the
        # missing extension column.
        if version_combined <= 1002001:
            event_table[block_id] = np.array([0, *block_events[2:], 0])
        else:
            event_table[block_id] = np.array([0, *block_events[2:]])

        # Column 1 is the block duration (v1.4+) or a delay event ID (older files)
        if version_combined >= 1004000:
            block_durations[block_id] = block_events[1] * block_duration_raster
        else:
            delay_idx[block_id] = block_events[1]

        line = __strip_line(input_file)

    return event_table, block_durations, delay_idx
+
+
def __read_events(
        input_file,
        scale: tuple = (1,),
        event_type: str = "",
        event_library: EventLibrary = None,
        append=None,
) -> EventLibrary:
    """
    Read an event section of a sequence file and return a library of events.

    Parameters
    ----------
    input_file : file object
        Sequence file.
    scale : tuple, default=(1,)
        Scale elements according to column vector scale.
    event_type : str, default=''
        Attach the type string to elements of the library.
    event_library : EventLibrary, default=None
        Append new events to the given library; a fresh library is created
        when None.
    append : optional
        Extra value appended to each event's data tuple (e.g. a field missing
        from older file versions).

    Returns
    -------
    event_library : EventLibrary
        Event library containing Pulseq events.
    """
    if event_library is None:
        event_library = EventLibrary()
    line = __strip_line(input_file)

    while line != "" and line != "#":
        data = np.fromstring(line, dtype=float, sep=" ")
        event_id = data[0]
        # Element-wise scaling; `scale` broadcasts against the data columns
        data = tuple(data[1:] * scale)
        if append is not None:  # identity check instead of `!= None`
            data = data + (append,)
        if event_type == "":
            event_library.insert(key_id=event_id, new_data=data)
        else:
            event_library.insert(key_id=event_id, new_data=data, data_type=event_type)
        line = __strip_line(input_file)

    return event_library
+
+
def __read_and_parse_events(input_file, *args: callable) -> EventLibrary:
    """
    Read an event section of a sequence file and return a library of events. Event data elements are converted using
    the provided parser(s). Default parser is `int()`.

    Parameters
    ----------
    input_file : file
        Sequence file.
    args : callable
        Event parsers, applied positionally to the data columns; columns
        without a dedicated parser are converted with `int()`.

    Returns
    -------
    EventLibrary
        Library of events parsed from the events section of a sequence file.
    """
    event_library = EventLibrary()
    line = __strip_line(input_file)

    while line != "" and line != "#":
        # str.split() collapses any run of whitespace, so multiple spaces or
        # tabs between columns are tolerated (the previous regex split kept
        # multi-space separator tokens, which broke int()).
        datas = line.split()
        data = np.zeros(len(datas) - 1, dtype=np.int32)
        event_id = int(datas[0])
        for i in range(1, len(datas)):
            if i > len(args):
                data[i - 1] = int(datas[i])
            else:
                data[i - 1] = args[i - 1](datas[i])
        event_library.insert(key_id=event_id, new_data=data)
        line = __strip_line(input_file)

    return event_library
+
+
def __read_shapes(input_file, force_convert_uncompressed: bool) -> EventLibrary:
    """
    Read the [SHAPES] section of a sequence file and return a library of shapes.

    Parameters
    ----------
    input_file : file
        Sequence file.
    force_convert_uncompressed : bool
        If True, shapes stored uncompressed (marker: number of stored samples
        equals the declared sample count) are re-compressed on read.

    Returns
    -------
    shape_library : EventLibrary
        `EventLibrary` object containing shape definitions.
    """
    shape_library = EventLibrary(numpy_data=True)

    line = __skip_comments(input_file)

    # NOTE(review): when line != "" the `or` short-circuits and the
    # "shape_id" comparison never runs; when line == "" both operands are
    # False. The second operand is effectively dead code -- presumably `and`
    # was intended. TODO confirm against the Pulseq reference implementation.
    while line != -1 and (line != "" or line[0:8] == "shape_id"):
        tok = line.split(" ")
        shape_id = int(tok[1])  # Line format: "shape_id <id>"
        line = __skip_comments(input_file)
        tok = line.split(" ")
        num_samples = int(tok[1])  # Line format: "num_samples <n>"
        data = []
        line = __skip_comments(input_file)
        # Collect the (possibly compressed) sample values until a blank line or comment
        while line != "" and line != "#":
            data.append(float(line))
            line = __strip_line(input_file)
        # Advance to the next shape, but rewind if the next section header is reached
        line = __skip_comments(input_file, stop_before_section=True)

        # Check if conversion is needed: in v1.4.x we use length(data)==num_samples
        # As a marker for the uncompressed (stored) data. In older versions this condition could occur by chance
        if force_convert_uncompressed and len(data) == num_samples:
            shape = SimpleNamespace()
            shape.data = data
            shape.num_samples = num_samples
            shape = compress_shape(decompress_shape(shape, force_decompression=True))
            data = np.array([shape.num_samples, *shape.data])
        else:
            # First element stores the declared sample count, followed by the raw data
            data.insert(0, num_samples)
            data = np.asarray(data)
        shape_library.insert(key_id=shape_id, new_data=data)
    return shape_library
+
+
def __skip_comments(input_file, stop_before_section: bool = False) -> str:
    """
    Skip blank lines and '#' comment lines and return the next content line.

    Parameters
    ----------
    input_file : file
        Sequence file.
    stop_before_section : bool, default=False
        If True and the next content line opens a new section ('['), rewind
        the file to just before that line and return an empty string instead.

    Returns
    -------
    line : str
        Next non-blank, non-comment line, '' when stopping before a section,
        or -1 at end of file. The file position is advanced accordingly, so
        successive calls work as expected.
    """
    pos_before = input_file.tell()
    line = __strip_line(input_file)
    while line == "" or (line != -1 and line[0] == "#"):
        pos_before = input_file.tell()
        line = __strip_line(input_file)

    if line == -1:
        return -1
    if stop_before_section and line[0] == "[":
        # Rewind so the caller's section parser sees the header line again
        input_file.seek(pos_before, 0)
        return ""
    return line
+
+
def __strip_line(input_file) -> str:
    """
    Read one line and strip surrounding whitespace.

    Parameters
    ----------
    input_file : file
        Sequence file.

    Returns
    -------
    line : str
        Next line of `input_file` with leading/trailing whitespace (including
        the newline) removed, or -1 at end of file. The file position is
        advanced, so successive calls work as expected.
    """
    raw = input_file.readline()
    if raw == "":  # readline() returns '' only at end of file
        return -1
    return raw.strip()

+ 1893 - 0
LF_scanner/pypulseq/Sequence/sequence.py

@@ -0,0 +1,1893 @@
+import itertools
+import math
+from collections import OrderedDict
+from types import SimpleNamespace
+from typing import Tuple, List
+from typing import Union
+from warnings import warn
+import tkinter as tk
+
+try:
+    from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
+    from matplotlib.figure import Figure
+    import matplotlib as mpl
+    from matplotlib import pyplot as plt
+except Exception:
+    FigureCanvasTkAgg = None
+    NavigationToolbar2Tk = None
+    Figure = None
+    mpl = None
+    plt = None
+
+try:
+    from typing import Self
+except ImportError:
+    from typing import TypeVar
+    Self = TypeVar('Self', bound='Sequence')
+
+import numpy as np
+try:
+    from scipy.interpolate import PPoly
+except Exception:
+    PPoly = None
+
+from LF_scanner.pypulseq import eps
+from LF_scanner.pypulseq.calc_rf_center import calc_rf_center
+from LF_scanner.pypulseq.check_timing import check_timing as ext_check_timing
+from LF_scanner.pypulseq.decompress_shape import decompress_shape
+from LF_scanner.pypulseq.event_lib import EventLibrary
+from LF_scanner.pypulseq.opts import Opts
+from LF_scanner.pypulseq.supported_labels_rf_use import get_supported_labels
+from LF_scanner.version import major, minor, revision
+from LF_scanner.pypulseq.block_to_events import block_to_events
+from LF_scanner.pypulseq.utils.cumsum import cumsum
+from copy import deepcopy
+
+from LF_scanner.pypulseq.Sequence import block
+try:
+    from LF_scanner.pypulseq.Sequence import parula
+except Exception:
+    parula = None
+from LF_scanner.pypulseq.Sequence.ext_test_report import ext_test_report
+from LF_scanner.pypulseq.Sequence.read_seq import read
+from LF_scanner.pypulseq.Sequence.write_seq import write as write_seq
+try:
+    from LF_scanner.pypulseq.Sequence.calc_pns import calc_pns
+except Exception:
+    calc_pns = None
+try:
+    from LF_scanner.pypulseq.Sequence.calc_grad_spectrum import calculate_gradient_spectrum
+except Exception:
+    calculate_gradient_spectrum = None
+
+
+class Sequence:
+    """
+    Generate sequences and read/write sequence files. This class defines properties and methods to define a complete MR
+    sequence including RF pulses, gradients, ADC events, etc. The class provides an implementation of the open MR
+    sequence format defined by the Pulseq project. See http://pulseq.github.io/.
+
+    See also `demo_read.py`, `demo_write.py`.
+    """
+
+    version_major = int(major)
+    version_minor = int(minor)
+    version_revision = revision
+
+    def __init__(self, system=None, use_block_cache=True):
+        if system == None:
+            system = Opts()
+
+        # =========
+        # EVENT LIBRARIES
+        # =========
+        self.adc_library = EventLibrary()  # Library of ADC events
+        self.delay_library = EventLibrary()  # Library of delay events
+        # Library of extension events. Extension events form single-linked zero-terminated lists
+        self.extensions_library = EventLibrary()
+        self.grad_library = EventLibrary()  # Library of gradient events
+        self.label_inc_library = (
+            EventLibrary()
+        )  # Library of Label(inc) events (reference from the extensions library)
+        self.label_set_library = (
+            EventLibrary()
+        )  # Library of Label(set) events (reference from the extensions library)
+        self.rf_library = EventLibrary()  # Library of RF events
+        self.shape_library = EventLibrary(numpy_data=True)  # Library of compressed shapes
+        self.trigger_library = EventLibrary()  # Library of trigger events
+
+        # =========
+        # OTHER
+        # =========
+        self.system = system
+
+        self.block_events = OrderedDict()  # Event table
+        self.use_block_cache = use_block_cache
+        self.block_cache = dict()  # Block cache
+        self.next_free_block_ID = 1
+
+        self.definitions = dict()  # Optional sequence definitions
+
+        self.rf_raster_time = (
+            self.system.rf_raster_time
+        )  # RF raster time (system dependent)
+        self.grad_raster_time = (
+            self.system.grad_raster_time
+        )  # Gradient raster time (system dependent)
+        self.adc_raster_time = (
+            self.system.adc_raster_time
+        )  # ADC raster time (system dependent)
+        self.block_duration_raster = self.system.block_duration_raster
+        self.set_definition("AdcRasterTime", self.adc_raster_time)
+        self.set_definition("BlockDurationRaster", self.block_duration_raster)
+        self.set_definition("GradientRasterTime", self.grad_raster_time)
+        self.set_definition("RadiofrequencyRasterTime", self.rf_raster_time)
+        self.signature_type = ""
+        self.signature_file = ""
+        self.signature_value = ""
+
+        self.block_durations = dict()  # Cache of block durations
+        self.extension_numeric_idx = []  # numeric IDs of the used extensions
+        self.extension_string_idx = []  # string IDs of the used extensions
+
+    def __str__(self) -> str:
+        s = "Sequence:"
+        s += "\nshape_library: " + str(self.shape_library)
+        s += "\nrf_library: " + str(self.rf_library)
+        s += "\ngrad_library: " + str(self.grad_library)
+        s += "\nadc_library: " + str(self.adc_library)
+        s += "\ndelay_library: " + str(self.delay_library)
+        s += "\nextensions_library: " + str(
+            self.extensions_library
+        )  # inserted for trigger support by mveldmann
+        s += "\nrf_raster_time: " + str(self.rf_raster_time)
+        s += "\ngrad_raster_time: " + str(self.grad_raster_time)
+        s += "\nblock_events: " + str(len(self.block_events))
+        return s
+
+    def adc_times(
+            self, time_range: List[float] = None
+    ) -> Tuple[np.ndarray, np.ndarray]:
+        """
+        Return time points of ADC sampling points.
+
+        Returns
+        -------
+        t_adc: np.ndarray
+            Contains times of all ADC sample points.
+        fp_adc : np.ndarray
+            Contains frequency and phase offsets of each ADC object (not samples).
+        """
+
+        # Collect ADC timing data
+        t_adc = []
+        fp_adc = []
+
+        curr_dur = 0
+        if time_range == None:
+            blocks = self.block_events
+        else:
+            if len(time_range) != 2:
+                raise ValueError('Time range must be list of two elements')
+            if time_range[0] > time_range[1]:
+                raise ValueError('End time of time_range must be after begin time')
+
+            # Calculate end times of each block
+            bd = np.array(list(self.block_durations.values()))
+            t = np.cumsum(bd)
+            # Search block end times for start of time range
+            begin_block = np.searchsorted(t, time_range[0])
+            # Search block begin times for end of time range
+            end_block = np.searchsorted(t - bd, time_range[1], side='right')
+            blocks = list(self.block_durations.keys())[begin_block:end_block]
+            curr_dur = t[begin_block] - bd[begin_block]
+
+        for block_counter in blocks:
+            block = self.get_block(block_counter)
+
+            if block.adc is not None:  # ADC
+                t_adc.append(
+                    (np.arange(block.adc.num_samples) + 0.5) * block.adc.dwell
+                    + block.adc.delay
+                    + curr_dur
+                )
+                fp_adc.append([block.adc.freq_offset, block.adc.phase_offset])
+
+            curr_dur += self.block_durations[block_counter]
+
+        if t_adc == []:
+            # If there are no ADCs, make sure the output is the right shape
+            t_adc = np.zeros(0)
+            fp_adc = np.zeros((0, 2))
+        else:
+            t_adc = np.concatenate(t_adc)
+            fp_adc = np.array(fp_adc)
+
+        return t_adc, fp_adc
+
+    def add_block(self, *args: SimpleNamespace) -> None:
+        """
+        Add a new block/multiple events to the sequence. Adds a sequence block with provided as a block structure
+
+        See also:
+        - `pypulseq.Sequence.sequence.Sequence.set_block()`
+        - `pypulseq.make_adc.make_adc()`
+        - `pypulseq.make_trapezoid.make_trapezoid()`
+        - `pypulseq.make_sinc_pulse.make_sinc_pulse()`
+
+        Parameters
+        ----------
+        args : SimpleNamespace
+            Block structure or events to be added as a block to `Sequence`.
+        """
+        block.set_block(self, self.next_free_block_ID, *args)
+        self.next_free_block_ID += 1
+
+    def calculate_gradient_spectrum(
+            self, max_frequency: float = 2000,
+            window_width: float = 0.05,
+            frequency_oversampling: float = 3,
+            time_range: List[float] = None,
+            plot: bool = True,
+            combine_mode: str = 'max',
+            use_derivative: bool = False,
+            acoustic_resonances: List[dict] = []
+    ) -> Tuple[List[np.ndarray], np.ndarray, np.ndarray, np.ndarray]:
+        """
+        Calculates the gradient spectrum of the sequence. Returns a spectrogram
+        for each gradient channel, as well as a root-sum-squares combined
+        spectrogram.
+
+        Works by splitting the sequence into windows that are 'window_width'
+        long and calculating the fourier transform of each window. Windows
+        overlap 50% with the previous and next window. When 'combine_mode' is
+        not 'none', all windows are combined into one spectrogram.
+
+        Parameters
+        ----------
+        max_frequency : float, optional
+            Maximum frequency to include in spectrograms. The default is 2000.
+        window_width : float, optional
+            Window width (in seconds). The default is 0.05.
+        frequency_oversampling : float, optional
+            Oversampling in the frequency dimension, higher values make
+            smoother spectrograms. The default is 3.
+        time_range : List[float], optional
+            Time range over which to calculate the spectrograms as a list of
+            two timepoints (in seconds) (e.g. [1, 1.5])
+            The default is None.
+        plot : bool, optional
+            Whether to plot the spectograms. The default is True.
+        combine_mode : str, optional
+            How to combine all windows into one spectrogram, options:
+                'max', 'mean', 'sos' (root-sum-of-squares), 'none' (no combination)
+            The default is 'max'.
+        use_derivative : bool, optional
+            Whether the use the derivative of the gradient waveforms instead of the
+            gradient waveforms for the gradient spectrum calculations. The default
+            is False
+        acoustic_resonances : List[dict], optional
+            Acoustic resonances as a list of dictionaries with 'frequency' and
+            'bandwidth' elements. Only used when plot==True. The default is [].
+
+        Returns
+        -------
+        spectrograms : List[np.ndarray]
+            List of spectrograms per gradient channel.
+        spectrogram_sos : np.ndarray
+            Root-sum-of-squares combined spectrogram over all gradient channels.
+        frequencies : np.ndarray
+            Frequency axis of the spectrograms.
+        times : np.ndarray
+            Time axis of the spectrograms (only relevant when combine_mode == 'none').
+
+        """
+        return calculate_gradient_spectrum(self, max_frequency=max_frequency,
+                                           window_width=window_width,
+                                           frequency_oversampling=frequency_oversampling,
+                                           time_range=time_range,
+                                           plot=plot,
+                                           combine_mode=combine_mode,
+                                           use_derivative=use_derivative,
+                                           acoustic_resonances=acoustic_resonances)
+
    def calculate_kspace(
            self,
            trajectory_delay: Union[float, List[float], np.ndarray] = 0,
            gradient_offset: Union[float, List[float], np.ndarray] = 0
    ) -> Tuple[np.array, np.array, List[float], List[float], np.array]:
        """
        Calculates the k-space trajectory of the entire pulse sequence.

        Parameters
        ----------
        trajectory_delay : float or list, default=0
            Compensation factor in seconds (s) to align ADC and gradients in the reconstruction.
        gradient_offset : float or list, default=0
            Simulates background gradients (specified in Hz/m)

        Returns
        -------
        k_traj_adc : numpy.array
            K-space trajectory sampled at `t_adc` timepoints.
        k_traj : numpy.array
            K-space trajectory of the entire pulse sequence.
        t_excitation : List[float]
            Excitation timepoints.
        t_refocusing : List[float]
            Refocusing timepoints.
        t_adc : numpy.array
            Sampling timepoints.
        """
        # NOTE(review): `raise Warning(...)` aborts execution; `warnings.warn`
        # was presumably intended for a sanity check -- confirm.
        if np.any(np.abs(trajectory_delay) > 100e-6):
            raise Warning(
                f"Trajectory delay of {trajectory_delay * 1e6} us is suspiciously high"
            )

        total_duration = sum(self.block_durations.values())

        # RF and ADC event timings over the whole sequence
        t_excitation, fp_excitation, t_refocusing, _ = self.rf_times()
        t_adc, _ = self.adc_times()

        # Convert data to piecewise polynomials
        gw_pp = self.get_gradients(trajectory_delay, gradient_offset)
        ng = len(gw_pp)

        # Calculate slice positions. For now we entirely rely on the excitation -- ignoring complicated interleaved
        # refocused sequences
        if len(t_excitation) > 0:
            # Position in x, y, z
            slice_pos = np.zeros((ng, len(t_excitation)))
            for j in range(ng):
                if gw_pp[j] is None:
                    slice_pos[j] = np.nan
                else:
                    # Check for divisions by zero to avoid numpy warning
                    divisor = np.array(gw_pp[j](t_excitation))
                    slice_pos[j, divisor != 0.0] = fp_excitation[0, divisor != 0.0] / divisor[divisor != 0.0]
                    slice_pos[j, divisor == 0.0] = np.nan

            slice_pos[~np.isfinite(slice_pos)] = 0  # Reset undefined to 0
        else:
            slice_pos = []

        # =========
        # Integrate waveforms as PPs to produce gradient moments
        gm_pp = []
        tc = []  # Candidate time points for the trajectory time axis
        for i in range(ng):
            if gw_pp[i] is None:
                gm_pp.append(None)
                continue

            gm_pp.append(gw_pp[i].antiderivative())
            tc.append(gm_pp[i].x)
            # "Sample" ramps for display purposes otherwise piecewise-linear display (plot) fails
            ii = np.flatnonzero(np.abs(gm_pp[i].c[0, :]) > eps)

            # Do nothing if there are no ramps
            if ii.shape[0] == 0:
                continue

            # Gradient-raster indices spanned by each ramp segment
            starts = np.int64(np.floor((gm_pp[i].x[ii] + eps) / self.grad_raster_time))
            ends = np.int64(np.ceil((gm_pp[i].x[ii + 1] - eps) / self.grad_raster_time))

            # Create all ranges starts[0]:ends[0], starts[1]:ends[1], etc.
            lengths = ends - starts + 1
            inds = np.ones((lengths).sum())
            # Calculate output index where each range will start
            start_inds = np.cumsum(np.concatenate(([0], lengths[:-1])))
            # Create element-wise differences that will cumsum into
            # the final indices: [starts[0], 1, 1, starts[1]-starts[0]-lengths[0]+1, 1, etc.]
            inds[start_inds] = np.concatenate(([starts[0]], np.diff(starts) - lengths[:-1] + 1))

            tc.append(np.cumsum(inds) * self.grad_raster_time)
        if tc != []:
            tc = np.concatenate(tc)

        t_acc = 1e-10  # Temporal accuracy
        t_acc_inv = 1 / t_acc
        # tc = self.__flatten_jagged_arr(tc)
        # Combined, de-duplicated time axis: gradient time points, RF raster
        # neighborhoods of excitation/refocusing, ADC samples, and end of sequence
        t_ktraj = t_acc * np.unique(
            np.round(
                t_acc_inv
                * np.array(
                    [
                        *tc,
                        0,
                        *np.asarray(t_excitation) - 2 * self.rf_raster_time,
                        *np.asarray(t_excitation) - self.rf_raster_time,
                        *t_excitation,
                        *np.asarray(t_refocusing) - self.rf_raster_time,
                        *t_refocusing,
                        *t_adc,
                        total_duration,
                    ]
                )
            )
        )

        # Indices of the RF/ADC events on the combined time axis
        i_excitation = np.searchsorted(t_ktraj, t_acc * np.round(t_acc_inv * np.asarray(t_excitation)))
        i_refocusing = np.searchsorted(t_ktraj, t_acc * np.round(t_acc_inv * np.asarray(t_refocusing)))
        i_adc = np.searchsorted(t_ktraj, t_acc * np.round(t_acc_inv * np.asarray(t_adc)))

        # Period boundaries: sequence start, every RF event, sequence end
        i_periods = np.unique([0, *i_excitation, *i_refocusing, len(t_ktraj) - 1])
        if len(i_excitation) > 0:
            ii_next_excitation = 0
        else:
            ii_next_excitation = -1
        if len(i_refocusing) > 0:
            ii_next_refocusing = 0
        else:
            ii_next_refocusing = -1

        # Evaluate the gradient-moment polynomials on the combined time axis
        k_traj = np.zeros((ng, len(t_ktraj)))
        for i in range(ng):
            if gw_pp[i] is None:
                continue

            it = np.where(np.logical_and(
                t_ktraj >= t_acc * round(t_acc_inv * gm_pp[i].x[0]),
                t_ktraj <= t_acc * round(t_acc_inv * gm_pp[i].x[-1]),
            ))[0]
            k_traj[i, it] = gm_pp[i](t_ktraj[it])
            if t_ktraj[it[-1]] < t_ktraj[-1]:
                # Hold the last moment value constant past the end of the waveform
                k_traj[i, it[-1] + 1:] = k_traj[i, it[-1]]

        # Convert gradient moments to kspace
        dk = -k_traj[:, 0]
        for i in range(len(i_periods) - 1):
            i_period = i_periods[i]
            i_period_end = i_periods[i + 1]
            if ii_next_excitation >= 0 and i_excitation[ii_next_excitation] == i_period:
                if abs(t_ktraj[i_period] - t_excitation[ii_next_excitation]) > t_acc:
                    # NOTE(review): the f-string below calls t_ktraj(i_period) and
                    # t_excitation(ii_next_excitation) with () instead of [] -- this
                    # error path would raise TypeError if triggered. TODO fix indexing.
                    raise Warning(
                        f"abs(t_ktraj[i_period]-t_excitation[ii_next_excitation]) < {t_acc} failed for ii_next_excitation={ii_next_excitation} error={t_ktraj(i_period) - t_excitation(ii_next_excitation)}"
                    )
                # Excitation resets k-space to zero at this time point
                dk = -k_traj[:, i_period]
                if i_period > 0:
                    # Use nans to mark the excitation points since they interrupt the plots
                    k_traj[:, i_period - 1] = np.nan
                # -1 on len(i_excitation) for 0-based indexing
                ii_next_excitation = min(len(i_excitation) - 1, ii_next_excitation + 1)
            elif (
                    ii_next_refocusing >= 0 and i_refocusing[ii_next_refocusing] == i_period
            ):
                # dk = -k_traj[:, i_period]
                # Refocusing mirrors the accumulated k-space position
                dk = -2 * k_traj[:, i_period] - dk
                # -1 on len(i_excitation) for 0-based indexing
                ii_next_refocusing = min(len(i_refocusing) - 1, ii_next_refocusing + 1)

            k_traj[:, i_period:i_period_end] = (
                    k_traj[:, i_period:i_period_end] + dk[:, None]
            )

        # Apply the offset to the final boundary sample (i_period_end from the last iteration)
        k_traj[:, i_period_end] = k_traj[:, i_period_end] + dk
        k_traj_adc = k_traj[:, i_adc]

        return k_traj_adc, k_traj, t_excitation, t_refocusing, t_adc
+
+    def calculate_kspacePP(
+            self,
+            trajectory_delay: Union[float, List[float], np.ndarray] = 0,
+            gradient_offset: Union[float, List[float], np.ndarray] = 0
+    ) -> Tuple[np.array, np.array, np.array, np.array, np.array]:
+        warn('Sequence.calculate_kspacePP has been deprecated, use calculate_kspace instead', DeprecationWarning,
+             stacklevel=2)
+        return self.calculate_kspace(trajectory_delay, gradient_offset)
+
    def calculate_pns(
            self,
            hardware: SimpleNamespace,
            time_range: List[float] = None,
            do_plots: bool = True
    ) -> Tuple[bool, np.array, np.ndarray, np.array]:
        """
        Calculate PNS using safe model implementation by Szczepankiewicz and Witzel
        See http://github.com/filip-szczepankiewicz/safe_pns_prediction

        Returns pns levels due to respective axes (normalized to 1 and not to 100#)

        Parameters
        ----------
        hardware : SimpleNamespace
            Hardware specifications. See safe_example_hw() from
            the safe_pns_prediction package. Alternatively a text file
            in the .asc format (Siemens) can be passed, e.g. for Prisma
            it is MP_GPA_K2309_2250V_951A_AS82.asc (we leave it as an
            exercise to the interested user to find were these files
            can be acquired from)
        time_range : List[float], optional
            Two-element [start, end] window (in seconds) restricting the
            calculation; the full sequence is used when None. The default is None.
        do_plots : bool, optional
            Plot the results from the PNS calculations. The default is True.

        Returns
        -------
        ok : bool
            Boolean flag indicating whether peak PNS is within acceptable limits
        pns_norm : numpy.array [N]
            PNS norm over all gradient channels, normalized to 1
        pns_components : numpy.array [Nx3]
            PNS levels per gradient channel
        t_pns : np.array [N]
            Time axis for the pns_norm and pns_components arrays
        """
        return calc_pns(self, hardware, time_range=time_range, do_plots=do_plots)
+
+    def check_timing(self) -> Tuple[bool, List[str]]:
+        """
+        Checks timing of all blocks and objects in the sequence optionally returns the detailed error log. This
+        function also modifies the sequence object by adding the field "TotalDuration" to sequence definitions.
+
+        Returns
+        -------
+        is_ok : bool
+            Boolean flag indicating timing errors.
+        error_report : str
+            Error report in case of timing errors.
+        """
+        error_report = []
+        is_ok = True
+        total_duration = 0
+
+        for block_counter in self.block_events:
+            block = self.get_block(block_counter)
+            events = block_to_events(block)
+            res, rep, duration = ext_check_timing(self.system, *events)
+            is_ok = is_ok and res
+
+            # Check the stored total block duration
+            if abs(duration - self.block_durations[block_counter]) > eps:
+                rep += "Inconsistency between the stored block duration and the duration of the block content"
+                is_ok = False
+                duration = self.block_durations[block_counter]
+
+            # Check RF dead times
+            if block.rf is not None:
+                if block.rf.delay - block.rf.dead_time < -eps:
+                    rep += (
+                        f"Delay of {block.rf.delay * 1e6} us is smaller than the RF dead time "
+                        f"{block.rf.dead_time * 1e6} us"
+                    )
+                    is_ok = False
+
+                if (
+                        block.rf.delay + block.rf.t[-1] + block.rf.ringdown_time - duration
+                        > eps
+                ):
+                    rep += (
+                        f"Time between the end of the RF pulse at {block.rf.delay + block.rf.t[-1]} and the end "
+                        f"of the block at {duration * 1e6} us is shorter than rf_ringdown_time"
+                    )
+                    is_ok = False
+
+            # Check ADC dead times
+            if block.adc is not None:
+                if block.adc.delay - self.system.adc_dead_time < -eps:
+                    rep += "adc.delay < system.adc_dead_time"
+                    is_ok = False
+
+                if (
+                        block.adc.delay
+                        + block.adc.num_samples * block.adc.dwell
+                        + self.system.adc_dead_time
+                        - duration
+                        > eps
+                ):
+                    rep += "adc: system.adc_dead_time (post-ADC) violation"
+                    is_ok = False
+
+            # Update report
+            if len(rep) != 0:
+                error_report.append(f"Event: {block_counter} - {rep}\n")
+            total_duration += duration
+
+        # Check if all the gradients in the last block are ramped down properly
+        if len(events) != 0 and all([isinstance(e, SimpleNamespace) for e in events]):
+            for e in range(len(events)):
+                if not isinstance(events[e], list) and events[e].type == "grad":
+                    if events[e].last != 0:
+                        error_report.append(
+                            f"Event: {list(self.block_events)[-1]} - Gradients do not ramp to 0 at the end of the sequence"
+                        )
+
+        self.set_definition("TotalDuration", total_duration)
+
+        return is_ok, error_report
+
+    def duration(self) -> Tuple[int, int, np.ndarray]:
+        """
+        Returns the total duration of this sequence, and the total count of blocks and events.
+
+        Returns
+        -------
+        duration : int
+            Duration of this sequence in seconds (s).
+        num_blocks : int
+            Number of blocks in this sequence.
+        event_count : np.ndarray
+            Number of events in this sequence.
+        """
+        num_blocks = len(self.block_events)
+        event_count = np.zeros(len(next(iter(self.block_events.values()))))
+        duration = 0
+        for block_counter in self.block_events:
+            event_count += self.block_events[block_counter] > 0
+            duration += self.block_durations[block_counter]
+
+        return duration, num_blocks, event_count
+
+    def evaluate_labels(
+            self,
+            init: dict = None,
+            evolution: str = 'none'
+    ) -> dict:
+        """
+        Evaluate label values of the entire sequence.
+
+        When no evolution is given, returns the label values at the end of the
+        sequence. Returns a dictionary with keys named after the labels used
+        in the sequence. Only the keys corresponding to the labels actually
+        used are created.
+        E.g. labels['LIN'] == 4
+
+        When evolution is given, labels are tracked through the sequence. See
+        below for options for different types of evolutions. The resulting
+        dictionary will contain arrays of the label values.
+        E.g. labels['LIN'] == np.array([0,1,2,3,4])
+
+        Initial values for the labels can be given with the 'init' parameter.
+        Useful if evaluating labels block-by-block.
+
+        Parameters
+        ----------
+        init : dict, optional
+            Dictionary containing initial label values. The default is None.
+        evolution : str, optional
+            Flag to specify tracking of label evolutions.
+            Must be one of: 'none', 'adc', 'label', 'blocks' (default = 'none')
+            'blocks': Return label values for all blocks.
+            'adc':    Return label values only for blocks containing ADC events.
+            'label':  Return label values only for blocks where labels are
+                      manipulated.
+
+        Returns
+        -------
+        labels : dict
+            Dictionary containing label values.
+            If evolution == 'none', the dictionary values only contains the
+            final label value.
+            Otherwise, the dictionary values are arrays of label evolutions.
+            Only the labels that are used in the sequence are created in the
+            dictionary.
+
+        """
+        labels = init or dict()
+        label_evolution = []
+
+        # TODO: MATLAB implementation includes block_range parameter. But in
+        #       general we cannot assume linear block ordering. Could include
+        #       time_range like in other sequence functions. Or a blocks
+        #       parameter to specify which blocks to loop over?
+        for block_counter in self.block_events:
+            block = self.get_block(block_counter)
+
+            if block.label is not None:
+                # Current block has labels
+                for lab in block.label.values():
+                    if lab.type == 'labelinc':
+                        # Increment label
+                        if lab.label not in labels:
+                            labels[lab.label] = 0
+
+                        labels[lab.label] += lab.value
+                    else:
+                        # Set label
+                        labels[lab.label] = lab.value
+
+                if evolution == 'label':
+                    label_evolution.append(dict(labels))
+
+            if evolution == 'blocks' or (evolution == 'adc' and block.adc is not None):
+                label_evolution.append(dict(labels))
+
+        # Convert evolutions into label dictionary
+        if len(label_evolution) > 0:
+            for lab in labels:
+                labels[lab] = np.array([e[lab] if lab in e else 0 for e in label_evolution])
+
+        return labels
+
+    def flip_grad_axis(self, axis: str) -> None:
+        """
+        Invert all gradients along the corresponding axis/channel. The function acts on all gradient objects already
+        added to the sequence object.
+
+        Parameters
+        ----------
+        axis : str
+            Gradients to invert or scale. Must be one of 'x', 'y' or 'z'.
+        """
+        self.mod_grad_axis(axis, modifier=-1)
+
    def get_block(self, block_index: int) -> SimpleNamespace:
        """
        Return a block of the sequence specified by the index. The block is created from the sequence data with all
        events and shapes decompressed.

        See also:
        - `pypulseq.Sequence.sequence.Sequence.set_block()`.
        - `pypulseq.Sequence.sequence.Sequence.add_block()`.

        Parameters
        ----------
        block_index : int
            Index of block to be retrieved from `Sequence`.

        Returns
        -------
        SimpleNamespace
            Event identified by `block_index`.
        """
        # Thin delegate: decompression/caching is handled by the block module.
        return block.get_block(self, block_index)
+
+    def get_definition(self, key: str) -> str:
+        """
+        Return value of the definition specified by the key. These definitions can be added manually or read from the
+        header of a sequence file defined in the sequence header. An empty array is returned if the key is not defined.
+
+        See also `pypulseq.Sequence.sequence.Sequence.set_definition()`.
+
+        Parameters
+        ----------
+        key : str
+            Key of definition to retrieve.
+
+        Returns
+        -------
+        str
+            Definition identified by `key` if found, else returns ''.
+        """
+        if key in self.definitions:
+            return self.definitions[key]
+        else:
+            return ""
+
+    def get_extension_type_ID(self, extension_string: str) -> int:
+        """
+        Get numeric extension ID for `extension_string`. Will automatically create a new ID if unknown.
+
+        Parameters
+        ----------
+        extension_string : str
+            Given string extension ID.
+
+        Returns
+        -------
+        extension_id : int
+            Numeric ID for given string extension ID.
+
+        """
+        if extension_string not in self.extension_string_idx:
+            if len(self.extension_numeric_idx) == 0:
+                extension_id = 1
+            else:
+                extension_id = 1 + max(self.extension_numeric_idx)
+
+            self.extension_numeric_idx.append(extension_id)
+            self.extension_string_idx.append(extension_string)
+            assert len(self.extension_numeric_idx) == len(self.extension_string_idx)
+        else:
+            num = self.extension_string_idx.index(extension_string)
+            extension_id = self.extension_numeric_idx[num]
+
+        return extension_id
+
+    def get_extension_type_string(self, extension_id: int) -> str:
+        """
+        Get string extension ID for `extension_id`.
+
+        Parameters
+        ----------
+        extension_id : int
+            Given numeric extension ID.
+
+        Returns
+        -------
+        extension_str : str
+            String ID for the given numeric extension ID.
+
+        Raises
+        ------
+        ValueError
+            If given numeric extension ID is unknown.
+        """
+        if extension_id in self.extension_numeric_idx:
+            num = self.extension_numeric_idx.index(extension_id)
+        else:
+            raise ValueError(
+                f"Extension for the given ID - {extension_id} - is unknown."
+            )
+
+        extension_str = self.extension_string_idx[num]
+        return extension_str
+
+    def get_gradients(self,
+                      trajectory_delay: Union[float, List[float], np.ndarray] = 0,
+                      gradient_offset: Union[float, List[float], np.ndarray] = 0,
+                      time_range: List[float] = None) -> List[PPoly]:
+        """
+        Get all gradient waveforms of the sequence in a piecewise-polynomial
+        format (scipy PPoly). Gradient values can be accessed easily at one or
+        more timepoints using `gw_pp[channel](t)` (where t is a float, list of
+        floats, or numpy array). Note that PPoly objects return nan for
+        timepoints outside the waveform.
+
+        Parameters
+        ----------
+        trajectory_delay : float or list, default=0
+            Compensation factor in seconds (s) to align ADC and gradients in the reconstruction.
+        gradient_offset : float or list, default=0
+            Simulates background gradients (specified in Hz/m)
+
+        Returns
+        -------
+        gw_pp : List[PPoly]
+            List of gradient waveforms for each of the gradient channels,
+            expressed as scipy PPoly objects.
+        """
+        if np.any(np.abs(trajectory_delay) > 100e-6):
+            raise Warning(
+                f"Trajectory delay of {trajectory_delay * 1e6} us is suspiciously high"
+            )
+
+        total_duration = sum(self.block_durations.values())
+
+        gw_data = self.waveforms(time_range=time_range)
+        ng = len(gw_data)
+
+        # Gradient delay handling
+        if isinstance(trajectory_delay, (int, float)):
+            gradient_delays = [trajectory_delay] * ng
+        else:
+            assert (len(trajectory_delay) == ng)  # Need to have same number of gradient channels
+            gradient_delays = [trajectory_delay] * ng
+
+        # Gradient offset handling
+        if isinstance(gradient_offset, (int, float)):
+            gradient_offset = [gradient_offset] * ng
+        else:
+            assert (len(gradient_offset) == ng)  # Need to have same number of gradient channels
+
+        # Convert data to piecewise polynomials
+        gw_pp = []
+        for j in range(ng):
+            wave_cnt = gw_data[j].shape[1]
+            if wave_cnt == 0:
+                if np.abs(gradient_offset[j]) <= eps:
+                    gw_pp.append(None)
+                    continue
+                else:
+                    gw = np.array(([0, total_duration], [0, 0]))
+            else:
+                gw = gw_data[j]
+
+            # Now gw contains the waveform from the current axis
+            if np.abs(gradient_delays[j]) > eps:
+                gw[0] = gw[0] - gradient_delays[j]  # Anisotropic gradient delay support
+            if not np.all(np.isfinite(gw)):
+                raise Warning("Not all elements of the generated waveform are finite.")
+
+            teps = 1e-12
+            _temp1 = np.array(([gw[0, 0] - 2 * teps, gw[0, 0] - teps], [0, 0]))
+            _temp2 = np.array(([gw[0, -1] + teps, gw[0, -1] + 2 * teps], [0, 0]))
+            gw = np.hstack((_temp1, gw, _temp2))
+
+            if np.abs(gradient_offset[j]) > eps:
+                gw[1, :] += gradient_offset[j]
+
+            gw[1][gw[1] == -0.0] = 0.0
+
+            gw_pp.append(PPoly(np.stack((np.diff(gw[1]) / np.diff(gw[0]),
+                                         gw[1][:-1])), gw[0], extrapolate=True))
+        return gw_pp
+
+    def mod_grad_axis(self, axis: str, modifier: int) -> None:
+        """
+        Invert or scale all gradients along the corresponding axis/channel. The function acts on all gradient objects
+        already added to the sequence object.
+
+        Parameters
+        ----------
+        axis : str
+            Gradients to invert or scale. Must be one of 'x', 'y' or 'z'.
+        modifier : int
+            Scaling value.
+
+        Raises
+        ------
+        ValueError
+            If invalid `axis` is passed. Must be one of 'x', 'y','z'.
+        RuntimeError
+            If same gradient event is used on multiple axes.
+        """
+        if axis not in ["x", "y", "z"]:
+            raise ValueError(
+                f"Invalid axis. Must be one of 'x', 'y','z'. Passed: {axis}"
+            )
+
+        channel_num = ["x", "y", "z"].index(axis)
+        other_channels = [0, 1, 2]
+        other_channels.remove(channel_num)
+
+        # Go through all event table entries and list gradient objects in the library
+        all_grad_events = np.array(list(self.block_events.values()))
+        all_grad_events = all_grad_events[:, 2:5]
+
+        selected_events = np.unique(all_grad_events[:, channel_num])
+        selected_events = selected_events[selected_events != 0]
+        other_events = np.unique(all_grad_events[:, other_channels])
+        if len(np.intersect1d(selected_events, other_events)) > 0:
+            raise RuntimeError(
+                "mod_grad_axis does not yet support the same gradient event used on multiple axes."
+            )
+
+        for i in range(len(selected_events)):
+            self.grad_library.data[selected_events[i]][0] *= modifier
+            if (
+                    self.grad_library.type[selected_events[i]] == "g"
+                    and self.grad_library.lengths[selected_events[i]] == 5
+            ):
+                # Need to update first and last fields
+                self.grad_library.data[selected_events[i]][3] *= modifier
+                self.grad_library.data[selected_events[i]][4] *= modifier
+
    def plot(
            self, tk_obj,
            label: str = str(),
            show_blocks: bool = False,
            save: bool = False,
            time_range=(0, np.inf),
            time_disp: str = "s",
            grad_disp: str = "kHz/m",
            plot_now: bool = True,
            tk_plot: bool = True
    ) -> None:
        """
        Plot `Sequence`.

        Figure 1 shows ADC events, RF magnitude and RF/ADC phase; figure 2
        shows the three gradient channels.

        Parameters
        ----------
        tk_obj
            NOTE(review): this parameter is never referenced in the body —
            confirm whether it can be removed or should supply the Tk root
            created below.
        label : str, default=str()
            Plot label values for ADC events: in this example for LIN and REP labels; other valid labels are accepted
            as a comma-separated list.
        show_blocks : bool, default=False
            Boolean flag to indicate if grid and tick labels at the block boundaries are to be plotted.
        save : bool, default=False
            Boolean flag indicating if plots should be saved. The two figures will be saved as JPG with numerical
            suffixes to the filename 'seq_plot'.
        time_range : iterable, default=(0, np.inf)
            Time range (x-axis limits) for plotting the sequence. Default is 0 to infinity (entire sequence).
        time_disp : str, default='s'
            Time display unit, must be one of `s`, `ms` or `us`.
        grad_disp : str, default='kHz/m'
            Gradient display unit, must be one of `kHz/m` or `mT/m`.
        plot_now : bool, default=True
            If true, function immediately shows the plots, blocking the rest of the code until plots are exited.
            If false, plots are shown when plt.show() is called. Useful if plots are to be modified.
        tk_plot : bool, default=True
            If true, additionally embeds figure 1 into a Tkinter window with a
            navigation toolbar.
        """
        mpl.rcParams["lines.linewidth"] = 0.75  # Set default Matplotlib linewidth

        valid_time_units = ["s", "ms", "us"]
        valid_grad_units = ["kHz/m", "mT/m"]
        valid_labels = get_supported_labels()
        # Validate user-supplied display options before doing any work.
        if (
                not all([isinstance(x, (int, float)) for x in time_range])
                or len(time_range) != 2
        ):
            raise ValueError("Invalid time range")
        if time_disp not in valid_time_units:
            raise ValueError("Unsupported time unit")

        if grad_disp not in valid_grad_units:
            raise ValueError(
                "Unsupported gradient unit. Supported gradient units are: "
                + str(valid_grad_units)
            )

        # Figure 1: ADC/labels, RF magnitude, RF/ADC phase.
        # Figure 2: one subplot per gradient channel, x-axes linked to fig 1.
        fig1, fig2 = plt.figure(1), plt.figure(2)

        sp11 = fig1.add_subplot(311)
        sp12 = fig1.add_subplot(312, sharex=sp11)
        sp13 = fig1.add_subplot(313, sharex=sp11)
        fig2_subplots = [
            fig2.add_subplot(311, sharex=sp11),
            fig2.add_subplot(312, sharex=sp11),
            fig2.add_subplot(313, sharex=sp11),
        ]

        # Unit-conversion factors selected by the requested display units.
        t_factor_list = [1, 1e3, 1e6]
        t_factor = t_factor_list[valid_time_units.index(time_disp)]

        g_factor_list = [1e-3, 1e3 / self.system.gamma]
        g_factor = g_factor_list[valid_grad_units.index(grad_disp)]

        t0 = 0
        label_defined = False
        label_idx_to_plot = []
        label_legend_to_plot = []
        label_store = dict()
        # Select which labels (from the comma-separated `label` string) to track.
        for i in range(len(valid_labels)):
            label_store[valid_labels[i]] = 0
            if valid_labels[i] in label.upper():
                label_idx_to_plot.append(i)
                label_legend_to_plot.append(valid_labels[i])

        if len(label_idx_to_plot) != 0:
            # One distinct parula color per plotted label.
            p = parula.main(len(label_idx_to_plot) + 1)
            label_colors_to_plot = p(np.arange(len(label_idx_to_plot)))
            cycler = mpl.cycler(color=label_colors_to_plot)
            sp11.set_prop_cycle(cycler)

        # Block timings
        block_edges = np.cumsum([0] + [x[1] for x in sorted(self.block_durations.items())])
        block_edges_in_range = block_edges[
            (block_edges >= time_range[0]) * (block_edges <= time_range[1])
            ]
        if show_blocks:
            # Mark block boundaries as (rotated) x-ticks on every subplot.
            for sp in [sp11, sp12, sp13, *fig2_subplots]:
                sp.set_xticks(t_factor * block_edges_in_range)
                sp.set_xticklabels(sp.get_xticklabels(), rotation=90)

        for block_counter in self.block_events:
            block = self.get_block(block_counter)
            # Only draw blocks that overlap the requested time window.
            is_valid = (time_range[0] <= t0 + self.block_durations[block_counter]
                        and t0 <= time_range[1])
            if is_valid:
                if getattr(block, "label", None) is not None:
                    # Update the running label values for this block.
                    for i in range(len(block.label)):
                        if block.label[i].type == "labelinc":
                            label_store[block.label[i].label] += block.label[i].value
                        else:
                            label_store[block.label[i].label] = block.label[i].value
                    label_defined = True

                if getattr(block, "adc", None) is not None:  # ADC
                    adc = block.adc
                    # From Pulseq: According to the information from Klaus Scheffler and indirectly from Siemens this
                    # is the present convention - the samples are shifted by 0.5 dwell
                    t = adc.delay + (np.arange(int(adc.num_samples)) + 0.5) * adc.dwell
                    sp11.plot(t_factor * (t0 + t), np.zeros(len(t)), "rx")
                    sp13.plot(
                        t_factor * (t0 + t),
                        np.angle(
                            np.exp(1j * adc.phase_offset)
                            * np.exp(1j * 2 * np.pi * t * adc.freq_offset)
                        ),
                        "b.",
                        markersize=0.25,
                    )

                    if label_defined and len(label_idx_to_plot) != 0:
                        arr_label_store = list(label_store.values())
                        lbl_vals = np.take(arr_label_store, label_idx_to_plot)
                        # Place label markers at the ADC center time.
                        t = t0 + adc.delay + (adc.num_samples - 1) / 2 * adc.dwell
                        _t = [t_factor * t] * len(lbl_vals)
                        # Plot each label individually to retrieve each corresponding Line2D object
                        p = itertools.chain.from_iterable(
                            [
                                sp11.plot(__t, _lbl_vals, ".")
                                for __t, _lbl_vals in zip(_t, lbl_vals)
                            ]
                        )
                        if len(label_legend_to_plot) != 0:
                            sp11.legend(p, label_legend_to_plot, loc="upper left")
                            label_legend_to_plot = []

                if getattr(block, "rf", None) is not None:  # RF
                    rf = block.rf
                    tc, ic = calc_rf_center(rf)
                    time = rf.t
                    signal = rf.signal
                    # Pad the envelope with explicit zeros so the plotted pulse
                    # visually starts and ends at zero amplitude.
                    if abs(signal[0]) != 0:
                        signal = np.concatenate(([0], signal))
                        time = np.concatenate(([time[0]], time))
                        ic += 1

                    if abs(signal[-1]) != 0:
                        signal = np.concatenate((signal, [0]))
                        time = np.concatenate((time, [time[-1]]))

                    sp12.plot(t_factor * (t0 + time + rf.delay), np.abs(signal))
                    # Phase trace plus an 'x' marker at the RF center sample.
                    sp13.plot(
                        t_factor * (t0 + time + rf.delay),
                        np.angle(
                            signal
                            * np.exp(1j * rf.phase_offset)
                            * np.exp(1j * 2 * math.pi * time * rf.freq_offset)
                        ),
                        t_factor * (t0 + tc + rf.delay),
                        np.angle(
                            signal[ic]
                            * np.exp(1j * rf.phase_offset)
                            * np.exp(1j * 2 * math.pi * time[ic] * rf.freq_offset)
                        ),
                        "xb",
                    )

                grad_channels = ["gx", "gy", "gz"]
                for x in range(len(grad_channels)):  # Gradients
                    if getattr(block, grad_channels[x], None) is not None:
                        grad = getattr(block, grad_channels[x])
                        if grad.type == "grad":
                            # We extend the shape by adding the first and the last points in an effort of making the
                            # display a bit less confusing...
                            time = grad.delay + np.array([0, *grad.tt, grad.shape_dur])
                            waveform = g_factor * np.array(
                                (grad.first, *grad.waveform, grad.last)
                            )
                        else:
                            # Trapezoid: reconstruct the corner times from
                            # delay/rise/flat/fall.
                            time = np.array(cumsum(
                                0,
                                grad.delay,
                                grad.rise_time,
                                grad.flat_time,
                                grad.fall_time,
                            ))
                            waveform = (
                                    g_factor * grad.amplitude * np.array([0, 0, 1, 1, 0])
                            )
                        fig2_subplots[x].plot(t_factor * (t0 + time), waveform)
            t0 += self.block_durations[block_counter]

        grad_plot_labels = ["x", "y", "z"]
        sp11.set_ylabel("ADC")
        sp12.set_ylabel("RF mag (Hz)")
        sp13.set_ylabel("RF/ADC phase (rad)")
        sp13.set_xlabel(f"t ({time_disp})")
        for x in range(3):
            _label = grad_plot_labels[x]
            fig2_subplots[x].set_ylabel(f"G{_label} ({grad_disp})")
        fig2_subplots[-1].set_xlabel(f"t ({time_disp})")

        # Setting display limits
        disp_range = t_factor * np.array([time_range[0], min(t0, time_range[1])])
        [x.set_xlim(disp_range) for x in [sp11, sp12, sp13, *fig2_subplots]]

        # Grid on
        for sp in [sp11, sp12, sp13, *fig2_subplots]:
            sp.grid()

        fig1.tight_layout()
        fig2.tight_layout()
        if save:
            fig1.savefig("seq_plot1.jpg")
            fig2.savefig("seq_plot2.jpg")

        if tk_plot:
            # Embed figure 1 in a standalone Tkinter window.
            root = tk.Tk()
            root.title("График с панелью инструментов")

            canvas = FigureCanvasTkAgg(fig1, master=root)
            canvas.draw()

            # Add the Matplotlib toolbar (zooming, saving, etc.)
            toolbar = NavigationToolbar2Tk(canvas, root)
            toolbar.update()

            canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)

        if plot_now:
            plt.show()
+
    def read(self, file_path: str, detect_rf_use: bool = False, remove_duplicates: bool = True) -> None:
        """
        Read a `.seq` file from `file_path` into this sequence object.

        Parameters
        ----------
        file_path : str
            Path to `.seq` file to be read.
        detect_rf_use : bool, default=False
            Forwarded to the module-level `read` function; presumably enables
            detection of RF pulse use while parsing — see the reader
            implementation for the exact semantics.
        remove_duplicates : bool, default=True
            Remove duplicate events from the sequence after reading.
        """
        # Cached decompressed blocks refer to the previous event libraries;
        # drop them before loading new content.
        if self.use_block_cache:
            self.block_cache.clear()

        read(self, path=file_path, detect_rf_use=detect_rf_use, remove_duplicates=remove_duplicates)

        # Initialize next free block ID
        self.next_free_block_ID = (max(self.block_events) + 1) if self.block_events else 1
+
    def register_adc_event(self, event: EventLibrary) -> int:
        """Register an ADC event and return its event ID.

        Delegates to `block.register_adc_event`. NOTE(review): the
        `EventLibrary` annotation looks inconsistent with the sibling
        register_* methods, which take `SimpleNamespace` — confirm.
        """
        return block.register_adc_event(self, event)
+
    def register_grad_event(
            self, event: SimpleNamespace
    ) -> Union[int, Tuple[int, int]]:
        """Register a gradient event and return its ID(s).

        Delegates to `block.register_grad_event`; per the return annotation it
        may yield either a single ID or a pair of IDs — see that function for
        which gradient kinds produce which.
        """
        return block.register_grad_event(self, event)
+
    def register_label_event(self, event: SimpleNamespace) -> int:
        """Register a label event and return its event ID.

        Delegates to `block.register_label_event`.
        """
        return block.register_label_event(self, event)
+
    def register_rf_event(self, event: SimpleNamespace) -> Tuple[int, List[int]]:
        """Register an RF event; returns the RF event ID plus a list of IDs.

        Delegates to `block.register_rf_event`; per the return annotation the
        second element is a list of associated (shape) IDs — see that function.
        """
        return block.register_rf_event(self, event)
+
    def remove_duplicates(self, in_place: bool = False) -> Self:
        """
        Removes duplicate events from the shape and event libraries contained
        in this sequence.

        Deduplication proceeds in dependency order: shapes first, then the
        gradient/RF/ADC libraries, remapping the IDs stored in dependent
        entries and in the block event table after each pass.

        Parameters
        ----------
        in_place : bool, optional
            If true, removes the duplicates from the current sequence.
            Otherwise, a copy is created. The default is False.

        Returns
        -------
        seq_copy : Sequence
            If `in_place`, returns self. Otherwise returns a copy of the
            sequence.
        """
        if in_place:
            seq_copy = self
        else:
            # Avoid copying block_cache for performance
            tmp = self.block_cache
            self.block_cache = {}
            seq_copy = deepcopy(self)
            self.block_cache = tmp

        # Find duplicate in shape library
        # (the numeric argument presumably controls value rounding/precision
        # when comparing entries — TODO confirm against EventLibrary.remove_duplicates)
        seq_copy.shape_library, mapping = seq_copy.shape_library.remove_duplicates(9)

        # Remap shape IDs of arbitrary gradient events
        # (data layout: index 0 amplitude, indices 1-2 shape IDs)
        for grad_id in seq_copy.grad_library.data:
            if seq_copy.grad_library.type[grad_id] == 'g':
                data = seq_copy.grad_library.data[grad_id]
                new_data = (data[0],) + (mapping[data[1]], mapping[data[2]]) + data[3:]
                if data != new_data:
                    seq_copy.grad_library.update(grad_id, None, new_data)

        # Remap shape IDs of RF events
        # (data layout: index 0 amplitude, indices 1-3 mag/phase/time shape IDs)
        for rf_id in seq_copy.rf_library.data:
            data = seq_copy.rf_library.data[rf_id]
            new_data = (data[0],) + (mapping[data[1]], mapping[data[2]], mapping[data[3]]) + data[4:]
            if data != new_data:
                seq_copy.rf_library.update(rf_id, None, new_data)

        # Filter duplicates in gradient library
        # (per-field precision tuple; sign/meaning of negative entries
        # defined by EventLibrary.remove_duplicates — TODO confirm)
        seq_copy.grad_library, mapping = seq_copy.grad_library.remove_duplicates((6, -6, -6, -6, -6, -6))

        # Remap gradient event IDs (columns 2-4 of the block event table)
        for block_id in seq_copy.block_events:
            seq_copy.block_events[block_id][2] = mapping[seq_copy.block_events[block_id][2]]
            seq_copy.block_events[block_id][3] = mapping[seq_copy.block_events[block_id][3]]
            seq_copy.block_events[block_id][4] = mapping[seq_copy.block_events[block_id][4]]

        # Filter duplicates in RF library
        seq_copy.rf_library, mapping = seq_copy.rf_library.remove_duplicates((6, 0, 0, 0, 6, 6, 6))

        # Remap RF event IDs (column 1 of the block event table)
        for block_id in seq_copy.block_events:
            seq_copy.block_events[block_id][1] = mapping[seq_copy.block_events[block_id][1]]

        # Filter duplicates in ADC library
        seq_copy.adc_library, mapping = seq_copy.adc_library.remove_duplicates((0, -9, -6, 6, 6, 6))

        # Remap ADC event IDs (column 5 of the block event table)
        for block_id in seq_copy.block_events:
            seq_copy.block_events[block_id][5] = mapping[seq_copy.block_events[block_id][5]]

        return seq_copy
+
    def rf_from_lib_data(self, lib_data: list, use: str = str()) -> SimpleNamespace:
        """
        Construct RF object from `lib_data`.

        Expected `lib_data` layout (as indexed below):
        [0] amplitude, [1] magnitude shape ID, [2] phase shape ID,
        [3] time shape ID (<= 0 selects the default raster),
        [4] delay, [5] freq_offset, [6] phase_offset.

        Parameters
        ----------
        lib_data : list
            RF envelope.
        use : str, default=str()
            RF event use; single-character code ('e', 'r', 'i', 's', 'p'),
            anything else maps to "undefined".

        Returns
        -------
        rf : SimpleNamespace
            RF object constructed from `lib_data`.
        """
        rf = SimpleNamespace()
        rf.type = "rf"

        amplitude, mag_shape, phase_shape = lib_data[0], lib_data[1], lib_data[2]
        # Decompress the magnitude shape from the shape library.
        shape_data = self.shape_library.data[mag_shape]
        compressed = SimpleNamespace()
        compressed.num_samples = shape_data[0]
        compressed.data = shape_data[1:]
        mag = decompress_shape(compressed)
        # Reuse the same container for the phase shape.
        shape_data = self.shape_library.data[phase_shape]
        compressed.num_samples = shape_data[0]
        compressed.data = shape_data[1:]
        phase = decompress_shape(compressed)
        # Complex envelope: amplitude-scaled magnitude with phase in cycles.
        rf.signal = amplitude * mag * np.exp(1j * 2 * np.pi * phase)
        time_shape = lib_data[3]
        if time_shape > 0:
            # Explicit time shape stored in the library (units of raster ticks).
            shape_data = self.shape_library.data[time_shape]
            compressed.num_samples = shape_data[0]
            compressed.data = shape_data[1:]
            rf.t = decompress_shape(compressed) * self.rf_raster_time
            # Round the duration up to a whole number of raster periods;
            # eps guards against float round-off at exact multiples.
            rf.shape_dur = (
                    math.ceil((rf.t[-1] - eps) / self.rf_raster_time) * self.rf_raster_time
            )
        else:  # Generate default time raster on the fly
            # Sample times at raster centers (0.5-offset convention).
            rf.t = (np.arange(1, len(rf.signal) + 1) - 0.5) * self.rf_raster_time
            rf.shape_dur = len(rf.signal) * self.rf_raster_time

        rf.delay = lib_data[4]
        rf.freq_offset = lib_data[5]
        rf.phase_offset = lib_data[6]

        # Dead/ringdown times come from the system limits, not from lib_data.
        rf.dead_time = self.system.rf_dead_time
        rf.ringdown_time = self.system.rf_ringdown_time

        if use != "":
            # Map single-character use codes to their descriptive names.
            use_cases = {
                "e": "excitation",
                "r": "refocusing",
                "i": "inversion",
                "s": "saturation",
                "p": "preparation",
            }
            rf.use = use_cases[use] if use in use_cases else "undefined"

        return rf
+
+    def rf_times(
+            self, time_range: List[float] = None
+    ) -> Tuple[List[float], np.ndarray, List[float], np.ndarray, np.ndarray]:
+        """
+        Return time points of excitations and refocusings.
+
+        Returns
+        -------
+        t_excitation : List[float]
+            Contains time moments of the excitation RF pulses
+        fp_excitation : np.ndarray
+            Contains frequency and phase offsets of the excitation RF pulses
+        t_refocusing : List[float]
+            Contains time moments of the refocusing RF pulses
+        fp_refocusing : np.ndarray
+            Contains frequency and phase offsets of the excitation RF pulses
+        """
+
+        # Collect RF timing data
+        t_excitation = []
+        fp_excitation = []
+        t_refocusing = []
+        fp_refocusing = []
+
+        curr_dur = 0
+        if time_range == None:
+            blocks = self.block_events
+        else:
+            if len(time_range) != 2:
+                raise ValueError('Time range must be list of two elements')
+            if time_range[0] > time_range[1]:
+                raise ValueError('End time of time_range must be after begin time')
+
+            # Calculate end times of each block
+            bd = np.array(list(self.block_durations.values()))
+            t = np.cumsum(bd)
+            # Search block end times for start of time range
+            begin_block = np.searchsorted(t, time_range[0])
+            # Search block begin times for end of time range
+            end_block = np.searchsorted(t - bd, time_range[1], side='right')
+            blocks = list(self.block_durations.keys())[begin_block:end_block]
+            curr_dur = t[begin_block] - bd[begin_block]
+
+        for block_counter in blocks:
+            block = self.get_block(block_counter)
+
+            if block.rf is not None:  # RF
+                rf = block.rf
+                t = rf.delay + calc_rf_center(rf)[0]
+                if not hasattr(rf, "use") or block.rf.use in [
+                    "excitation",
+                    "undefined",
+                ]:
+                    t_excitation.append(curr_dur + t)
+                    fp_excitation.append([block.rf.freq_offset, block.rf.phase_offset])
+                elif block.rf.use == "refocusing":
+                    t_refocusing.append(curr_dur + t)
+                    fp_refocusing.append([block.rf.freq_offset, block.rf.phase_offset])
+
+            curr_dur += self.block_durations[block_counter]
+
+        if len(t_excitation) != 0:
+            fp_excitation = np.array(fp_excitation).T
+        else:
+            fp_excitation = np.empty((2, 0))
+
+        if len(t_refocusing) != 0:
+            fp_refocusing = np.array(fp_refocusing).T
+        else:
+            fp_refocusing = np.empty((2, 0))
+
+        return t_excitation, fp_excitation, t_refocusing, fp_refocusing
+
+    def set_block(self, block_index: int, *args: SimpleNamespace) -> None:
+        """
+        Replace block at index with new block provided as block structure, add sequence block, or create a new block
+        from events and store at position specified by index. The block or events are provided in uncompressed form and
+        will be stored in the compressed, non-redundant internal libraries.
+
+        See also:
+        - `pypulseq.Sequence.sequence.Sequence.get_block()`
+        - `pypulseq.Sequence.sequence.Sequence.add_block()`
+
+        Parameters
+        ----------
+        block_index : int
+            Index at which block is replaced.
+        args : SimpleNamespace
+            Block or events to be replaced/added or created at `block_index`.
+        """
+        block.set_block(self, block_index, *args)
+
+        if block_index >= self.next_free_block_ID:
+            self.next_free_block_ID = block_index + 1
+
+    def set_definition(
+            self, key: str, value: Union[float, int, list, np.ndarray, str, tuple]
+    ) -> None:
+        """
+        Modify a custom definition of the sequence. Set the user definition 'key' to value 'value'. If the definition
+        does not exist it will be created.
+
+        See also `pypulseq.Sequence.sequence.Sequence.get_definition()`.
+
+        Parameters
+        ----------
+        key : str
+            Definition key.
+        value : int, list, np.ndarray, str or tuple
+            Definition value.
+        """
+        if key == "FOV":
+            if np.max(value) > 1:
+                text = "Definition FOV uses values exceeding 1 m. "
+                text += "New Pulseq interpreters expect values in units of meters."
+                warn(text)
+
+        self.definitions[key] = value
+
+    def set_extension_string_ID(self, extension_str: str, extension_id: int) -> None:
+        """
+        Set numeric ID for the given string extension ID.
+
+        Parameters
+        ----------
+        extension_str : str
+            Given string extension ID.
+        extension_id : int
+            Given numeric extension ID.
+
+        Raises
+        ------
+        ValueError
+            If given numeric or string extension ID is not unique.
+        """
+        if (
+                extension_str in self.extension_string_idx
+                or extension_id in self.extension_numeric_idx
+        ):
+            raise ValueError("Numeric or string ID is not unique")
+
+        self.extension_numeric_idx.append(extension_id)
+        self.extension_string_idx.append(extension_str)
+        assert len(self.extension_numeric_idx) == len(self.extension_string_idx)
+
    def test_report(self) -> str:
        """
        Analyze the sequence and return a text report.

        Returns
        -------
        str
            Human-readable analysis report (delegates to `ext_test_report`).
        """
        return ext_test_report(self)
+
    def waveforms(
            self, append_RF: bool = False, time_range: List[float] = None
    ) -> Tuple[np.ndarray]:
        """
        Decompress the entire gradient waveform. Returns gradient waveforms as a tuple of `np.ndarray` of
        `gradient_axes` (typically 3) dimensions. Each `np.ndarray` contains timepoints and the corresponding
        gradient amplitude values.

        Parameters
        ----------
        append_RF : bool, default=False
            Boolean flag to indicate if RF wave shapes are to be appended after the gradients.
        time_range : List[float], default=None
            Two-element [start, end] interval in seconds; only blocks overlapping
            this interval are decompressed. None means the whole sequence.

        Returns
        -------
        wave_data : np.ndarray
            One 2 x N array per channel: row 0 holds time points, row 1 amplitudes.

        Raises
        ------
        ValueError
            If `time_range` does not have exactly two elements or is reversed.
        """
        grad_channels = ["gx", "gy", "gz"]

        # Collect shape pieces
        if append_RF:
            shape_channels = len(grad_channels) + 1  # Last 'channel' is RF
        else:
            shape_channels = len(grad_channels)

        shape_pieces = [[] for _ in range(shape_channels)]
        out_len = np.zeros(shape_channels)  # Last 'channel' is RF

        curr_dur = 0
        # NOTE(review): PEP 8 prefers `time_range is None`; `==` works here but
        # relies on None comparing unequal to any other value.
        if time_range == None:
            blocks = self.block_events
        else:
            if len(time_range) != 2:
                raise ValueError('Time range must be list of two elements')
            if time_range[0] > time_range[1]:
                raise ValueError('End time of time_range must be after begin time')

            # Calculate end times of each block
            bd = np.array(list(self.block_durations.values()))
            t = np.cumsum(bd)
            # Search block end times for start of time range
            begin_block = np.searchsorted(t, time_range[0])
            # Search block begin times for end of time range
            end_block = np.searchsorted(t - bd, time_range[1], side='right')
            blocks = list(self.block_durations.keys())[begin_block:end_block]
            # NOTE(review): if time_range[0] lies past the sequence end,
            # begin_block == len(bd) and this indexing raises IndexError — confirm
            # callers never pass such a range.
            curr_dur = t[begin_block] - bd[begin_block]

        for block_counter in blocks:
            block = self.get_block(block_counter)

            for j in range(len(grad_channels)):
                grad = getattr(block, grad_channels[j])
                if grad is not None:  # Gradients
                    if grad.type == "grad":
                        # Check if we have an extended trapezoid or an arbitrary gradient on a regular raster:
                        # samples centered on the raster (tt == (i - 0.5) * raster) indicate the latter.
                        tt_rast = grad.tt / self.grad_raster_time + 0.5
                        if np.all(
                                np.abs(tt_rast - np.arange(1, len(tt_rast) + 1)) < eps
                        ):  # Arbitrary gradient
                            """
                            Arbitrary gradient: restore & recompress shape - if we had a trapezoid converted to shape we
                            have to find the "corners" and we can eliminate internal samples on the straight segments
                            but first we have to restore samples on the edges of the gradient raster intervals for that
                            we need the first sample.
                            """

                            # TODO: Implement restoreAdditionalShapeSamples
                            #       https://github.com/pulseq/pulseq/blob/master/matlab/%2Bmr/restoreAdditionalShapeSamples.m

                            # Two extra samples: grad.first prepended and grad.last appended.
                            out_len[j] += len(grad.tt) + 2
                            shape_pieces[j].append(np.array(
                                [
                                    curr_dur + grad.delay + np.concatenate(
                                        ([0], grad.tt, [grad.tt[-1] + self.grad_raster_time / 2])),
                                    np.concatenate(([grad.first], grad.waveform, [grad.last]))
                                ]
                            ))
                        else:  # Extended trapezoid
                            out_len[j] += len(grad.tt)
                            shape_pieces[j].append(np.array(
                                [
                                    curr_dur + grad.delay + grad.tt,
                                    grad.waveform,
                                ]
                            ))
                    else:
                        # Trapezoid event: emit corner points only.
                        if abs(grad.flat_time) > eps:
                            # Four corners: start of rise, start/end of flat top, end of fall.
                            out_len[j] += 4
                            _temp = np.vstack(
                                (
                                    cumsum(
                                        curr_dur + grad.delay,
                                        grad.rise_time,
                                        grad.flat_time,
                                        grad.fall_time,
                                    ),
                                    grad.amplitude * np.array([0, 1, 1, 0]),
                                )
                            )
                            shape_pieces[j].append(_temp)
                        else:
                            if abs(grad.rise_time) > eps and abs(grad.fall_time) > eps:
                                # Triangular gradient: no flat top.
                                out_len[j] += 3
                                _temp = np.vstack(
                                    (
                                        cumsum(curr_dur + grad.delay, grad.rise_time, grad.fall_time),
                                        grad.amplitude * np.array([0, 1, 0]),
                                    )
                                )
                                shape_pieces[j].append(_temp)
                            else:
                                # Zero-duration gradient: only report it if its amplitude is non-zero.
                                if abs(grad.amplitude) > eps:
                                    print(
                                        'Warning: "empty" gradient with non-zero magnitude detected in block {}'.format(
                                            block_counter))

            if block.rf is not None:  # RF
                rf = block.rf
                if append_RF:
                    # Complex RF waveform including phase and frequency offsets.
                    rf_piece = np.array(
                        [
                            curr_dur + rf.delay + rf.t,
                            rf.signal
                            * np.exp(
                                1j
                                * (rf.phase_offset + 2 * np.pi * rf.freq_offset * rf.t)
                            ),
                        ]
                    )
                    out_len[-1] += len(rf.t)

                    # Pad with an explicit zero sample just before a pulse that
                    # starts at non-zero amplitude (and likewise after, below).
                    if abs(rf.signal[0]) > 0:
                        pre = np.array([[rf_piece[0, 0] - 0.1 * self.system.rf_raster_time], [0]])
                        rf_piece = np.hstack((pre, rf_piece))
                        out_len[-1] += pre.shape[1]

                    if abs(rf.signal[-1]) > 0:
                        post = np.array([[rf_piece[0, -1] + 0.1 * self.system.rf_raster_time], [0]])
                        rf_piece = np.hstack((rf_piece, post))
                        out_len[-1] += post.shape[1]

                    shape_pieces[-1].append(rf_piece)

            curr_dur += self.block_durations[block_counter]

        # Collect wave data
        wave_data = []

        for j in range(shape_channels):
            if shape_pieces[j] == []:
                # No events on this channel: keep a consistent empty 2 x 0 shape.
                wave_data.append(np.zeros((2, 0)))
                continue

            # If the first element of the next shape has the same time as
            # the last element of the previous shape, drop the first
            # element of the next shape.
            shape_pieces[j] = ([shape_pieces[j][0]] +
                               [cur if prev[0, -1] + eps < cur[0, 0] else cur[:, 1:]
                                for prev, cur in zip(shape_pieces[j][:-1], shape_pieces[j][1:])])

            wave_data.append(np.concatenate(shape_pieces[j], axis=1))

            # Sanity check: the joined time axis must be strictly increasing.
            # NOTE(review): raising the Warning class as an exception aborts the
            # call; `warnings.warn` may be the intended behavior — confirm.
            rftdiff = np.diff(wave_data[j][0])
            if np.any(rftdiff < eps):
                raise Warning(
                    "Time vector elements are not monotonically increasing."
                )

        return wave_data
+
+    def waveforms_and_times(
+            self, append_RF: bool = False, time_range: List[float] = None
+    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
+        """
+        Decompress the entire gradient waveform. Returns gradient waveforms as a tuple of `np.ndarray` of
+        `gradient_axes` (typically 3) dimensions. Each `np.ndarray` contains timepoints and the corresponding
+        gradient amplitude values. Additional return values are time points of excitations, refocusings and ADC
+        sampling points.
+
+        Parameters
+        ----------
+        append_RF : bool, default=False
+            Boolean flag to indicate if RF wave shapes are to be appended after the gradients.
+
+        Returns
+        -------
+        wave_data : np.ndarray
+        tfp_excitation : np.ndarray
+            Contains time moments, frequency and phase offsets of the excitation RF pulses (similar for `
+            tfp_refocusing`).
+        tfp_refocusing : np.ndarray
+        t_adc: np.ndarray
+            Contains times of all ADC sample points.
+        fp_adc : np.ndarray
+            Contains frequency and phase offsets of each ADC object (not samples).
+        """
+
+        wave_data = self.waveforms(append_RF=append_RF, time_range=time_range)
+        t_excitation, fp_excitation, t_refocusing, fp_refocusing = self.rf_times(time_range=time_range)
+        t_adc, fp_adc = self.adc_times(time_range=time_range)
+
+        # Join times, frequency and phases of RF pulses for compatibility with previous implementation
+        tfp_excitation = np.concatenate((np.array(t_excitation)[None], fp_excitation), axis=0)
+        tfp_refocusing = np.concatenate((np.array(t_refocusing)[None], fp_refocusing), axis=0)
+
+        return wave_data, tfp_excitation, tfp_refocusing, t_adc, fp_adc
+
+    def waveforms_export(self, time_range=(0, np.inf)) -> dict:
+        """
+        Plot `Sequence`.
+
+        Parameters
+        ----------
+        time_range : iterable, default=(0, np.inf)
+            Time range (x-axis limits) for all waveforms. Default is 0 to infinity (entire sequence).
+
+        Returns
+        -------
+        all_waveforms: dict
+            Dictionary containing the following sequence waveforms and time array(s):
+            - `t_adc` - ADC timing array [seconds]
+            - `t_rf` - RF timing array [seconds]
+            - `t_rf_centers`: `rf_t_centers`,
+            - `t_gx`: x gradient timing array,
+            - `t_gy`: y gradient timing array,
+            - `t_gz`: z gradient timing array,
+            - `adc` - ADC complex signal (amplitude=1, phase=adc phase) [a.u.]
+            - `rf` - RF complex signal
+            - `rf_centers`: RF centers array,
+            - `gx` - x gradient
+            - `gy` - y gradient
+            - `gz` - z gradient
+            - `grad_unit`: [kHz/m],
+            - `rf_unit`: [Hz],
+            - `time_unit`: [seconds],
+        """
+        # Check time range validity
+        if (
+                not all([isinstance(x, (int, float)) for x in time_range])
+                or len(time_range) != 2
+        ):
+            raise ValueError("Invalid time range")
+
+        t0 = 0
+        adc_t_all = np.array([])
+        adc_signal_all = np.array([], dtype=complex)
+        rf_t_all = np.array([])
+        rf_signal_all = np.array([], dtype=complex)
+        rf_t_centers = np.array([])
+        rf_signal_centers = np.array([], dtype=complex)
+        gx_t_all = np.array([])
+        gy_t_all = np.array([])
+        gz_t_all = np.array([])
+        gx_all = np.array([])
+        gy_all = np.array([])
+        gz_all = np.array([])
+
+        for block_counter in self.block_events:  # For each block
+            block = self.get_block(block_counter)  # Retrieve it
+            is_valid = (
+                    time_range[0] <= t0 <= time_range[1]
+            )  # Check if "current time" is within requested range.
+            if is_valid:
+                # Case 1: ADC
+                if block.adc != None:
+                    adc = block.adc  # Get adc info
+                    # From Pulseq: According to the information from Klaus Scheffler and indirectly from Siemens this
+                    # is the present convention - the samples are shifted by 0.5 dwell
+                    t = adc.delay + (np.arange(int(adc.num_samples)) + 0.5) * adc.dwell
+                    adc_t = t0 + t
+                    adc_signal = np.exp(1j * adc.phase_offset) * np.exp(
+                        1j * 2 * np.pi * t * adc.freq_offset
+                    )
+                    adc_t_all = np.concatenate((adc_t_all, adc_t))
+                    adc_signal_all = np.concatenate((adc_signal_all, adc_signal))
+
+                if block.rf != None:
+                    rf = block.rf
+                    tc, ic = calc_rf_center(rf)
+                    t = rf.t + rf.delay
+                    tc = tc + rf.delay
+
+                    # Debug - visualize
+                    # sp12.plot(t_factor * (t0 + t), np.abs(rf.signal))
+                    # sp13.plot(t_factor * (t0 + t), np.angle(rf.signal * np.exp(1j * rf.phase_offset)
+                    #                                         * np.exp(1j * 2 * math.pi * rf.t * rf.freq_offset)),
+                    #           t_factor * (t0 + tc), np.angle(rf.signal[ic] * np.exp(1j * rf.phase_offset)
+                    #                                          * np.exp(1j * 2 * math.pi * rf.t[ic] * rf.freq_offset)),
+                    #           'xb')
+
+                    rf_t = t0 + t
+                    rf = (
+                            rf.signal
+                            * np.exp(1j * rf.phase_offset)
+                            * np.exp(1j * 2 * math.pi * rf.t * rf.freq_offset)
+                    )
+                    rf_t_all = np.concatenate((rf_t_all, rf_t))
+                    rf_signal_all = np.concatenate((rf_signal_all, rf))
+                    rf_t_centers = np.concatenate((rf_t_centers, [rf_t[ic]]))
+                    rf_signal_centers = np.concatenate((rf_signal_centers, [rf[ic]]))
+
+                grad_channels = ["gx", "gy", "gz"]
+                for x in range(
+                        len(grad_channels)
+                ):  # Check each gradient channel: x, y, and z
+                    if getattr(block, grad_channels[x]) != None:
+                        # If this channel is on in current block
+                        grad = getattr(block, grad_channels[x])
+                        if grad.type == "grad":  # Arbitrary gradient option
+                            # In place unpacking of grad.t with the starred expression
+                            g_t = (
+                                    t0
+                                    + grad.delay
+                                    + [
+                                        0,
+                                        *(grad.tt + (grad.tt[1] - grad.tt[0]) / 2),
+                                        grad.tt[-1] + grad.tt[1] - grad.tt[0],
+                                    ]
+                            )
+                            g = 1e-3 * np.array((grad.first, *grad.waveform, grad.last))
+                        else:  # Trapezoid gradient option
+                            g_t = cumsum(
+                                t0,
+                                grad.delay,
+                                grad.rise_time,
+                                grad.flat_time,
+                                grad.fall_time,
+                            )
+                            g = 1e-3 * grad.amplitude * np.array([0, 0, 1, 1, 0])
+
+                        if grad.channel == "x":
+                            gx_t_all = np.concatenate((gx_t_all, g_t))
+                            gx_all = np.concatenate((gx_all, g))
+                        elif grad.channel == "y":
+                            gy_t_all = np.concatenate((gy_t_all, g_t))
+                            gy_all = np.concatenate((gy_all, g))
+                        elif grad.channel == "z":
+                            gz_t_all = np.concatenate((gz_t_all, g_t))
+                            gz_all = np.concatenate((gz_all, g))
+
+            t0 += self.block_durations[
+                block_counter
+            ]  # "Current time" gets updated to end of block just examined
+
+        all_waveforms = {
+            "t_adc": adc_t_all,
+            "t_rf": rf_t_all,
+            "t_rf_centers": rf_t_centers,
+            "t_gx": gx_t_all,
+            "t_gy": gy_t_all,
+            "t_gz": gz_t_all,
+            "adc": adc_signal_all,
+            "rf": rf_signal_all,
+            "rf_centers": rf_signal_centers,
+            "gx": gx_all,
+            "gy": gy_all,
+            "gz": gz_all,
+            "grad_unit": "[kHz/m]",
+            "rf_unit": "[Hz]",
+            "time_unit": "[seconds]",
+        }
+
+        return all_waveforms
+
+    def write(self, name: str, create_signature: bool = True, remove_duplicates: bool = True) -> Union[str, None]:
+        """
+        Write the sequence data to the given filename using the open file format for MR sequences.
+
+        See also `pypulseq.Sequence.read_seq.read()`.
+
+        Parameters
+        ----------
+        name : str
+            Filename of `.seq` file to be written to disk.
+        create_signature : bool, default=True
+            Boolean flag to indicate if the file has to be signed.
+        remove_duplicates : bool, default=True
+            Remove duplicate events from the sequence before writing
+
+        Returns
+        -------
+        signature or None : If create_signature is True, it returns the written .seq file's signature as a string,
+        otherwise it returns None. Note that, if remove_duplicates is True, signature belongs to the
+        deduplicated sequences signature, and not the Sequence that is stored in the Sequence object.
+        """
+        signature = write_seq(self, name, create_signature)
+
+        if signature is not None:
+            self.signature_type = "md5"
+            self.signature_file = "text"
+            self.signature_value = signature
+            return signature
+        else:
+            return None

+ 269 - 0
LF_scanner/pypulseq/Sequence/write_seq.py

@@ -0,0 +1,269 @@
+import hashlib
+
+import numpy as np
+
+from LF_scanner.pypulseq.supported_labels_rf_use import get_supported_labels
+
+
def write(self, file_name: str, create_signature: bool):
    """
    Write the sequence data to the given filename using the open file format for MR sequences.

    See also `pypulseq.Sequence.read_seq.read()`.

    Parameters
    ----------
    file_name : str
        File name of `.seq` file to be written to disk. A `.seq` extension is
        appended if the name does not already end with it.
    create_signature : bool
        If True, an md5 digest of the written file is computed, stored on
        `self` (signature_type, signature_file, signature_value) and appended
        to the file as a [SIGNATURE] section.

    Returns
    -------
    str or None
        The md5 signature when `create_signature` is True, otherwise None.
        (Fix: the original returned nothing, so `Sequence.write()` -- which
        assigns `signature = write_seq(...)` -- always reported None.)

    Raises
    ------
    RuntimeError
        If an unsupported definition is encountered.
    """
    # `>.0f` for decimals.
    # `>g` to truncate insignificant zeros.

    # Fix: the original check `file_name[-4:] != ".seq" not in file_name` is a
    # chained comparison `(file_name[-4:] != ".seq") and (".seq" not in file_name)`,
    # so names merely *containing* ".seq" (e.g. "scan.seq.bak") were written
    # without the extension. Test the suffix directly instead.
    if not file_name.endswith(".seq"):
        file_name += ".seq"

    with open(file_name, "w") as output_file:
        output_file.write("# Pulseq sequence file\n")
        output_file.write("# Created by PyPulseq\n\n")

        output_file.write("[VERSION]\n")
        output_file.write(f"major {self.version_major}\n")
        output_file.write(f"minor {self.version_minor}\n")
        output_file.write(f"revision {self.version_revision}\n")
        output_file.write("\n")

        if len(self.definitions) != 0:
            output_file.write("[DEFINITIONS]\n")
            # Sort keys so repeated writes of the same sequence are byte-identical.
            keys = sorted(list(self.definitions.keys()))
            values = [self.definitions[k] for k in keys]
            for block_counter in range(len(keys)):
                output_file.write(f"{keys[block_counter]} ")
                if isinstance(values[block_counter], str):
                    output_file.write(values[block_counter] + " ")
                elif isinstance(values[block_counter], (int, float)):
                    output_file.write(f"{values[block_counter]:0.9g} ")
                elif isinstance(
                    values[block_counter], (list, tuple, np.ndarray)
                ):  # For example, [FOVx, FOVy, FOVz]
                    for i in range(len(values[block_counter])):
                        if isinstance(values[block_counter][i], (int, float)):
                            output_file.write(f"{values[block_counter][i]:0.9g} ")
                        else:
                            output_file.write(f"{values[block_counter][i]} ")
                else:
                    raise RuntimeError("Unsupported definition")
                output_file.write("\n")
            output_file.write("\n")

        output_file.write("# Format of blocks:\n")
        output_file.write("# NUM DUR RF  GX  GY  GZ  ADC  EXT\n")
        output_file.write("[BLOCKS]\n")
        # The ID column is padded to the width of the largest block number.
        id_format_width = "{:" + str(len(str(len(self.block_events)))) + "d}"
        id_format_str = id_format_width + " {:3d} {:3d} {:3d} {:3d} {:3d} {:2d} {:2d}\n"
        for block_counter in range(len(self.block_events)):
            # Durations are stored in seconds; the file stores raster counts.
            block_duration = (
                self.block_durations[block_counter + 1] / self.block_duration_raster
            )
            block_duration_rounded = int(np.round(block_duration))

            # Block durations must sit exactly on the block-duration raster.
            assert np.abs(block_duration_rounded - block_duration) < 1e-6

            s = id_format_str.format(
                *(
                    block_counter + 1,
                    block_duration_rounded,
                    *self.block_events[block_counter + 1][1:],
                )
            )
            output_file.write(s)
        output_file.write("\n")

        if len(self.rf_library.data) != 0:
            output_file.write("# Format of RF events:\n")
            output_file.write(
                "# id amplitude mag_id phase_id time_shape_id delay freq phase\n"
            )
            output_file.write(
                "# ..        Hz   ....     ....          ....    us   Hz   rad\n"
            )
            output_file.write("[RF]\n")
            rf_lib_keys = self.rf_library.data
            id_format_str = "{:.0f} {:12g} {:.0f} {:.0f} {:.0f} {:g} {:g} {:g}\n"  # See format notes at the top of this function
            for k in sorted(rf_lib_keys.keys()):
                lib_data1 = self.rf_library.data[k][0:4]
                lib_data2 = self.rf_library.data[k][5:7]
                # Snap the delay to the RF raster and convert seconds -> us.
                delay = (
                    np.round(self.rf_library.data[k][4] / self.rf_raster_time)
                    * self.rf_raster_time
                    * 1e6
                )
                s = id_format_str.format(k, *lib_data1, delay, *lib_data2)
                output_file.write(s)
            output_file.write("\n")

        # Split the gradient library into arbitrary ('g') and trapezoid ('t') events.
        grad_lib_values = np.array(list(self.grad_library.type.values()))
        arb_grad_mask = grad_lib_values == "g"
        trap_grad_mask = grad_lib_values == "t"

        if np.any(arb_grad_mask):
            output_file.write("# Format of arbitrary gradients:\n")
            output_file.write(
                "#   time_shape_id of 0 means default timing (stepping with grad_raster starting at 1/2 of grad_raster)\n"
            )
            output_file.write("# id amplitude amp_shape_id time_shape_id delay\n")
            output_file.write("# ..      Hz/m       ..         ..          us\n")
            output_file.write("[GRADIENTS]\n")
            id_format_str = "{:.0f} {:12g} {:.0f} {:.0f} {:.0f}\n"  # See format notes at the top of this function
            keys = np.array(list(self.grad_library.data.keys()))
            for k in keys[arb_grad_mask]:
                s = id_format_str.format(
                    k,
                    *self.grad_library.data[k][:3],
                    np.round(self.grad_library.data[k][3] * 1e6),
                )
                output_file.write(s)
            output_file.write("\n")

        if np.any(trap_grad_mask):
            output_file.write("# Format of trapezoid gradients:\n")
            output_file.write("# id amplitude rise flat fall delay\n")
            output_file.write("# ..      Hz/m   us   us   us    us\n")
            output_file.write("[TRAP]\n")
            keys = np.array(list(self.grad_library.data.keys()))
            id_format_str = "{:2g} {:12g} {:3g} {:4g} {:3g} {:3g}\n"
            for k in keys[trap_grad_mask]:
                data = np.copy(
                    self.grad_library.data[k]
                )  # Make a copy to leave the original untouched
                data[1:] = np.round(1e6 * data[1:])  # Timings: seconds -> us
                """
                Python & Numpy always round to nearest even value - inconsistent with MATLAB Pulseq's .seq files.
                [1] https://stackoverflow.com/questions/29671945/format-string-rounding-inconsistent
                [2] https://stackoverflow.com/questions/50374779/how-to-avoid-incorrect-rounding-with-numpy-round
                """
                s = id_format_str.format(k, *data)
                output_file.write(s)
            output_file.write("\n")

        if len(self.adc_library.data) != 0:
            output_file.write("# Format of ADC events:\n")
            output_file.write("# id num dwell delay freq phase\n")
            output_file.write("# ..  ..    ns    us   Hz   rad\n")
            output_file.write("[ADC]\n")
            keys = sorted(self.adc_library.data.keys())
            id_format_str = (
                "{:.0f} {:.0f} {:.0f} {:.0f} {:g} {:g}\n"  # See format notes at the top of this function
            )
            for k in keys:
                # Unit conversion: dwell seconds -> ns, delay seconds -> us.
                data = np.multiply(self.adc_library.data[k][0:5], [1, 1e9, 1e6, 1, 1])
                s = id_format_str.format(k, *data)
                output_file.write(s)
            output_file.write("\n")

        if len(self.extensions_library.data) != 0:
            output_file.write("# Format of extension lists:\n")
            output_file.write("# id type ref next_id\n")
            output_file.write("# next_id of 0 terminates the list\n")
            output_file.write(
                "# Extension list is followed by extension specifications\n"
            )
            output_file.write("[EXTENSIONS]\n")
            keys = sorted(self.extensions_library.data.keys())
            id_format_str = "{:.0f} {:.0f} {:.0f} {:.0f}\n"  # See format notes at the top of this function
            for k in keys:
                s = id_format_str.format(k, *np.round(self.extensions_library.data[k]))
                output_file.write(s)
            output_file.write("\n")

        if len(self.trigger_library.data) != 0:
            output_file.write(
                "# Extension specification for digital output and input triggers:\n"
            )
            output_file.write("# id type channel delay (us) duration (us)\n")
            output_file.write(
                f'extension TRIGGERS {self.get_extension_type_ID("TRIGGERS")}\n'
            )
            keys = sorted(self.trigger_library.data.keys())
            id_format_str = "{:.0f} {:.0f} {:.0f} {:.0f} {:.0f}\n"  # See format notes at the top of this function
            for k in keys:
                # Delay and duration: seconds -> us.
                s = id_format_str.format(
                    k, *np.round(self.trigger_library.data[k] * [1, 1, 1e6, 1e6])
                )
                output_file.write(s)
            output_file.write("\n")

        if len(self.label_set_library.data) != 0:
            labels = get_supported_labels()

            output_file.write("# Extension specification for setting labels:\n")
            output_file.write("# id set labelstring\n")
            tid = self.get_extension_type_ID("LABELSET")
            output_file.write(f"extension LABELSET {tid}\n")
            keys = sorted(self.label_set_library.data.keys())
            id_format_str = "{:.0f} {:.0f} {}\n"  # See format notes at the top of this function
            for k in keys:
                value = self.label_set_library.data[k][0]
                label_id = labels[
                    int(self.label_set_library.data[k][1]) - 1
                ]  # label_id is +1 in add_block()
                s = id_format_str.format(k, value, label_id)
                output_file.write(s)
            output_file.write("\n")

            output_file.write("# Extension specification for setting labels:\n")
            output_file.write("# id set labelstring\n")
            tid = self.get_extension_type_ID("LABELINC")
            output_file.write(f"extension LABELINC {tid}\n")
            keys = sorted(self.label_inc_library.data.keys())
            id_format_str = "{:.0f} {:.0f} {}\n"  # See format notes at the top of this function
            for k in keys:
                value = self.label_inc_library.data[k][0]
                # Consistency fix: cast to int like the LABELSET branch above;
                # a float here would raise on list indexing.
                label_id = labels[
                    int(self.label_inc_library.data[k][1]) - 1
                ]  # label_id is +1 in add_block()
                s = id_format_str.format(k, value, label_id)
                output_file.write(s)
            output_file.write("\n")

        if len(self.shape_library.data) != 0:
            output_file.write("# Sequence Shapes\n")
            output_file.write("[SHAPES]\n\n")
            keys = sorted(self.shape_library.data.keys())
            for k in keys:
                shape_data = self.shape_library.data[k]
                s = "shape_id {:.0f}\n".format(k)
                output_file.write(s)
                s = "num_samples {:.0f}\n".format(shape_data[0])
                output_file.write(s)
                s = ("{:.9g}\n" * len(shape_data[1:])).format(*shape_data[1:])
                output_file.write(s)
                output_file.write("\n")

    md5 = None
    if create_signature:  # Sign the file
        # Calculate digest over everything written so far
        with open(file_name, "r") as output_file:
            buffer = output_file.read()

            md5 = hashlib.md5(buffer.encode("utf-8")).hexdigest()
            self.signature_type = "md5"
            self.signature_file = "text"
            self.signature_value = md5

        # Write signature
        with open(file_name, "a") as output_file:
            output_file.write("\n[SIGNATURE]\n")
            output_file.write(
                "# This is the hash of the Pulseq file, calculated right before the [SIGNATURE] section was added\n"
            )
            output_file.write(
                "# It can be reproduced/verified with md5sum if the file trimmed to the position right above [SIGNATURE]\n"
            )
            output_file.write(
                "# The new line character preceding [SIGNATURE] BELONGS to the signature (and needs to be stripped away for "
                "recalculating/verification)\n"
            )
            output_file.write("Type md5\n")
            output_file.write(f"Hash {md5}\n")

    # Fix: surface the digest to the caller (Sequence.write expects it).
    return md5

+ 41 - 0
LF_scanner/pypulseq/__init__.py

@@ -0,0 +1,41 @@
+import numpy as np
+import os
+import sys
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), 'pypulseq')))
+
+# =========
+# BANKER'S ROUNDING FIX
+# =========
def round_half_up(n, decimals=0):
    """
    Round `n` to `decimals` decimal places with ties rounding away from zero
    (MATLAB-style), avoiding Python's banker's rounding inconsistencies.
    Adapted from https://realpython.com/python-rounding/#rounding-half-up

    Bug fix: the original returned `np.floor(np.abs(n) * multiplier + 0.5) / multiplier`,
    which dropped the sign entirely and produced positive results for negative input.
    """
    multiplier = 10**decimals
    # np.sign restores the sign stripped by np.abs, so ties round away from
    # zero symmetrically, e.g. round_half_up(-2.5) == -3.0.
    return np.sign(n) * np.floor(np.abs(n) * multiplier + 0.5) / multiplier
+
+
+# =========
+# NP.FLOAT EPSILON
+# =========
+eps = np.finfo(np.float64).eps
+
+# =========
+# PACKAGE-LEVEL IMPORTS
+# =========
# Prefer the real SAR calculator; fall back to a stub when its optional
# dependencies cannot be imported.
try:
    from LF_scanner.pypulseq.SAR.SAR_calc import calc_SAR
except Exception as _sar_import_error:
    # Optional dependency (matplotlib) may be missing; keep core sequence API usable.
    def calc_SAR(*args, **kwargs):
        """Stub that defers the original import failure until calc_SAR is actually called."""
        raise ImportError(
            "calc_SAR is unavailable because optional SAR dependencies are missing"
        ) from _sar_import_error
+
+from LF_scanner.pypulseq.Sequence.sequence import Sequence
+from LF_scanner.pypulseq.calc_duration import calc_duration
+from LF_scanner.pypulseq.check_timing import check_timing
+from LF_scanner.pypulseq.make_adc import make_adc
+from LF_scanner.pypulseq.make_delay import make_delay
+from LF_scanner.pypulseq.make_sinc_pulse import make_sinc_pulse
+from LF_scanner.pypulseq.make_trapezoid import make_trapezoid
+from LF_scanner.pypulseq.opts import Opts

+ 222 - 0
LF_scanner/pypulseq/add_gradients.py

@@ -0,0 +1,222 @@
+from copy import deepcopy
+from types import SimpleNamespace
+from typing import Iterable
+
+import numpy as np
+
+from LF_scanner.pypulseq import eps
+from LF_scanner.pypulseq.calc_duration import calc_duration
+from LF_scanner.pypulseq.make_arbitrary_grad import make_arbitrary_grad
+from LF_scanner.pypulseq.make_extended_trapezoid import make_extended_trapezoid
+from LF_scanner.pypulseq.opts import Opts
+from LF_scanner.pypulseq.points_to_waveform import points_to_waveform
+
+
def add_gradients(
    grads: Iterable[SimpleNamespace],
    max_grad: int = 0,
    max_slew: int = 0,
    system=Opts(),
) -> SimpleNamespace:
    """
    Returns the superposition of several gradients.

    Parameters
    ----------
    grads : [SimpleNamespace, ...]
        Gradient events; all must live on the same channel.
    system : Opts, default=Opts()
        System limits.
    max_grad : float, default=0
        Maximum gradient amplitude; falls back to `system.max_grad` when <= 0.
    max_slew : float, default=0
        Maximum slew rate; falls back to `system.max_slew` when <= 0.

    Returns
    -------
    grad : SimpleNamespace
        Superimposition of gradient events from `grads`.

    Raises
    ------
    ValueError
        If fewer than two gradients are passed, if the gradients are on
        different channels, or if a gradient of unknown type is encountered.
    """
    # copy() to emulate pass-by-value; otherwise passed grad events are modified
    grads = deepcopy(grads)

    if max_grad <= 0:
        max_grad = system.max_grad
    if max_slew <= 0:
        max_slew = system.max_slew

    if len(grads) < 2:
        raise ValueError("Cannot add less than two gradients")

    # First gradient defines channel
    channel = grads[0].channel

    # Find out the general delay of all gradients and other statistics
    delays, firsts, lasts, durs, is_trap, is_arb = [], [], [], [], [], []
    for ii in range(len(grads)):
        if grads[ii].channel != channel:
            raise ValueError("Cannot add gradients on different channels.")

        delays.append(grads[ii].delay)
        firsts.append(grads[ii].first)
        lasts.append(grads[ii].last)
        durs.append(calc_duration(grads[ii]))
        is_trap.append(grads[ii].type == "trap")
        if is_trap[-1]:
            is_arb.append(False)
        else:
            # A gradient counts as regularly sampled ("arbitrary") when its
            # time points sit at half-raster positions 0.5, 1.5, ... raster units.
            # Bug fix: the original `np.all(np.abs(...)) < eps` compared the
            # *result of np.all* (a bool) against eps, so is_arb was effectively
            # always False; it also compared against a 0-based arange instead of
            # the 1-based sample indices.
            tt_rast = grads[ii].tt / system.grad_raster_time + 0.5
            is_arb.append(
                np.all(np.abs(tt_rast - np.arange(1, len(tt_rast) + 1)) < eps)
            )

    # Convert to numpy.ndarray for fancy-indexing later on.
    # Bug fix: durs/delays must be arrays too -- comparing a Python list against
    # a scalar (`durs == total_duration` below) is a scalar False, which made
    # `grad.last` always 0.
    firsts, lasts = np.array(firsts), np.array(lasts)
    delays, durs = np.array(delays), np.array(durs)

    common_delay = np.min(delays)
    total_duration = np.max(durs)

    # Check if we have a set of traps with the same timing
    if np.all(is_trap):
        cond1 = 1 == len(np.unique([g.delay for g in grads]))
        cond2 = 1 == len(np.unique([g.rise_time for g in grads]))
        cond3 = 1 == len(np.unique([g.flat_time for g in grads]))
        cond4 = 1 == len(np.unique([g.fall_time for g in grads]))
        if cond1 and cond2 and cond3 and cond4:
            # Identical timing: amplitudes simply sum up.
            grad = grads[0]
            grad.amplitude = np.sum([g.amplitude for g in grads])
            grad.area = np.sum([g.area for g in grads])
            grad.flat_area = np.sum([g.flat_area for g in grads])

            return grad

    # Check if we only have arbitrary grads on irregular time samplings, optionally mixed with trapezoids
    if np.all(np.logical_or(is_trap, np.logical_not(is_arb))):
        # Keep shapes still rather simple
        times = []
        for ii in range(len(grads)):
            g = grads[ii]
            if g.type == "trap":
                times.extend(
                    np.cumsum([g.delay, g.rise_time, g.flat_time, g.fall_time])
                )
            else:
                times.extend(g.delay + g.tt)

        times = np.sort(np.unique(times))
        dt = times[1:] - times[:-1]
        ieps = dt < eps
        if np.any(ieps):
            # Merge pairs of nearly identical time points into one.
            # Bug fix: the original applied boolean-array indexing and
            # MATLAB-style element deletion to a plain Python list, which
            # raises TypeError at runtime.
            dtx = np.concatenate(([times[0]], dt))
            idx = np.flatnonzero(ieps)  # dt[idx] == dtx[idx + 1] is the tiny gap
            # Assumes that no more than two too-similar values occur in a row
            dtx[idx] = dtx[idx] + dtx[idx + 1]
            dtx = np.delete(dtx, idx + 1)
            times = np.cumsum(dtx)

        amplitudes = np.zeros_like(times)
        for ii in range(len(grads)):
            g = grads[ii]
            if g.type == "trap":
                # Express the trapezoid as corner points for interpolation.
                if g.flat_time > 0:  # Trapezoid or triangle
                    g.tt = np.cumsum([0, g.rise_time, g.flat_time, g.fall_time])
                    g.waveform = [0, g.amplitude, g.amplitude, 0]
                else:
                    g.tt = np.cumsum([0, g.rise_time, g.fall_time])
                    g.waveform = [0, g.amplitude, 0]

            tt = g.delay + g.tt
            # Fix rounding for the first and last time points
            i_min = np.argmin(np.abs(tt[0] - times))
            t_min = (np.abs(tt[0] - times))[i_min]
            if t_min < eps:
                tt[0] = times[i_min]
            i_min = np.argmin(np.abs(tt[-1] - times))
            t_min = (np.abs(tt[-1] - times))[i_min]
            if t_min < eps:
                tt[-1] = times[i_min]

            # Nudge a non-zero starting point so interpolation sees a step.
            if np.abs(g.waveform[0]) > eps and tt[0] > eps:
                tt[0] += eps

            amplitudes += np.interp(xp=tt, fp=g.waveform, x=times)

        grad = make_extended_trapezoid(
            channel=channel, amplitudes=amplitudes, times=times, system=system
        )
        return grad

    # Convert everything to a regularly-sampled waveform
    waveforms = dict()
    max_length = 0
    for ii in range(len(grads)):
        g = grads[ii]
        if g.type == "grad":
            if is_arb[ii]:
                waveforms[ii] = g.waveform
            else:
                waveforms[ii] = points_to_waveform(
                    amplitudes=g.waveform,
                    times=g.tt,
                    grad_raster_time=system.grad_raster_time,
                )
        elif g.type == "trap":
            if g.flat_time > 0:  # Triangle or trapezoid
                times = np.array(
                    [
                        g.delay - common_delay,
                        g.delay - common_delay + g.rise_time,
                        g.delay - common_delay + g.rise_time + g.flat_time,
                        g.delay
                        - common_delay
                        + g.rise_time
                        + g.flat_time
                        + g.fall_time,
                    ]
                )
                amplitudes = np.array([0, g.amplitude, g.amplitude, 0])
            else:
                times = np.array(
                    [
                        g.delay - common_delay,
                        g.delay - common_delay + g.rise_time,
                        g.delay - common_delay + g.rise_time + g.fall_time,
                    ]
                )
                amplitudes = np.array([0, g.amplitude, 0])
            waveforms[ii] = points_to_waveform(
                amplitudes=amplitudes,
                times=times,
                grad_raster_time=system.grad_raster_time,
            )
        else:
            raise ValueError("Unknown gradient type")

        if g.delay - common_delay > 0:
            # Stop for numpy.arange is not g.delay - common_delay - system.grad_raster_time like in Matlab
            # so as to include the endpoint
            t_delay = np.arange(0, g.delay - common_delay, step=system.grad_raster_time)
            # Bug fix: the original inserted the *time values* themselves as
            # amplitude samples; the extra delay must be padded with zeros.
            waveforms[ii] = np.concatenate((np.zeros(len(t_delay)), waveforms[ii]))

        num_points = len(waveforms[ii])
        max_length = num_points if num_points > max_length else max_length

    # Zero-pad every waveform to the common length and sum.
    w = np.zeros(max_length)
    for ii in range(len(grads)):
        wt = np.zeros(max_length)
        wt[0 : len(waveforms[ii])] = waveforms[ii]
        w += wt

    grad = make_arbitrary_grad(
        channel=channel,
        waveform=w,
        system=system,
        max_slew=max_slew,
        max_grad=max_grad,
        delay=common_delay,
    )
    # Fix the first and the last values
    # First is defined by the sum of firsts with the minimal delay (common_delay)
    # Last is defined by the sum of lasts with the maximum duration (total_duration)
    grad.first = np.sum(firsts[delays == common_delay])
    grad.last = np.sum(lasts[np.where(durs == total_duration)])

    return grad

+ 92 - 0
LF_scanner/pypulseq/add_ramps.py

@@ -0,0 +1,92 @@
+from copy import copy
+from types import SimpleNamespace
+from typing import Union, List
+
+import numpy as np
+
+from pypulseq.calc_ramp import calc_ramp
+from pypulseq.opts import Opts
+
+
def add_ramps(
    k: Union[list, np.ndarray, tuple],
    max_grad: int = 0,
    max_slew: int = 0,
    rf: SimpleNamespace = None,
    system=Opts(),
) -> List[np.ndarray]:
    """
    Add segments to the trajectory to ramp to and from the given trajectory.

    Parameters
    ----------
    k : numpy.ndarray, or [numpy.ndarray, ...]
        If `k` is a single trajectory: Add a segment to `k` so `k_out` travels from 0 to `k[0]` and a segment so `k_out`
        goes from `k[-1]` back to 0 without violating the gradient and slew constraints.
        If `k` is multiple trajectories: add segments of the same length for each trajectory in the cell array.
    system : Opts, default=Opts()
        System limits.
    rf : SimpleNamespace, default=None
        Add a segment of zeros over the ramp times to an RF shape.
    max_grad : int, default=0
        Maximum gradient amplitude; overrides `system.max_grad` when positive.
    max_slew : int, default=0
        Maximum slew rate; overrides `system.max_slew` when positive.

    Returns
    -------
    result : [numpy.ndarray, ...]
        List of ramped up and ramped down k-space trajectories from `k`.

    Raises
    ------
    ValueError
        If `k` is not list, np.ndarray or tuple
    RuntimeError
        If gradient ramps fail to be calculated
    """
    if not isinstance(k, (list, np.ndarray, tuple)):
        raise ValueError(
            f"k has to be one of list, np.ndarray, tuple. Passed: {type(k)}"
        )

    # Remember the original container type to decide the output layout below.
    k_arg = copy(k)

    # Bug fix: the original mutated `system` in place, which corrupted the
    # caller's Opts object and -- because `system=Opts()` is a shared default
    # argument -- leaked the overridden limits into every subsequent call.
    # Work on a shallow copy when limits are overridden (Opts holds scalars).
    if max_grad > 0 or max_slew > 0:
        system = copy(system)
    if max_grad > 0:
        system.max_grad = max_grad
    if max_slew > 0:
        system.max_slew = max_slew

    k = np.vstack(k)
    num_channels = k.shape[0]
    k = np.vstack(
        (k, np.zeros((3 - num_channels, k.shape[1])))
    )  # Pad with zeros if needed

    # Ramp from the origin into the first two points, and from the last two
    # points back to the origin.
    k_up, ok1 = calc_ramp(k0=np.zeros((3, 2)), k_end=k[:, :2], system=system)
    k_down, ok2 = calc_ramp(k0=k[:, -2:], k_end=np.zeros((3, 2)), system=system)
    if not (ok1 and ok2):
        raise RuntimeError("Failed to calculate gradient ramps")

    # Add start and end points to ramps
    k_up = np.hstack((np.zeros((3, 2)), k_up))
    k_down = np.hstack((k_down, np.zeros((3, 1))))

    # Add ramps to trajectory
    k = np.hstack((k_up, k, k_down))

    result = []
    if not isinstance(k_arg, list):
        result.append(k[:num_channels])
    else:
        for i in range(num_channels):
            result.append(k[i])

    if rf is not None:
        # Pad the RF shape with zeros covering the ramp durations.
        # NOTE(review): the factor 10 assumes 10 RF raster points per gradient
        # raster point -- confirm against the system raster settings.
        result.append(
            np.concatenate(
                (np.zeros(k_up.shape[1] * 10), rf, np.zeros(k_down.shape[1] * 10))
            )
        )

    return result

+ 81 - 0
LF_scanner/pypulseq/align.py

@@ -0,0 +1,81 @@
+from copy import deepcopy
+from types import SimpleNamespace
+from typing import List, Union
+
+import numpy as np
+
+from LF_scanner.pypulseq.calc_duration import calc_duration
+
+
def align(
    **kwargs: Union[SimpleNamespace, List[SimpleNamespace]]
) -> List[SimpleNamespace]:
    """
    Sets delays of the objects within the block to achieve the desired alignment of the objects in the block. Aligns
    objects as per specified alignment options by setting delays of the pulse sequence events within the block. All
    previously configured delays within objects are taken into account during calculating of the block duration but
    then reset according to the selected alignment. Possible values for align_spec are 'left', 'center', 'right'.

    Parameters
    ----------
    kwargs : dict{str, SimpleNamespace or [SimpleNamespace, ...]}
        Keyword mapping of alignment options to `SimpleNamespace` objects.
        Format: alignment_spec1=SimpleNamespace, alignment_spec2=[SimpleNamespace, ...], ...
        Alignment spec must be one of `left`, `center` or `right`.

    Returns
    -------
    objects : [SimpleNamespace, ...]
        List of aligned `SimpleNamespace` objects.

    Raises
    ------
    ValueError
        If no alignment option is passed.
        If first parameter is not of type `str`.
        If invalid alignment spec is passed. Must be one of `left`, `center` or `right`.

    Examples
    --------
    al_grad1, al_grad2, al_grad3 = align(right=[grad1, grad2, grad3])
    """
    alignment_specs = list(kwargs.keys())
    # Robustness fix: calling align() with no arguments previously crashed with
    # an opaque IndexError on alignment_specs[0]; fail with a clear message.
    if len(alignment_specs) == 0:
        raise ValueError(
            "At least one alignment spec (left/center/right) with objects must be provided."
        )
    if not isinstance(alignment_specs[0], str):
        raise ValueError(
            f"First parameter must be of type str. Passed: {type(alignment_specs[0])}"
        )

    alignment_options = ["left", "center", "right"]
    if np.any([align_opt not in alignment_options for align_opt in alignment_specs]):
        raise ValueError("Invalid alignment spec.")

    # Flatten all (alignment, object) pairs into two parallel lists.
    alignments = []
    objects = []
    for curr_align in alignment_specs:
        objects_to_align = kwargs[curr_align]
        curr_align = alignment_options.index(curr_align)  # 0=left, 1=center, 2=right
        if isinstance(objects_to_align, (list, np.ndarray, tuple)):
            alignments.extend([curr_align] * len(objects_to_align))
            objects.extend(objects_to_align)
        elif isinstance(objects_to_align, SimpleNamespace):
            alignments.extend([curr_align])
            objects.append(objects_to_align)

    # Block duration is computed with the pre-existing delays still in place.
    dur = calc_duration(*objects)

    # copy() to emulate pass-by-value; otherwise passed events are modified
    objects = deepcopy(objects)

    # Set new delays
    for i in range(len(objects)):
        if alignments[i] == 0:  # left
            objects[i].delay = 0
        elif alignments[i] == 1:  # center
            objects[i].delay = (dur - calc_duration(objects[i])) / 2
        elif alignments[i] == 2:  # right
            objects[i].delay = dur - calc_duration(objects[i]) + objects[i].delay
            if objects[i].delay < 0:
                raise ValueError(
                    "align() attempts to set a negative delay, probably some RF pulses ignore rf_ringdown_time"
                )

    return objects

+ 53 - 0
LF_scanner/pypulseq/block_to_events.py

@@ -0,0 +1,53 @@
+from types import SimpleNamespace
+from typing import Tuple
+
+
def block_to_events(*args: SimpleNamespace) -> Tuple[SimpleNamespace, ...]:
    """
    Convert `args` from a block to a list of events. If `args` already is a
    list of event(s), it is returned unmodified.

    Parameters
    ----------
    args : SimpleNamespace
        Block to be flattened into a list of events.

    Returns
    -------
    events : list[SimpleNamespace]
        List of events comprising `args` if it was a block, otherwise `args` unmodified.
    """
    if len(args) != 1:
        # Several positional arguments: already a sequence of events.
        return __get_label_events_if_any(*args)

    candidate = args[0]
    if isinstance(candidate, (float, int)):
        # A bare number stands for the block duration.
        return [candidate]

    # Collect the non-None attributes of the namespace.
    attrs = [v for v in vars(candidate).values() if v is not None]

    # A block holds only durations (float) and events (SimpleNamespace);
    # any other attribute type means `candidate` is itself a single event.
    if not all(isinstance(a, (float, SimpleNamespace)) for a in attrs):
        return [candidate]

    # Flatten label events, which are stored as dicts inside the block.
    return __get_label_events_if_any(*attrs)


def __get_label_events_if_any(*events: list) -> list:
    """Expand any dict-typed entries (labels) into their constituent values."""
    flattened = []
    for item in events:
        if isinstance(item, dict):  # Only labels are stored as dicts
            flattened.extend(item.values())
        else:
            flattened.append(item)

    return flattened

+ 61 - 0
LF_scanner/pypulseq/calc_duration.py

@@ -0,0 +1,61 @@
+from types import SimpleNamespace
+
+import numpy as np
+
+from LF_scanner.pypulseq.block_to_events import block_to_events
+
+
def calc_duration(*args: SimpleNamespace) -> float:
    """
    Calculate the duration of an event or block.

    Parameters
    ----------
    args : SimpleNamespace
        Block or events.

    Returns
    -------
    duration : float
        Cumulative duration of `args`.
    """
    duration = 0

    for event in block_to_events(*args):
        # A bare number encodes the block duration itself and acts as a floor.
        if isinstance(event, (float, int)):
            assert duration <= event
            duration = event
            continue

        if not isinstance(event, (dict, SimpleNamespace)):
            raise TypeError(
                "input(s) should be of type SimpleNamespace or a dict() in case of LABELINC or LABELSET"
            )

        # Compute when this event finishes, depending on its kind.
        event_type = event.type
        if event_type == "delay":
            end_time = event.delay
        elif event_type == "rf":
            end_time = event.delay + event.shape_dur + event.ringdown_time
        elif event_type == "grad":
            end_time = event.delay + event.shape_dur
        elif event_type == "adc":
            end_time = event.delay + event.num_samples * event.dwell + event.dead_time
        elif event_type == "trap":
            end_time = event.delay + event.rise_time + event.flat_time + event.fall_time
        elif event_type in ("output", "trigger"):
            end_time = event.delay + event.duration
        else:
            # Unknown event kinds do not contribute to the duration.
            continue

        duration = np.max([duration, end_time])

    return duration

+ 355 - 0
LF_scanner/pypulseq/calc_ramp.py

@@ -0,0 +1,355 @@
+from typing import Tuple
+
+import numpy as np
+
+from LF_scanner.pypulseq.opts import Opts
+
+
def calc_ramp(
    k0: np.ndarray,
    k_end: np.ndarray,
    max_grad: np.ndarray = np.zeros(0),
    max_points: int = 500,
    max_slew: np.ndarray = np.zeros(0),
    system: Opts = Opts(),
) -> Tuple[np.ndarray, bool]:
    """
    Join the points `k0` and `k_end` in three-dimensional  k-space in minimal time, observing the gradient and slew
    limits (`max_grad` and `max_slew` respectively), and the gradient strength `G0` before `k0[:, 1]` and `Gend` after
    `k_end[:, 1]`. In the context of a fixed gradient dwell time this is a discrete problem with an a priori unknown
    number of discretization steps. Therefore this method tries out the optimization with 0 steps, then 1 step, and so
    on, until  all conditions can be fulfilled, thus yielding a short connection.

    Parameters
    ----------
    k0 : numpy.ndarray
        Two preceding points in k-space. Shape is `[3, 2]`. From these points, the starting gradient will be calculated.
    k_end : numpy.ndarray
        Two following points in k-space. Shape is `[3, 2]`. From these points, the target gradient will be calculated.
    max_grad : float or array_like, default=0
        Maximum total gradient strength. Either a single value or one value for each coordinate, of shape `[3, 1]`.
    max_points : int, default=500
        Maximum number of k-space points to be used in connecting `k0` and `k_end`.
    max_slew : float or array_like, default=0
        Maximum total slew rate. Either a single value or one value for each coordinate, of shape `[3, 1]`.
    system : Opts, default=Opts()
        System limits.

    Returns
    -------
    k_out : numpy.ndarray
        Connected k-space trajectory.
    success : bool
        Boolean flag indicating if `k0` and `k_end` were successfully joined.
    """

    def __inside_limits(grad, slew):
        # Check a candidate gradient/slew waveform against the limits.
        # mode 0: single limit on the Euclidean magnitude of the 3-vector;
        # mode 1: independent per-axis limits.
        if mode == 0:
            grad2 = np.sum(np.square(grad), axis=1)
            slew2 = np.sum(np.square(slew), axis=1)
            ok = np.all(np.max(grad2) <= np.square(max_grad)) and np.all(
                np.max(slew2) <= np.square(max_slew)
            )
        else:
            ok = (np.sum(np.max(np.abs(grad), axis=1) <= max_grad) == 3) and (
                np.sum(np.max(np.abs(slew), axis=1) <= max_slew) == 3
            )

        return ok

    def __joinleft0(k0, k_end, use_points, G0, G_end):
        # Magnitude-limited (mode 0) variant: grow the trajectory from the
        # left end, then recurse via __joinright0 for the remaining points.
        if use_points == 0:
            G = np.stack((G0, (k_end - k0) / grad_raster, G_end)).T
            S = (G[:, 1:] - G[:, :-1]) / grad_raster

            k_out_left = np.zeros((3, 0))
            success = __inside_limits(G, S)

            return success, k_out_left

        dk = (k_end - k0) / (use_points + 1)
        kopt = k0 + dk
        Gopt = (kopt - k0) / grad_raster
        Sopt = (Gopt - G0) / grad_raster

        okGopt = np.sum(np.square(Gopt)) <= np.square(max_grad)
        okSopt = np.sum(np.square(Sopt)) <= np.square(max_slew)

        if okGopt and okSopt:
            k_left = kopt
        else:
            # a: largest gradient-limited step, b: largest slew-limited step
            a = np.multiply(max_grad, grad_raster)
            b = np.multiply(max_slew, grad_raster**2)

            dkprol = G0 * grad_raster
            dkconn = dk - dkprol

            ksl = k0 + dkprol + dkconn / np.linalg.norm(dkconn) * b
            Gsl = (ksl - k0) / grad_raster
            okGsl = np.sum(np.square(Gsl)) <= np.square(max_grad)

            kgl = k0 + np.multiply(dk / np.linalg.norm(dk), a)
            Ggl = (kgl - k0) / grad_raster
            Sgl = (Ggl - G0) / grad_raster
            okSgl = np.sum(np.square(Sgl)) <= np.square(max_slew)

            if okGsl:
                k_left = ksl
            elif okSgl:
                k_left = kgl
            else:
                # Intersect the gradient-limit sphere (radius a) with the
                # slew-limit sphere (radius b) around the prolongation point.
                c = np.linalg.norm(dkprol)
                c1 = np.divide(np.square(a) - np.square(b) + np.square(c), (2 * c))
                h = np.sqrt(np.square(a) - np.square(c1))
                kglsl = k0 + np.multiply(c1, np.divide(dkprol, np.linalg.norm(dkprol)))
                # Scalar projection of kgl onto the dkprol direction; the
                # original MATLAB code computes the dot product kGL*dkprol'
                # (the previous elementwise product was a porting error).
                projondkprol = np.dot(kgl, dkprol) * (dkprol / np.linalg.norm(dkprol))
                hdirection = kgl - projondkprol
                kglsl = kglsl + h * hdirection / np.linalg.norm(hdirection)
                k_left = kglsl

        success, k = __joinright0(
            k_left, k_end, (k_left - k0) / grad_raster, G_end, use_points - 1
        )
        if len(k) != 0:
            if len(k.shape) == 1:
                k = k.reshape((len(k), 1))
            if len(k_left.shape) == 1:
                k_left = k_left.reshape((len(k_left), 1))
            k_out_left = np.hstack((k_left, k))
        else:
            k_out_left = k_left

        return success, k_out_left

    def __joinleft1(k0, k_end, use_points, G0, G_end):
        # Per-axis (mode 1) variant of __joinleft0.
        if use_points == 0:
            G = np.stack((G0, (k_end - k0) / grad_raster, G_end))
            S = (G[:, 1:] - G[:, :-1]) / grad_raster

            k_out_left = np.zeros((3, 0))
            success = __inside_limits(G, S)

            return success, k_out_left

        k_left = np.zeros(3)

        dk = (k_end - k0) / (use_points + 1)
        kopt = k0 + dk
        Gopt = (kopt - k0) / grad_raster
        Sopt = (Gopt - G0) / grad_raster

        okGopt = np.abs(Gopt) <= max_grad
        okSopt = np.abs(Sopt) <= max_slew

        dkprol = G0 * grad_raster
        dkconn = dk - dkprol

        # Slew-limited step: delta-k = S_max * dt^2 beyond the prolongation
        ksl = k0 + dkprol + np.multiply(np.sign(dkconn), max_slew) * grad_raster**2
        Gsl = (ksl - k0) / grad_raster
        okGsl = np.abs(Gsl) <= max_grad

        # Gradient-limited step: delta-k = G_max * dt (mirrors __joinright1;
        # the previous grad_raster**2 factor was inconsistent and wrong).
        kgl = k0 + np.multiply(np.sign(dk), max_grad) * grad_raster
        Ggl = (kgl - k0) / grad_raster
        Sgl = (Ggl - G0) / grad_raster
        okSgl = np.abs(Sgl) <= max_slew

        for ii in range(3):
            if okGopt[ii] == 1 and okSopt[ii] == 1:
                k_left[ii] = kopt[ii]
            elif okGsl[ii] == 1:
                k_left[ii] = ksl[ii]
            elif okSgl[ii] == 1:
                k_left[ii] = kgl[ii]
            else:
                print("Unknown error")

        success, k = __joinright1(
            k0=k_left,
            k_end=k_end,
            use_points=use_points - 1,
            G0=(k_left - k0) / grad_raster,
            G_end=G_end,
        )
        if len(k) != 0:
            if len(k.shape) == 1:
                k = k.reshape((len(k), 1))
            if len(k_left.shape) == 1:
                k_left = k_left.reshape((len(k_left), 1))
            k_out_left = np.hstack((k_left, k))
        else:
            k_out_left = k_left

        return success, k_out_left

    def __joinright0(k0, k_end, use_points, G0, G_end):
        # Mirror of __joinleft0: grow the trajectory from the right end.
        if use_points == 0:
            G = np.stack((G0, (k_end - k0) / grad_raster, G_end)).T
            S = (G[:, 1:] - G[:, :-1]) / grad_raster

            k_out_right = np.zeros((3, 0))
            success = __inside_limits(G, S)

            return success, k_out_right

        dk = (k0 - k_end) / (use_points + 1)
        kopt = k_end + dk
        Gopt = (k_end - kopt) / grad_raster
        Sopt = (G_end - Gopt) / grad_raster

        okGopt = np.sum(np.square(Gopt)) <= np.square(max_grad)
        okSopt = np.sum(np.square(Sopt)) <= np.square(max_slew)

        if okGopt and okSopt:
            k_right = kopt
        else:
            a = np.multiply(max_grad, grad_raster)
            b = np.multiply(max_slew, grad_raster**2)

            dkprol = -G_end * grad_raster
            dkconn = dk - dkprol

            ksl = k_end + dkprol + dkconn / np.linalg.norm(dkconn) * b
            Gsl = (k_end - ksl) / grad_raster
            okGsl = np.sum(np.square(Gsl)) <= np.square(max_grad)

            kgl = k_end + np.multiply(dk / np.linalg.norm(dk), a)
            Ggl = (k_end - kgl) / grad_raster
            Sgl = (G_end - Ggl) / grad_raster
            okSgl = np.sum(np.square(Sgl)) <= np.square(max_slew)

            if okGsl:
                k_right = ksl
            elif okSgl:
                k_right = kgl
            else:
                c = np.linalg.norm(dkprol)
                c1 = np.divide(np.square(a) - np.square(b) + np.square(c), (2 * c))
                h = np.sqrt(np.square(a) - np.square(c1))
                kglsl = k_end + np.multiply(
                    c1, np.divide(dkprol, np.linalg.norm(dkprol))
                )
                # Scalar projection, as in __joinleft0 (dot product, not an
                # elementwise product — see note there).
                projondkprol = np.dot(kgl, dkprol) * (dkprol / np.linalg.norm(dkprol))
                hdirection = kgl - projondkprol
                kglsl = kglsl + h * hdirection / np.linalg.norm(hdirection)
                k_right = kglsl

        success, k = __joinleft0(
            k0=k0,
            k_end=k_right,
            G0=G0,
            G_end=(k_end - k_right) / grad_raster,
            use_points=use_points - 1,
        )
        if len(k) != 0:
            if len(k.shape) == 1:
                k = k.reshape((len(k), 1))
            if len(k_right.shape) == 1:
                k_right = k_right.reshape((len(k_right), 1))
            k_out_right = np.hstack((k, k_right))
        else:
            k_out_right = k_right

        return success, k_out_right

    def __joinright1(k0, k_end, use_points, G0, G_end):
        # Mirror of __joinleft1: per-axis limits, growing from the right end.
        if use_points == 0:
            G = np.stack((G0, (k_end - k0) / grad_raster, G_end))
            S = (G[:, 1:] - G[:, :-1]) / grad_raster

            k_out_right = np.zeros((3, 0))
            success = __inside_limits(G, S)

            return success, k_out_right

        k_right = np.zeros(3)

        dk = (k0 - k_end) / (use_points + 1)
        kopt = k_end + dk
        Gopt = (k_end - kopt) / grad_raster
        Sopt = (G_end - Gopt) / grad_raster

        okGopt = np.abs(Gopt) <= max_grad
        okSopt = np.abs(Sopt) <= max_slew

        dkprol = -G_end * grad_raster
        dkconn = dk - dkprol

        ksl = k_end + dkprol + np.multiply(np.sign(dkconn), max_slew) * grad_raster**2
        Gsl = (k_end - ksl) / grad_raster
        okGsl = np.abs(Gsl) <= max_grad

        kgl = k_end + np.multiply(np.sign(dk), max_grad) * grad_raster
        Ggl = (k_end - kgl) / grad_raster
        Sgl = (G_end - Ggl) / grad_raster
        okSgl = np.abs(Sgl) <= max_slew

        for ii in range(3):
            if okGopt[ii] == 1 and okSopt[ii] == 1:
                k_right[ii] = kopt[ii]
            elif okGsl[ii] == 1:
                k_right[ii] = ksl[ii]
            elif okSgl[ii] == 1:
                k_right[ii] = kgl[ii]
            else:
                print("Unknown error")

        success, k = __joinleft1(
            k0=k0,
            k_end=k_right,
            use_points=use_points - 1,
            G0=G0,
            G_end=(k_end - k_right) / grad_raster,
        )
        if len(k) != 0:
            if len(k.shape) == 1:
                k = k.reshape((len(k), 1))
            if len(k_right.shape) == 1:
                k_right = k_right.reshape((len(k_right), 1))
            k_out_right = np.hstack((k, k_right))
        else:
            k_out_right = k_right

        return success, k_out_right

    # =========
    # MAIN FUNCTION
    # =========
    # Fall back to the system limits when no (or only non-positive) limits
    # were supplied. The previous test np.all(np.where(max_grad <= 0)) was
    # always truthy (np.where returns an index tuple), so user-supplied
    # limits were silently discarded.
    if np.all(max_grad <= 0):
        max_grad = [system.max_grad]
    if np.all(max_slew <= 0):
        max_slew = [system.max_slew]

    grad_raster = system.grad_raster_time

    # mode 0: one shared magnitude limit; mode 1: per-axis limits.
    if len(max_grad) == 1 and len(max_slew) == 1:
        mode = 0
    elif len(max_grad) == 3 and len(max_slew) == 3:
        mode = 1
    else:
        raise ValueError("Input value max grad or max slew in invalid format.")

    # Boundary gradients from the two point pairs; keep only the inner points.
    G0 = (k0[:, 1] - k0[:, 0]) / grad_raster
    G_end = (k_end[:, 1] - k_end[:, 0]) / grad_raster
    k0 = k0[:, 1]
    k_end = k_end[:, 0]

    success = 0
    k_out = np.zeros((3, 0))
    use_points = 0

    # Try ever longer connections until the limits can be satisfied.
    while success == 0 and use_points <= max_points:
        if mode == 0:
            if np.linalg.norm(G0) > max_grad or np.linalg.norm(G_end) > max_grad:
                break
            success, k_out = __joinleft0(
                k0=k0, k_end=k_end, G0=G0, G_end=G_end, use_points=use_points
            )
        else:
            # np.any is required here: the comparisons are 3-vectors and a
            # bare `if` on a boolean array raises ValueError.
            if np.any(np.abs(G0) > np.abs(max_grad)) or np.any(
                np.abs(G_end) > np.abs(max_grad)
            ):
                break
            success, k_out = __joinleft1(
                k0=k0, k_end=k_end, use_points=use_points, G0=G0, G_end=G_end
            )
        use_points += 1

    return k_out, success

+ 65 - 0
LF_scanner/pypulseq/calc_rf_bandwidth.py

@@ -0,0 +1,65 @@
+from types import SimpleNamespace
+from typing import Union, Tuple
+
+import numpy as np
+
+from LF_scanner.pypulseq.calc_rf_center import calc_rf_center
+
+
def calc_rf_bandwidth(
    rf: SimpleNamespace,
    cutoff: float = 0.5,
    return_axis: bool = False,
    return_spectrum: bool = False,
) -> Union[float, Tuple[float, np.ndarray], Tuple[float, np.ndarray, np.ndarray]]:
    """
    Calculate the spectrum of the RF pulse. Returns the bandwidth of the pulse (calculated by a simple FFT, e.g.
    presuming a low-angle approximation) and optionally the spectrum and the frequency axis. The default for the
    optional parameter 'cutoff' is 0.5.

    Parameters
    ----------
    rf : SimpleNamespace
        RF pulse event.
    cutoff : float, default=0.5
        Fraction of the spectral peak at which the bandwidth is measured.
    return_axis : bool, default=False
        Boolean flag to indicate if frequency axis of RF pulse will be returned (implies returning the spectrum).
    return_spectrum : bool, default=False
        Boolean flag to indicate if spectrum of RF pulse will be returned.

    Returns
    -------
    bw : float
        Bandwidth of the RF pulse.
    spectrum : numpy.ndarray, optional
        Spectrum of the RF pulse, if `return_spectrum` or `return_axis` is set.
    w : numpy.ndarray, optional
        Frequency axis of the spectrum, if `return_axis` is set.
    """
    time_center, _ = calc_rf_center(rf)

    # Resample the pulse to a regular time grid of nn = 1/(dw*dt) points
    dw = 10  # spectral resolution, Hz
    dt = 1e-6  # resampling dwell time; for now, 1 MHz
    nn = np.round(1 / dw / dt)
    # np.arange excludes the stop value, so the inclusive MATLAB range
    # -floor(nn/2):ceil(nn/2)-1 maps to arange(-floor(nn/2), ceil(nn/2)).
    # The previous extra "-1" dropped the last sample (length nn-1, not nn).
    tt = np.arange(-np.floor(nn / 2), np.ceil(nn / 2)) * dt

    rfs = np.interp(xp=rf.t - time_center, fp=rf.signal, x=tt)
    spectrum = np.fft.fftshift(np.fft.fft(np.fft.fftshift(rfs)))
    w = np.arange(-np.floor(nn / 2), np.ceil(nn / 2)) * dw

    # Bandwidth is the distance between the left and right cutoff flanks.
    w1 = __find_flank(w, spectrum, cutoff)
    w2 = __find_flank(w[::-1], spectrum[::-1], cutoff)

    bw = w2 - w1

    if return_spectrum and not return_axis:
        return bw, spectrum
    if return_axis:
        return bw, spectrum, w

    return bw
+
+
def __find_flank(x, f, c):
    """Return the first `x` where `|f|`, normalized to a peak of 1, exceeds `c`."""
    normalized = np.abs(f) / np.max(np.abs(f))
    first_idx = np.argwhere(normalized > c)[0]
    return x[first_idx]

+ 31 - 0
LF_scanner/pypulseq/calc_rf_center.py

@@ -0,0 +1,31 @@
+from types import SimpleNamespace
+from typing import Tuple
+
+import numpy as np
+
+
def calc_rf_center(rf: SimpleNamespace) -> Tuple[float, float]:
    """
    Calculate the time point of the effective rotation calculated as the peak of the radio-frequency amplitude for the
    shaped pulses and the center of the pulse for the block pulses. Zero padding in the radio-frequency pulse is
    considered as a part of the shape. Delay field of the radio-frequency object is not taken into account.

    Parameters
    ----------
    rf : SimpleNamespace
        Radio-frequency pulse event.

    Returns
    -------
    time_center : float
        Time point of the center of the radio-frequency pulse.
    id_center : float
        Corresponding position of `time_center` in the radio-frequency pulse's envelope.
    """
    envelope = np.abs(rf.signal)
    # Everything within 0.001% of the maximum counts as the peak; a block
    # pulse thus yields a plateau and we take its center.
    plateau = np.where(envelope >= np.max(envelope) * 0.99999)[0]
    first, last = plateau[0], plateau[-1]
    time_center = (rf.t[first] + rf.t[last]) / 2
    id_center = plateau[int(np.round((len(plateau) - 1) / 2))]

    return time_center, id_center

+ 119 - 0
LF_scanner/pypulseq/check_timing.py

@@ -0,0 +1,119 @@
+from types import SimpleNamespace
+from typing import Tuple
+
+import numpy as np
+
+from LF_scanner.pypulseq import eps
+from LF_scanner.pypulseq.calc_duration import calc_duration
+from LF_scanner.pypulseq.opts import Opts
+
+
def check_timing(system: Opts, *events: SimpleNamespace) -> Tuple[bool, str, float]:
    """
    Checks if timings of `events` are aligned with the corresponding raster time.

    Parameters
    ----------
    system : Opts
        System limits object.
    events : SimpleNamespace
        Events.

    Returns
    -------
    is_ok : bool
        Boolean flag indicating if timing of events `events` are aligned with gradient raster time
        `system.grad_raster_time`.
    text_err : str
        Error string, if timings are not aligned.
    total_duration : float
        Total duration of events.

    Raises
    ------
    ValueError
        If incorrect data type is encountered in `events`.
    """
    # An empty block has nothing to check and is reported as damaged.
    if len(events) == 0:
        text_err = "Empty or damaged block detected"
        is_ok = False
        total_duration = 0.0
        return is_ok, text_err, total_duration

    # The total block duration must sit on the block-duration raster.
    total_duration = calc_duration(*events)
    is_ok = __div_check(total_duration, system.block_duration_raster)
    text_err = "" if is_ok else f"Total duration: {total_duration * 1e6} us"

    for e in events:
        if isinstance(e, (float, int)):  # Special handling for block_duration
            continue
        elif not isinstance(e, (dict, SimpleNamespace)):
            raise ValueError(
                "Wrong data type of variable arguments, list[SimpleNamespace] expected."
            )
        ok = True
        # NOTE(review): this branch looks unreachable — a list would already
        # have raised ValueError above. Presumably meant for extension
        # arrays; confirm against the caller.
        if isinstance(e, list) and len(e) > 1:
            # For now this is only the case for arrays of extensions, but we cannot actually check extensions anyway...
            continue
        # RF and ADC events live on the RF raster; everything else is
        # checked against the gradient raster.
        if hasattr(e, "type") and (e.type == "adc" or e.type == "rf"):
            raster = system.rf_raster_time
        else:
            raster = system.grad_raster_time

        # Delays must be non-negative (within eps) and raster-aligned.
        if hasattr(e, "delay"):
            if e.delay < -eps:
                ok = False
            if not __div_check(e.delay, raster):
                ok = False

        if hasattr(e, "duration"):
            if not __div_check(e.duration, raster):
                ok = False

        # ADC dwell must be at least one ADC raster tick and an (almost
        # exact) integer multiple of it.
        if hasattr(e, "dwell"):
            if (
                e.dwell < system.adc_raster_time
                or np.abs(
                    np.round(e.dwell / system.adc_raster_time) * system.adc_raster_time
                    - e.dwell
                )
                > 1e-10
            ):
                ok = False

        # All three trapezoid segments must sit on the gradient raster.
        if hasattr(e, "type") and e.type == "trap":
            if (
                not __div_check(e.rise_time, system.grad_raster_time)
                or not __div_check(e.flat_time, system.grad_raster_time)
                or not __div_check(e.fall_time, system.grad_raster_time)
            ):
                ok = False

        if not ok:
            is_ok = False

            # Build a human-readable description of the offending event.
            # NOTE: only the description of the LAST offending event survives.
            text_err = "["
            if hasattr(e, "type"):
                text_err += f"type: {e.type} "
            if hasattr(e, "delay"):
                text_err += f"delay: {e.delay * 1e6} us "
            if hasattr(e, "duration"):
                text_err += f"duration: {e.duration * 1e6} us"
            if hasattr(e, "dwell"):
                text_err += f"dwell: {e.dwell * 1e9} ns"
            if hasattr(e, "type") and e.type == "trap":
                text_err += (
                    f"rise time: {e.rise_time * 1e6} flat time: {e.flat_time * 1e6} "
                    f"fall time: {e.fall_time * 1e6} us"
                )
            text_err += "]"

    return is_ok, text_err, total_duration
+
+
def __div_check(a: float, b: float) -> bool:
    """
    Checks whether `a` can be divided by `b` to an accuracy of 1e-9.
    """
    ratio = a / b
    remainder = abs(ratio - np.round(ratio))
    return remainder < 1e-9

+ 76 - 0
LF_scanner/pypulseq/compress_shape.py

@@ -0,0 +1,76 @@
+from types import SimpleNamespace
+
+import numpy as np
+
+
def compress_shape(
    decompressed_shape: np.ndarray, force_compression: bool = False
) -> SimpleNamespace:
    """
    Run-length compress a gradient or pulse shape on its first derivative, so constant and linear segments need only
    a few samples. The result carries:
    - num_samples - number of samples in the uncompressed waveform
    - data - the compressed waveform (or the original, when compression does not pay off)

    See also `pypulseq.decompress_shape.py`.

    Parameters
    ----------
    decompressed_shape : numpy.ndarray
        Decompressed shape.
    force_compression: bool, default=False
        Boolean flag to indicate if compression is forced.

    Returns
    -------
    compressed_shape : SimpleNamespace
        A `SimpleNamespace` object containing the number of samples and the compressed data.
    """
    if np.any(~np.isfinite(decompressed_shape)):
        raise ValueError("compress_shape() received infinite samples.")

    num_samples = len(decompressed_shape)

    # Very short shapes gain nothing from compression
    if not force_compression and num_samples <= 4:
        short_shape = SimpleNamespace()
        short_shape.num_samples = num_samples
        short_shape.data = decompressed_shape
        return short_shape

    # Quantize at ~single precision (about 7.25 decimal digits)
    quant_factor = 1e-7
    scaled = decompressed_shape / quant_factor
    derivative = np.round(np.insert(np.diff(scaled), 0, scaled[0]))
    # Feed quantization error back so the cumulative sum stays faithful
    quant_error = scaled - np.cumsum(derivative)
    correction = np.insert(np.diff(np.round(quant_error)), 0, 0)
    corrected = derivative + correction

    change_mask = np.insert(np.asarray(np.diff(corrected) != 0, dtype=np.int32), 0, 1)
    # One representative value per run
    run_values = corrected[change_mask.nonzero()[0]] * quant_factor

    # Run boundaries and run lengths
    boundaries = np.append(change_mask, 1).nonzero()[0]
    run_lengths = np.diff(boundaries)

    # A run of length n is encoded as "v v (n-2)"; shorter runs stay literal
    extra_reps = (run_lengths - 2).astype(np.float32)  # float so nan assignment works
    dup_values = np.copy(run_values)
    dup_values[extra_reps < 0] = np.nan
    extra_reps[extra_reps < 0] = np.nan
    packed = np.stack((run_values, dup_values, extra_reps))
    packed = packed.T[np.isfinite(packed).T]  # Transposes emulate Matlab's Fortran order
    packed[abs(packed) < 1e-10] = 0

    compressed_shape = SimpleNamespace()
    compressed_shape.num_samples = num_samples

    # Keep the original when compression would not shrink the shape
    if force_compression or num_samples > len(packed):
        compressed_shape.data = packed
    else:
        compressed_shape.data = decompressed_shape

    return compressed_shape

+ 91 - 0
LF_scanner/pypulseq/convert.py

@@ -0,0 +1,91 @@
+from typing import Iterable, Union
+
+import numpy as np
+
+
def convert(
    from_value: Union[float, Iterable],
    from_unit: str,
    gamma: float = 42.576e6,
    to_unit: str = "",
) -> Union[float, Iterable]:
    """
    Converts gradient amplitude or slew rate from unit `from_unit` to unit `to_unit` with gyromagnetic ratio `gamma`.

    Parameters
    ----------
    from_value : float
        Gradient amplitude or slew rate to convert from.
    from_unit : str
        Unit of gradient amplitude or slew rate to convert from.
    gamma : float, default=42.576e6
        Gyromagnetic ratio. Default is 42.576e6, for Hydrogen.
    to_unit : str, default=''
        Unit of gradient amplitude or slew rate to convert to. Defaults to the standard unit of the same kind
        ('Hz/m' for gradients, 'Hz/m/s' for slew rates).

    Returns
    -------
    out : float
        Converted gradient amplitude or slew rate.

    Raises
    ------
    ValueError
        If an invalid `from_unit` is passed. Must be one of 'Hz/m', 'mT/m', 'rad/ms/mm' (gradient) or
        'Hz/m/s', 'mT/m/ms', 'T/m/s', 'rad/ms/mm/ms' (slew rate).
        If an invalid `to_unit` is passed. Same admissible values as for `from_unit`.
    """
    valid_grad_units = ["Hz/m", "mT/m", "rad/ms/mm"]
    valid_slew_units = ["Hz/m/s", "mT/m/ms", "T/m/s", "rad/ms/mm/ms"]
    valid_units = valid_grad_units + valid_slew_units

    if from_unit not in valid_units:
        raise ValueError(
            "Invalid from_unit. Must be one of 'Hz/m', 'mT/m', or 'rad/ms/mm' for gradients;"
            "or must be one of 'Hz/m/s', 'mT/m/ms', 'T/m/s', 'rad/ms/mm/ms' for slew rate."
        )

    # Fixed error text: it previously listed the slew-rate units "for gradients".
    if to_unit != "" and to_unit not in valid_units:
        raise ValueError(
            "Invalid to_unit. Must be one of 'Hz/m', 'mT/m', or 'rad/ms/mm' for gradients;"
            "or must be one of 'Hz/m/s', 'mT/m/ms', 'T/m/s', 'rad/ms/mm/ms' for slew rate."
        )

    # Default target: the standard unit of the same kind as `from_unit`
    if to_unit == "":
        if from_unit in valid_grad_units:
            to_unit = valid_grad_units[0]
        elif from_unit in valid_slew_units:
            to_unit = valid_slew_units[0]

    # Convert to standard units (Hz/m for gradients, Hz/m/s for slew rates)
    # Grad units
    if from_unit == "Hz/m":
        standard = from_value
    elif from_unit == "mT/m":
        standard = from_value * 1e-3 * gamma
    elif from_unit == "rad/ms/mm":
        standard = from_value * 1e6 / (2 * np.pi)
    # Slew units
    elif from_unit == "Hz/m/s":
        standard = from_value
    elif from_unit == "mT/m/ms" or from_unit == "T/m/s":
        standard = from_value * gamma
    elif from_unit == "rad/ms/mm/ms":
        standard = from_value * 1e9 / (2 * np.pi)

    # Convert from standard units to the requested target unit
    # Grad units
    if to_unit == "Hz/m":
        out = standard
    elif to_unit == "mT/m":
        out = 1e3 * standard / gamma
    elif to_unit == "rad/ms/mm":
        out = standard * 2 * np.pi * 1e-6
    # Slew units
    elif to_unit == "Hz/m/s":
        out = standard
    elif to_unit == "mT/m/ms" or to_unit == "T/m/s":
        out = standard / gamma
    elif to_unit == "rad/ms/mm/ms":
        out = standard * 2 * np.pi * 1e-9

    return out

+ 74 - 0
LF_scanner/pypulseq/decompress_shape.py

@@ -0,0 +1,74 @@
+from types import SimpleNamespace
+
+import numpy as np
+
+
def decompress_shape(
    compressed_shape: SimpleNamespace, force_decompression: bool = False
) -> np.ndarray:
    """
    Decompress a gradient or pulse shape that was run-length compressed on its derivative. The input carries:
    - num_samples - number of samples in the uncompressed waveform
    - data - the compressed waveform

    See also `compress_shape.py`.

    Parameters
    ----------
    compressed_shape : SimpleNamespace
        Run-length encoded shape.
    force_decompression : bool, default=False

    Returns
    -------
    decompressed_shape : numpy.ndarray
        Decompressed shape.
    """
    packed = compressed_shape.data
    packed_len = len(packed)
    num_samples = int(compressed_shape.num_samples)

    # Equal lengths mean the shape was stored uncompressed
    if not force_decompression and num_samples == packed_len:
        return packed

    derivative = np.zeros(num_samples)  # Pre-allocated result (still the derivative)

    # A zero first-difference marks two equal consecutive samples, i.e. a
    # potential run-length marker "v v n"
    markers = np.where((packed[1:] - packed[:-1]) == 0.0)[0]

    src = 0  # Current position in the compressed data
    dst = 0  # Current position in the decompressed output

    for marker in markers:
        literal_count = marker - src
        # False positives occur e.g. when a value repeats 3 times ("v v v")
        if literal_count < 0:
            continue
        if literal_count > 0:
            # Copy the literal (uncompressed) stretch preceding the run
            derivative[dst : dst + literal_count] = packed[src:marker]
            src += literal_count
            dst += literal_count

        # Expand the run: "v v n" encodes value v repeated n + 2 times
        repeats = int(packed[src + 2] + 2)
        derivative[dst : dst + repeats] = packed[src]
        src += 3
        dst += repeats

    # Any trailing samples are literal; copy them verbatim
    if src <= packed_len - 1:
        assert packed_len - src == num_samples - dst
        derivative[dst:] = packed[src:]

    # Integrate the derivative back into the waveform
    return np.cumsum(derivative)

+ 316 - 0
LF_scanner/pypulseq/event_lib.py

@@ -0,0 +1,316 @@
+from types import SimpleNamespace
+from typing import Tuple, Union
+
+try:
+    from typing import Self
+except ImportError:
+    from typing import TypeVar
+
+    Self = TypeVar('Self', bound='EventLibrary')
+
+import math
+import numpy as np
+
+
class EventLibrary:
    """
    Defines an event library to maintain a list of events. Provides methods to insert new data and find existing data.

    Sequence Properties:
    - data - A struct array with field 'array' to store data of varying lengths, remaining compatible with codegen.
    - type - Type to distinguish events in the same class (e.g. trapezoids and arbitrary gradients)

    Sequence Methods:
    - find - Find an event in the library
    - insert - Add a new event to the library

    See also `Sequence.py`.

    Attributes
    ----------
    data : dict{int: numpy.ndarray | tuple}
        Key-value pairs of event IDs and corresponding event data.
    type : dict{int: str}
        Key-value pairs of event IDs and corresponding event types. Only populated
        for events that were inserted with a non-empty type.
    keymap : dict
        Reverse lookup: hashable data representation -> event ID.
    """

    def __init__(self, numpy_data: bool = False):
        self.data = dict()
        self.type = dict()
        self.keymap = dict()
        self.next_free_ID = 1  # ID handed out on the next insertion
        # If True, event data is stored as (read-only) numpy arrays and keyed by
        # their raw bytes; otherwise data is keyed as a tuple.
        self.numpy_data = numpy_data

    def __str__(self) -> str:
        s = "EventLibrary:"
        s += "\ndata: " + str(len(self.data))
        s += "\ntype: " + str(len(self.type))
        return s

    def _hashable_key(self, data):
        """Return the hashable keymap representation of `data`: raw bytes for numpy
        libraries, a tuple otherwise. Needed because numpy arrays are unhashable."""
        if self.numpy_data:
            return np.asarray(data).tobytes()
        return tuple(data)

    def find(self, new_data: np.ndarray) -> Tuple[int, bool]:
        """
        Finds data `new_data` in event library.

        Parameters
        ----------
        new_data : numpy.ndarray
            Data to be found in event library.

        Returns
        -------
        key_id : int
            Key of `new_data` in event library if found, otherwise the ID it
            would receive on insertion.
        found : bool
            If `new_data` was found in the event library or not.
        """
        key = self._hashable_key(new_data)

        if key in self.keymap:
            return self.keymap[key], True

        return self.next_free_ID, False

    def find_or_insert(
            self, new_data: np.ndarray, data_type: str = str()
    ) -> Tuple[int, bool]:
        """
        Lookup a data structure in the given library and return the index of the data in the library. If the data does
        not exist in the library it is inserted right away. The data is a 1xN array with event-specific data.

        See also  insert `pypulseq.Sequence.sequence.Sequence.add_block()`.

        Parameters
        ----------
        new_data : numpy.ndarray
            Data to be found (or added, if not found) in event library.
        data_type : str, default=str()
            Type of data. Only stored when non-empty.

        Returns
        -------
        key_id : int
            Key of `new_data` in event library.
        found : bool
            If `new_data` was found in the event library or not.
        """
        if self.numpy_data:
            new_data = np.asarray(new_data)
            # Stored arrays must stay immutable, otherwise the keymap entry
            # derived from their bytes would silently go stale.
            new_data.flags.writeable = False
            key = new_data.tobytes()
        else:
            key = tuple(new_data)

        if key in self.keymap:
            return self.keymap[key], True

        # Not found: insert under a fresh ID
        key_id = self.next_free_ID
        self.data[key_id] = new_data
        if data_type != str():
            self.type[key_id] = data_type
        self.keymap[key] = key_id
        self.next_free_ID = key_id + 1  # Update next_free_id

        return key_id, False

    def insert(self, key_id: int, new_data: np.ndarray, data_type: str = str()) -> int:
        """
        Add event to library under the given (or, for `key_id == 0`, the next free) ID.

        See also `pypulseq.event_library.EventLibrary.find()`.

        Parameters
        ----------
        key_id : int
            Key of `new_data`. Pass 0 to auto-assign the next free ID.
        new_data : numpy.ndarray
            Data to be inserted into event library.
        data_type : str, default=str()
            Data type of `new_data`. Only stored when non-empty.

        Returns
        -------
        key_id : int
            Key ID of inserted event.
        """
        if isinstance(key_id, float):
            key_id = int(key_id)

        if key_id == 0:
            key_id = self.next_free_ID

        if self.numpy_data:
            new_data = np.asarray(new_data)
            new_data.flags.writeable = False  # Keep stored arrays immutable
            key = new_data.tobytes()
        else:
            key = tuple(new_data)

        self.data[key_id] = new_data
        if data_type != str():
            self.type[key_id] = data_type

        self.keymap[key] = key_id

        if key_id >= self.next_free_ID:
            self.next_free_ID = key_id + 1  # Update next_free_id

        return key_id

    def get(self, key_id: int) -> dict:
        """
        Return the stored event as a dict with 'key', 'data' and 'type' entries.

        Parameters
        ----------
        key_id : int

        Returns
        -------
        dict
        """
        return {
            "key": key_id,
            "data": self.data[key_id],
            "type": self.type[key_id],
        }

    def out(self, key_id: int) -> SimpleNamespace:
        """
        Get element from library by key.

        See also `pypulseq.event_library.EventLibrary.find()`.

        Parameters
        ----------
        key_id : int

        Returns
        -------
        out : SimpleNamespace
            Namespace with `key`, `data` and, if the event carries one, `type`.
        """
        out = SimpleNamespace()
        out.key = key_id
        out.data = self.data[key_id]
        # Not every event has a type (see find_or_insert); only attach it if present
        if key_id in self.type:
            out.type = self.type[key_id]

        return out

    def update(
            self,
            key_id: int,
            old_data: np.ndarray,
            new_data: np.ndarray,
            data_type: str = str(),
    ):
        """
        Replace the data stored under `key_id` with `new_data`.

        Parameters
        ----------
        key_id : int
        old_data : numpy.ndarray (Ignored!)
        new_data : numpy.ndarray
        data_type : str, default=str()
        """
        if key_id in self.data:
            # Drop the stale reverse-lookup entry. The key is recomputed from the
            # stored data: testing `self.data[key_id] in self.keymap` directly
            # (as the previous implementation did) raises TypeError for numpy
            # arrays and lists, which are unhashable.
            old_key = self._hashable_key(self.data[key_id])
            if old_key in self.keymap:
                del self.keymap[old_key]

        self.insert(key_id, new_data, data_type)

    def update_data(
            self,
            key_id: int,
            old_data: np.ndarray,
            new_data: np.ndarray,
            data_type: str = str(),
    ):
        """
        Alias of `update()` kept for API compatibility.

        Parameters
        ----------
        key_id : int
        old_data : np.ndarray (Ignored!)
        new_data : np.ndarray
        data_type : str
        """
        self.update(key_id, old_data, new_data, data_type)

    def remove_duplicates(self, digits: Union[int, Tuple[int]]) -> Tuple[Self, dict]:
        """
        Remove duplicate events from this event library by rounding the data
        according to the significant `digits` specification, and then removing
        duplicate events.
        Returns a new event library, leaving the current one intact.

        Parameters
        ----------
        digits : Union[int, List[int]]
            For libraries with `numpy_data == True`:
                A single number specifying the number of significant digits
                after rounding.
            Otherwise:
                A tuple of numbers specifying the number of significant digits
                after rounding for each entry in the event data tuple.

        Returns
        -------
        new_library : EventLibrary
            Event library with the duplicate events removed
        mapping : dict
            Dictionary containing a mapping of IDs in the old library to IDs
            in the new library.
        """

        def round_data(data: Tuple[float], digits: Tuple[int]) -> Tuple[float]:
            """
            Round the data tuple to a specified number of significant digits,
            specified by `digits`. Rounding behaviour is similar to the {.Ng}
            format specifier if N > 0, and similar to {.0f} otherwise.
            """
            # 1e-12 guards log10(0); ceil(log10(|d|)) estimates the magnitude
            return tuple(round(d, dig - int(math.ceil(math.log10(abs(d) + 1e-12))) if dig > 0 else -dig) for d, dig in
                         zip(data, digits))

        def round_data_numpy(data: np.ndarray, digits: int) -> np.ndarray:
            """
            Round the data array to a specified number of significant digits,
            specified by `digits`. Rounding behaviour is similar to the {.Ng}
            format specifier if N > 0, and similar to {.0f} otherwise.
            """
            mags = 10 ** (digits - (np.ceil(np.log10(abs(data) + 1e-12))) if digits > 0 else -digits)
            result = np.round(data * mags) / mags
            result.flags.writeable = False  # Rounded arrays are stored; keep them immutable
            return result

        # Round library data based on `digits` specification
        if self.numpy_data:
            rounded_data = {x: round_data_numpy(self.data[x], digits) for x in self.data}
        else:
            rounded_data = {x: round_data(self.data[x], digits) for x in self.data}

        # Initialize filtered library
        new_library = EventLibrary(numpy_data=self.numpy_data)

        # Initialize ID mapping. Always include 0:0 to allow the mapping dict
        # to be used for mapping block_events (which can contain 0, i.e. no
        # event)
        mapping = {0: 0}

        # Recreate library using rounded values; duplicates collapse onto the
        # same new ID via find_or_insert
        for k, v in sorted(rounded_data.items()):
            mapping[k], _ = new_library.find_or_insert(v, self.type[k] if k in self.type else str())

        return new_library, mapping

+ 66 - 0
LF_scanner/pypulseq/make_adc.py

@@ -0,0 +1,66 @@
+from types import SimpleNamespace
+
+from LF_scanner.pypulseq.opts import Opts
+
+
def make_adc(
    num_samples: int,
    delay: float = 0,
    duration: float = 0,
    dwell: float = 0,
    freq_offset: float = 0,
    phase_offset: float = 0,
    system: Opts = Opts(),
) -> SimpleNamespace:
    """
    Create an ADC readout event.

    Exactly one of `dwell` or `duration` must be supplied; the other is derived
    from `num_samples`.

    Parameters
    ----------
    num_samples : int
        Number of readout samples.
    delay : float, default=0
        Delay in seconds (s) of ADC readout event. Extended to the system ADC
        dead time if shorter.
    duration : float, default=0
        Total duration in seconds (s) of the ADC readout event with
        `num_samples` number of samples.
    dwell : float, default=0
        Sampling interval in seconds (s) between two consecutive ADC samples
        (not the dead time — that comes from `system.adc_dead_time`).
    freq_offset : float, default=0
        Frequency offset of ADC readout event.
    phase_offset : float, default=0
        Phase offset of ADC readout event.
    system : Opts, default=Opts()
        System limits. Default is a system limits object initialised to default values.

    Returns
    -------
    adc : SimpleNamespace
        ADC readout event.

    Raises
    ------
    ValueError
        If neither or both of `dwell` and `duration` are defined.
    """
    adc = SimpleNamespace()
    adc.type = "adc"
    adc.num_samples = num_samples
    adc.dwell = dwell
    adc.delay = delay
    adc.freq_offset = freq_offset
    adc.phase_offset = phase_offset
    adc.dead_time = system.adc_dead_time  # Post-sampling dead time imposed by the system

    if (dwell == 0 and duration == 0) or (dwell > 0 and duration > 0):
        raise ValueError("Either dwell or duration must be defined, not both")

    if duration > 0:
        adc.dwell = duration / num_samples

    # Keep dwell and duration consistent regardless of which one was supplied.
    # (Previously `adc.duration` was only set when `dwell` was given, so the
    # attribute was missing after a duration-based call.)
    adc.duration = adc.dwell * num_samples

    # The ADC cannot start before the system dead time has elapsed
    if adc.dead_time > adc.delay:
        adc.delay = adc.dead_time

    return adc

+ 262 - 0
LF_scanner/pypulseq/make_adiabatic_pulse.py

@@ -0,0 +1,262 @@
+from types import SimpleNamespace
+from typing import Tuple, Union
+
+import numpy as np
+from sigpy.mri.rf import hypsec, wurst
+
+from LF_scanner.pypulseq import eps
+from LF_scanner.pypulseq.calc_duration import calc_duration
+from LF_scanner.pypulseq.calc_rf_center import calc_rf_center
+from LF_scanner.pypulseq.make_delay import make_delay
+from LF_scanner.pypulseq.make_trapezoid import make_trapezoid
+from LF_scanner.pypulseq.opts import Opts
+from LF_scanner.pypulseq.supported_labels_rf_use import get_supported_rf_uses
+
+
def make_adiabatic_pulse(
    pulse_type: str,
    adiabaticity: int = 4,
    bandwidth: int = 40000,
    beta: int = 800,
    delay: float = 0,
    duration: float = 10e-3,
    dwell: float = 0,
    freq_offset: float = 0,
    max_grad: float = 0,
    max_slew: float = 0,
    n_fac: int = 40,
    mu: float = 4.9,
    phase_offset: float = 0,
    return_gz: bool = False,
    return_delay: bool = False,
    slice_thickness: float = 0,
    system=Opts(),
    use: str = str(),
) -> Union[
    SimpleNamespace,
    Tuple[SimpleNamespace, SimpleNamespace, SimpleNamespace, SimpleNamespace],
    Tuple[SimpleNamespace, SimpleNamespace, SimpleNamespace],
]:
    """
    Make an adiabatic inversion pulse.

    Note: some parameters only affect certain pulse types and are ignored for other; e.g. bandwidth is ignored if
    type='hypsec'.

    The amplitude and frequency modulation waveforms are generated by `sigpy.mri.rf`:

    hypsec(n, beta, mu, dur)
        Hyperbolic secant adiabatic pulse; `mu` * `beta` becomes the amplitude of the
        frequency sweep.
        Reference: Baum, J., Tycko, R. and Pines, A. (1985). 'Broadband and adiabatic
        inversion of a two-level system by phase-modulated pulses'.
        Phys. Rev. A., 32:3435-3447.

    wurst(n, n_fac, bw, dur)
        WURST (wideband, uniform rate, smooth truncation) adiabatic inversion pulse.
        Reference: Kupce, E. and Freeman, R. (1995). 'Stretched Adiabatic Pulses for
        Broadband Spin Inversion'. J. Magn. Reson. Ser. A., 117:246-256.

    Parameters
    ----------
    pulse_type : str
        One of 'hypsec' or 'wurst' pulse types.
    adiabaticity : int, default=4
    bandwidth : int, default=40000
        Pulse bandwidth ('wurst' only).
    beta : int, default=800
        AM waveform parameter ('hypsec' only).
    delay : float, default=0
        Delay in seconds (s).
    duration : float, default=10e-3
        Pulse time (s).
    dwell : float, default=0
        RF raster time in seconds (s); defaults to `system.rf_raster_time` when 0.
    freq_offset : float, default=0
    max_grad : float, default=0
        Maximum gradient strength.
    max_slew : float, default=0
        Maximum slew rate.
    mu : float, default=4.9
        Constant determining amplitude of frequency sweep ('hypsec' only).
    n_fac : int, default=40
        Power to exponentiate to within AM term. ~20 or greater is typical ('wurst' only).
    phase_offset : float, default=0
        Phase offset.
    return_delay : bool, default=False
        Boolean flag to indicate if the delay has to be returned.
    return_gz : bool, default=False
        Boolean flag to indicate if the slice-selective gradient has to be returned.
    slice_thickness : float, default=0
    system : Opts, default=Opts()
        System limits.
    use : str
        Whether it is a 'refocusing' pulse (for k-space calculation). Defaults to
        'inversion' when empty.

    Returns
    -------
    rf : SimpleNamespace
        Adiabatic RF pulse event.
    gz : SimpleNamespace, optional
        Slice-selective trapezoid event.
    gzr : SimpleNamespace, optional
        Slice-select rephasing trapezoid event.
    delay : SimpleNamespace, optional
        Delay event.

    Raises
    ------
    ValueError
        If invalid pulse type is encountered.
        If invalid pulse use is encountered.
        If slice thickness is not provided but slice-selective trapezoid event is expected.
    """
    valid_pulse_types = ["hypsec", "wurst"]
    if pulse_type != "" and pulse_type not in valid_pulse_types:
        raise ValueError(
            f"Invalid type parameter. Must be one of {valid_pulse_types}. Passed: {pulse_type}"
        )
    valid_pulse_uses = get_supported_rf_uses()
    if use != "" and use not in valid_pulse_uses:
        raise ValueError(
            f"Invalid use parameter. Must be one of {valid_pulse_uses}. Passed: {use}"
        )

    if dwell == 0:
        dwell = system.rf_raster_time

    # Integer sample counts; eps guards against round-off just below a raster point
    n_raw = int(np.round(duration / dwell + eps))
    # Number of points must be divisible by 4 - requirement of sigpy.mri
    N = (n_raw // 4) * 4

    if pulse_type == "hypsec":
        am, fm = hypsec(n=N, beta=beta, mu=mu, dur=duration)
    elif pulse_type == "wurst":
        am, fm = wurst(n=N, n_fac=n_fac, bw=bandwidth, dur=duration)
    else:
        raise ValueError("Unsupported adiabatic pulse type.")

    # Phase modulation is the running integral of the frequency modulation
    pm = np.cumsum(fm) * dwell

    # Sample closest to the zero-crossing of the frequency sweep
    ifm = np.argmin(np.abs(fm))
    dfm = np.abs(fm)[ifm]

    # Find rate of change of frequency at the center of the pulse
    if dfm == 0:  # The sweep passes exactly through zero at sample `ifm`
        pm0 = pm[ifm]
        am0 = am[ifm]
        roc_fm0 = np.abs(fm[ifm + 1] - fm[ifm - 1]) / 2 / dwell
    else:  # We need to bracket the zero-crossing
        b = 1 if fm[ifm] * fm[ifm + 1] < 0 else -1

        # Linear interpolation to the exact zero-crossing
        pm0 = (pm[ifm] * fm[ifm + b] - pm[ifm + b] * fm[ifm]) / (fm[ifm + b] - fm[ifm])
        am0 = (am[ifm] * fm[ifm + b] - am[ifm + b] * fm[ifm]) / (fm[ifm + b] - fm[ifm])
        roc_fm0 = np.abs(fm[ifm] - fm[ifm + b]) / dwell

    pm -= pm0
    # Peak amplitude from the adiabatic condition
    a = (roc_fm0 * adiabaticity) ** 0.5 / 2 / np.pi / am0

    signal = a * am * np.exp(1j * pm)

    if N != n_raw:
        # Zero-pad the pulse back to the requested raw sample count.
        # (The previous implementation called np.zeros with the length in the
        # dtype position and built a Python list instead of a concatenated
        # array, which broke the shape of `signal`.)
        n_pad = n_raw - N
        signal = np.concatenate(
            (np.zeros(n_pad - n_pad // 2), signal, np.zeros(n_pad // 2))
        )
        N = n_raw

    # Sample times at the centre of each raster interval
    t = (np.arange(1, N + 1) - 0.5) * dwell

    rf = SimpleNamespace()
    rf.type = "rf"
    rf.signal = signal
    rf.t = t
    rf.shape_dur = N * dwell
    rf.freq_offset = freq_offset
    rf.phase_offset = phase_offset
    rf.dead_time = system.rf_dead_time
    rf.ringdown_time = system.rf_ringdown_time
    rf.delay = delay
    # Adiabatic pulses default to 'inversion' use
    rf.use = use if use != "" else "inversion"
    if rf.dead_time > rf.delay:
        rf.delay = rf.dead_time

    if return_gz:
        if slice_thickness <= 0:
            raise ValueError("Slice thickness must be provided")

        if max_grad > 0:
            system.max_grad = max_grad

        if max_slew > 0:
            system.max_slew = max_slew

        if pulse_type == "hypsec":
            # Effective sweep bandwidth of the hyperbolic secant pulse
            bandwidth = mu * beta / np.pi
        # For 'wurst' the user-supplied bandwidth is used directly
        # (pulse_type was already validated above)

        center_pos, _ = calc_rf_center(rf)

        amplitude = bandwidth / slice_thickness
        area = amplitude * duration
        gz = make_trapezoid(
            channel="z", system=system, flat_time=duration, flat_area=area
        )
        # Rephaser relative to the RF center position
        gzr = make_trapezoid(
            channel="z",
            system=system,
            area=-area * (1 - center_pos) - 0.5 * (gz.area - area),
        )

        if rf.delay > gz.rise_time:  # Round-up to gradient raster
            gz.delay = (
                np.ceil((rf.delay - gz.rise_time) / system.grad_raster_time)
                * system.grad_raster_time
            )

        if rf.delay < (gz.rise_time + gz.delay):
            rf.delay = gz.rise_time + gz.delay

    if return_delay:
        # Delay covering the pulse and its ringdown time. Always built as a
        # delay event (previously the raw float argument leaked through when
        # ringdown_time == 0).
        delay = make_delay(calc_duration(rf) + rf.ringdown_time)

    if return_gz and return_delay:
        return rf, gz, gzr, delay
    elif return_gz:
        return rf, gz, gzr
    elif return_delay:
        return rf, delay
    else:
        return rf

+ 77 - 0
LF_scanner/pypulseq/make_arbitrary_grad.py

@@ -0,0 +1,77 @@
+from types import SimpleNamespace
+
+import numpy as np
+
+from LF_scanner.pypulseq.opts import Opts
+
+
def make_arbitrary_grad(
    channel: str,
    waveform: np.ndarray,
    delay: float = 0,
    max_grad: float = 0,
    max_slew: float = 0,
    system: Opts = Opts(),
) -> SimpleNamespace:
    """
    Create a gradient event from an arbitrary waveform sampled on the gradient raster.

    See also `pypulseq.Sequence.sequence.Sequence.add_block()`.

    Parameters
    ----------
    channel : str
        Gradient axis; must be one of 'x', 'y' or 'z'.
    waveform : numpy.ndarray
        Arbitrary waveform samples.
    delay : float, default=0
        Delay in seconds (s).
    max_grad : float, default=0
        Maximum gradient strength; non-positive values fall back to the system limit.
    max_slew : float, default=0
        Maximum slew rate; non-positive values fall back to the system limit.
    system : Opts, default=Opts()
        System limits.

    Returns
    -------
    grad : SimpleNamespace
        Gradient event with arbitrary waveform.

    Raises
    ------
    ValueError
        If `channel` is not one of 'x', 'y' or 'z'.
        If the waveform violates the slew rate limit.
        If the waveform violates the gradient amplitude limit.
    """
    if channel not in ("x", "y", "z"):
        raise ValueError(
            f"Invalid channel. Must be one of x, y or z. Passed: {channel}"
        )

    # Fall back to system limits when no explicit limits were supplied
    max_grad = max_grad if max_grad > 0 else system.max_grad
    max_slew = max_slew if max_slew > 0 else system.max_slew

    g = waveform
    # Finite-difference slew rate between consecutive raster samples
    slew = np.squeeze(np.diff(g) / system.grad_raster_time)
    if max(abs(slew)) >= max_slew:
        raise ValueError(f"Slew rate violation {max(abs(slew)) / max_slew * 100}")
    if max(abs(g)) >= max_grad:
        raise ValueError(f"Gradient amplitude violation {max(abs(g)) / max_grad * 100}")

    grad = SimpleNamespace()
    grad.type = "grad"
    grad.channel = channel
    grad.waveform = g
    grad.delay = delay
    # True timing and aux shape data: samples sit at the centre of each raster interval
    grad.tt = (np.arange(len(g)) + 0.5) * system.grad_raster_time
    grad.shape_dur = len(g) * system.grad_raster_time
    # Edge values extrapolated by half a gradient raster step
    grad.first = (3 * g[0] - g[1]) * 0.5
    grad.last = (g[-1] * 3 - g[-2]) * 0.5

    return grad

+ 154 - 0
LF_scanner/pypulseq/make_arbitrary_rf.py

@@ -0,0 +1,154 @@
+from types import SimpleNamespace
+from typing import Tuple, Union
+
+import numpy as np
+
+from LF_scanner.pypulseq import make_delay, calc_duration
+from LF_scanner.pypulseq.make_trapezoid import make_trapezoid
+from LF_scanner.pypulseq.make_delay import make_delay
+from LF_scanner.pypulseq.calc_duration import calc_duration
+from LF_scanner.pypulseq.opts import Opts
+from LF_scanner.pypulseq.supported_labels_rf_use import get_supported_rf_uses
+
+
def make_arbitrary_rf(
    signal: np.ndarray,
    flip_angle: float,
    bandwidth: float = 0,
    delay: float = 0,
    dwell: float = 0,
    freq_offset: float = 0,
    max_grad: float = 0,
    max_slew: float = 0,
    phase_offset: float = 0,
    return_delay: bool = False,
    return_gz: bool = False,
    slice_thickness: float = 0,
    system: Opts = Opts(),
    time_bw_product: float = 0,
    use: str = str(),
) -> Union[SimpleNamespace, Tuple[SimpleNamespace, SimpleNamespace]]:
    """
    Create an RF pulse with the given pulse shape. The shape is scaled so its
    integral yields the requested flip angle.

    Parameters
    ----------
    signal : numpy.ndarray
        Arbitrary waveform (1-D, possibly complex).
    flip_angle : float
        Flip angle in radians.
    bandwidth : float, default=0
        Bandwidth in Hertz (Hz). Required when `return_gz=True`.
    delay : float, default=0
        Delay in seconds (s) of the RF pulse.
    dwell : float, default=0
        RF sample duration in seconds (s); defaults to `system.rf_raster_time` when 0.
    freq_offset : float, default=0
        Frequency offset in Hertz (Hz).
    max_grad : float, default=system.max_grad
        Maximum gradient strength of accompanying slice select trapezoidal event.
    max_slew : float, default=system.max_slew
        Maximum slew rate of accompanying slice select trapezoidal event.
    phase_offset : float, default=0
        Phase offset.
    return_delay : bool, default=False
        Boolean flag to indicate if delay has to be returned.
    return_gz : bool, default=False
        Boolean flag to indicate if slice-selective gradient has to be returned.
    slice_thickness : float, default=0
        Slice thickness of accompanying slice select trapezoidal event. The slice thickness determines the area of the
        slice select event.
    system : Opts, default=Opts()
        System limits.
    time_bw_product : float, default=0
        Time-bandwidth product; when positive it overrides `bandwidth` for the
        slice-select gradient calculation.
    use : str, default=str()
        Use of arbitrary radio-frequency pulse event. Must be one of 'excitation', 'refocusing' or 'inversion'.

    Returns
    -------
    rf : SimpleNamespace
        Radio-frequency pulse event with arbitrary pulse shape.
    gz : SimpleNamespace, optional
        Slice select trapezoidal gradient event accompanying the arbitrary radio-frequency pulse event.
    delay : SimpleNamespace, optional
        Delay event covering the pulse and its ringdown time. Only returned if `return_delay=True`.

    Raises
    ------
    ValueError
        If invalid `use` parameter is passed. Must be one of 'excitation', 'refocusing' or 'inversion'.
        If `signal` with ndim > 1 is passed.
        If `return_gz=True`, and `slice_thickness` and `bandwidth` are not passed.
    """
    valid_use_pulses = get_supported_rf_uses()
    if use != "" and use not in valid_use_pulses:
        raise ValueError(
            f"Invalid use parameter. Must be one of 'excitation', 'refocusing' or 'inversion'. Passed: {use}"
        )

    if dwell == 0:
        dwell = system.rf_raster_time

    signal = np.squeeze(signal)
    if signal.ndim > 1:
        raise ValueError(f"signal should have ndim=1. Passed ndim={signal.ndim}")
    # Scale the shape so its integral corresponds to the requested flip angle
    signal = signal / np.abs(np.sum(signal * dwell)) * flip_angle / (2 * np.pi)

    N = len(signal)
    duration = N * dwell
    # Sample times at the centre of each raster interval
    t = (np.arange(1, N + 1) - 0.5) * dwell

    rf = SimpleNamespace()
    rf.type = "rf"
    rf.signal = signal
    rf.t = t
    rf.shape_dur = duration
    rf.freq_offset = freq_offset
    rf.phase_offset = phase_offset
    rf.dead_time = system.rf_dead_time
    rf.ringdown_time = system.rf_ringdown_time
    rf.delay = delay

    if use != "":
        rf.use = use

    # The pulse cannot start before the system dead time has elapsed
    if rf.dead_time > rf.delay:
        rf.delay = rf.dead_time

    if return_gz:
        if slice_thickness <= 0:
            raise ValueError("Slice thickness must be provided.")
        if bandwidth <= 0:
            raise ValueError("Bandwidth of pulse must be provided.")

        if max_grad > 0:
            system.max_grad = max_grad
        if max_slew > 0:
            system.max_slew = max_slew

        BW = bandwidth
        if time_bw_product > 0:
            BW = time_bw_product / duration  # Time-bandwidth product takes precedence

        amplitude = BW / slice_thickness
        area = amplitude * duration
        gz = make_trapezoid(
            channel="z", system=system, flat_time=duration, flat_area=area
        )

        if rf.delay > gz.rise_time:
            # Round-up to gradient raster
            gz.delay = (
                np.ceil((rf.delay - gz.rise_time) / system.grad_raster_time)
                * system.grad_raster_time
            )

        if rf.delay < (gz.rise_time + gz.delay):
            rf.delay = gz.rise_time + gz.delay

    if return_delay:
        # Delay covering the pulse and its ringdown time. Always built as a
        # delay event (previously the raw float argument leaked through when
        # ringdown_time == 0).
        delay = make_delay(calc_duration(rf) + rf.ringdown_time)

    if return_gz and return_delay:
        return rf, gz, delay
    elif return_gz:
        return rf, gz
    elif return_delay:
        # Previously this case silently dropped the requested delay event
        return rf, delay
    else:
        return rf

+ 106 - 0
LF_scanner/pypulseq/make_block_pulse.py

@@ -0,0 +1,106 @@
+from types import SimpleNamespace
+from typing import Tuple, Union
+
+import numpy as np
+
+from LF_scanner.pypulseq.calc_duration import calc_duration
+from LF_scanner.pypulseq.make_delay import make_delay
+from LF_scanner.pypulseq.opts import Opts
+from LF_scanner.pypulseq.supported_labels_rf_use import get_supported_rf_uses
+
+
def make_block_pulse(
    flip_angle: float,
    bandwidth: float = 0,
    delay: float = 0,
    duration: float = 4e-3,
    freq_offset: float = 0,
    phase_offset: float = 0,
    return_delay: bool = False,
    system: Opts = Opts(),
    time_bw_product: float = 0,
    use: str = str(),
) -> Union[SimpleNamespace, Tuple[SimpleNamespace, SimpleNamespace]]:
    """
    Create a block (hard) RF pulse.

    Parameters
    ----------
    flip_angle : float
        Flip angle in radians.
    bandwidth : float, default=0
        Bandwidth in Hertz (Hz). Only used to derive the duration when
        `duration=0` is passed explicitly.
    delay : float, default=0
        Delay in seconds (s) before the pulse.
    duration : float, default=4e-3
        Duration in seconds (s). If explicitly set to 0, it is derived from
        `time_bw_product` and/or `bandwidth`.
    freq_offset : float, default=0
        Frequency offset in Hertz (Hz).
    phase_offset : float, default=0
        Phase offset.
    return_delay : bool, default=False
        Boolean flag to indicate if the delay event has to be returned.
    system : Opts, default=Opts()
        System limits.
    time_bw_product : float, default=0
        Time-bandwidth product. Only used (with `bandwidth`) to derive the
        duration when `duration=0` is passed explicitly.
    use : str, default=str()
        Use of radio-frequency block pulse event. Must be one of 'excitation', 'refocusing' or 'inversion'.

    Returns
    -------
    rf : SimpleNamespace
        Radio-frequency block pulse event.
    delay : SimpleNamespace, optional
        Delay event covering the pulse and its ringdown time. Only returned if
        `return_delay=True`.

    Raises
    ------
    ValueError
        If invalid `use` parameter is passed. Must be one of 'excitation', 'refocusing' or 'inversion'.
        If `duration=0` and `bandwidth` is not positive.
    """
    valid_use_pulses = get_supported_rf_uses()
    if use != "" and use not in valid_use_pulses:
        raise ValueError(
            f"Invalid use parameter. Must be one of 'excitation', 'refocusing' or 'inversion'. Passed: {use}"
        )

    if duration == 0:
        # Derive the duration from the requested bandwidth / time-bandwidth product
        if time_bw_product > 0 and bandwidth > 0:
            duration = time_bw_product / bandwidth
        elif bandwidth > 0:
            duration = 1 / (4 * bandwidth)
        else:
            raise ValueError("Either bandwidth or duration must be defined")

    N = np.round(duration / system.rf_raster_time)
    # Two-point shape: a constant (block) pulse is fully described by its endpoints
    t = np.array([0, N]) * system.rf_raster_time
    # Constant amplitude chosen so the pulse integral yields the requested flip angle
    signal = flip_angle / (2 * np.pi) / duration * np.ones_like(t)

    rf = SimpleNamespace()
    rf.type = "rf"
    rf.signal = signal
    rf.t = t
    rf.shape_dur = t[-1]
    rf.freq_offset = freq_offset
    rf.phase_offset = phase_offset
    rf.dead_time = system.rf_dead_time
    rf.ringdown_time = system.rf_ringdown_time
    rf.delay = delay

    if use != "":
        rf.use = use

    # The pulse cannot start before the system dead time has elapsed
    if rf.dead_time > rf.delay:
        rf.delay = rf.dead_time

    if return_delay:
        # Delay covering the pulse and its ringdown time. Always built as a
        # delay event (previously the raw float argument leaked through when
        # ringdown_time == 0).
        delay = make_delay(calc_duration(rf) + rf.ringdown_time)
        return rf, delay

    return rf

+ 30 - 0
LF_scanner/pypulseq/make_delay.py

@@ -0,0 +1,30 @@
+import numpy as np
+from types import SimpleNamespace
+
+
def make_delay(d: float) -> SimpleNamespace:
    """
    Create a delay event of duration `d`.

    Parameters
    ----------
    d : float
        Delay time in seconds (s).

    Returns
    -------
    delay : SimpleNamespace
        Delay event with ``type='delay'`` and ``delay=d``.

    Raises
    ------
    ValueError
        If `d` is negative, NaN or infinite.
    """
    # Reject NaN/inf and negative durations before building the event.
    if not np.isfinite(d) or d < 0:
        raise ValueError("Delay {:.2f} ms is invalid".format(d * 1e3))

    event = SimpleNamespace()
    event.type = "delay"
    event.delay = d
    return event

+ 47 - 0
LF_scanner/pypulseq/make_digital_output_pulse.py

@@ -0,0 +1,47 @@
+from types import SimpleNamespace
+
+from LF_scanner.pypulseq.opts import Opts
+
+
def make_digital_output_pulse(
    channel: str, delay: float = 0, duration: float = 4e-3, system: Opts = Opts()
) -> SimpleNamespace:
    """
    Create a digital output pulse event a.k.a. trigger. Creates an output trigger event on a given channel with optional
    given delay and duration.

    Parameters
    ----------
    channel : str
        Must be one of 'osc0','osc1', or 'ext1'.
    delay : float, default=0
        Delay in seconds (s).
    duration : float, default=4e-3
        Duration of trigger event in seconds (s). Clamped below to one gradient raster period.
    system : Opts, default=Opts()
        System limits.

    Returns
    -------
    trig : SimpleNamespace
        Trigger event.

    Raises
    ------
    ValueError
        If `channel` is invalid. Must be one of 'osc0','osc1', or 'ext1'.
    """
    valid_channels = ("osc0", "osc1", "ext1")
    if channel not in valid_channels:
        raise ValueError(
            f"Channel {channel} is invalid. Must be one of 'osc0','osc1', or 'ext1'."
        )

    trig = SimpleNamespace()
    trig.type = "output"
    trig.channel = channel
    trig.delay = delay
    # A trigger cannot be shorter than one gradient raster period.
    trig.duration = max(duration, system.grad_raster_time)

    return trig

+ 143 - 0
LF_scanner/pypulseq/make_extended_trapezoid.py

@@ -0,0 +1,143 @@
+from types import SimpleNamespace
+
+import numpy as np
+
+from LF_scanner.pypulseq import eps
+from LF_scanner.pypulseq.make_arbitrary_grad import make_arbitrary_grad
+from LF_scanner.pypulseq.opts import Opts
+from LF_scanner.pypulseq.points_to_waveform import points_to_waveform
+
+
def make_extended_trapezoid(
    channel: str,
    amplitudes: np.ndarray = np.zeros(1),
    convert_to_arbitrary: bool = False,
    max_grad: float = 0,
    max_slew: float = 0,
    skip_check: bool = False,
    system: Opts = Opts(),
    times: np.ndarray = np.zeros(1),
) -> SimpleNamespace:
    """
    Create a gradient by specifying a set of points (amplitudes) at specified time points (times) at a given channel
    with given system limits. Returns an arbitrary gradient object.

    See also:
    - `pypulseq.Sequence.sequence.Sequence.add_block()`
    - `pypulseq.opts.Opts`
    - `pypulseq.make_trapezoid.make_trapezoid()`

    Parameters
    ----------
    channel : str
        Orientation of extended trapezoidal gradient event. Must be one of 'x', 'y' or 'z'.
    convert_to_arbitrary : bool, default=False
        Boolean flag to indicate if the extended trapezoid gradient has to be converted into an arbitrary gradient.
    amplitudes : numpy.ndarray, default=np.zeros(1)
        Values defined at `times` time indices.
    max_grad : float, default=0
        Maximum gradient strength. Falls back to `system.max_grad` when not positive.
    max_slew : float, default=0
        Maximum slew rate. Falls back to `system.max_slew` when not positive.
    system : Opts, default=Opts()
        System limits.
    skip_check : bool, default=False
        Boolean flag to indicate if the non-zero-first-amplitude check is to be skipped.
    times : numpy.ndarray, default=np.zeros(1)
        Time points at which `amplitudes` defines amplitude values.

    Returns
    -------
    grad : SimpleNamespace
        Extended trapezoid gradient event.

    Raises
    ------
    ValueError
        If invalid `channel` is passed. Must be one of 'x', 'y' or 'z'.
        If `times` and `amplitudes` differ in length.
        If all elements in `times` are zero.
        If elements in `times` are not in ascending order or not distinct.
        If the last time point (or, without `convert_to_arbitrary`, any time point) is off the gradient raster.
        If first amplitude of a gradient is non-zero and does not connect to a previous block.
    """
    # NOTE(review): the np.ndarray default arguments are shared across calls;
    # safe only while they are never mutated in place here -- confirm if edited.
    if channel not in ["x", "y", "z"]:
        raise ValueError(
            f"Invalid channel. Must be one of 'x', 'y' or 'z'. Passed: {channel}"
        )

    times = np.asarray(times)
    amplitudes = np.asarray(amplitudes)

    if len(times) != len(amplitudes):
        raise ValueError("Times and amplitudes must have the same length.")

    if np.all(times == 0):
        raise ValueError("At least one of the given times must be non-zero")

    if np.any(np.diff(times) <= 0):
        raise ValueError(
            "Times must be in ascending order and all times must be distinct"
        )

    # The event's total duration must land on the gradient raster (within eps).
    if (
        np.abs(
            np.round(times[-1] / system.grad_raster_time) * system.grad_raster_time
            - times[-1]
        )
        > eps
    ):
        raise ValueError("The last time point must be on a gradient raster")

    # A delayed gradient starting at a non-zero amplitude cannot be realized
    # unless it continues the waveform of the previous block.
    if skip_check is False and times[0] > 0 and amplitudes[0] != 0:
        raise ValueError(
            "If first amplitude of a gradient is non-zero, it must connect to previous block"
        )

    if max_grad <= 0:
        max_grad = system.max_grad

    if max_slew <= 0:
        max_slew = system.max_slew

    if convert_to_arbitrary:
        # Represent the extended trapezoid on the regularly sampled time grid
        waveform = points_to_waveform(
            times=times, amplitudes=amplitudes, grad_raster_time=system.grad_raster_time
        )
        grad = make_arbitrary_grad(
            channel=channel,
            waveform=waveform,
            system=system,
            max_slew=max_slew,
            max_grad=max_grad,
            delay=times[0],
        )
    else:
        #  Keep the original possibly irregular sampling
        if np.any(
            np.abs(
                np.round(times / system.grad_raster_time) * system.grad_raster_time
                - times
            )
            > eps
        ):
            raise ValueError(
                'All time points must be on a gradient raster or "convert_to_arbitrary" option must be used.'
            )

        grad = SimpleNamespace()
        grad.type = "grad"
        grad.channel = channel
        grad.waveform = amplitudes
        # Delay is the (raster-aligned) start time; tt holds times relative to it.
        grad.delay = (
            np.round(times[0] / system.grad_raster_time) * system.grad_raster_time
        )
        grad.tt = times - grad.delay
        grad.shape_dur = (
            np.round(times[-1] / system.grad_raster_time) * system.grad_raster_time
        )

    # Expose boundary amplitudes so neighbouring blocks can be stitched together.
    grad.first = amplitudes[0]
    grad.last = amplitudes[-1]

    return grad

+ 133 - 0
LF_scanner/pypulseq/make_extended_trapezoid_area.py

@@ -0,0 +1,133 @@
+import math
+from types import SimpleNamespace
+from typing import Tuple
+
+import numpy as np
+from scipy.optimize import minimize
+
+from LF_scanner.pypulseq.make_extended_trapezoid import make_extended_trapezoid
+from LF_scanner.pypulseq.opts import Opts
+
+
def make_extended_trapezoid_area(
    area: float, channel: str, grad_end: float, grad_start: float, system: Opts
) -> Tuple[SimpleNamespace, np.array, np.array]:
    """
    Makes the shortest possible extended trapezoid with a given area which starts and ends (optionally) as non-zero
    gradient values.

    Parameters
    ----------
    channel : str
        Orientation of extended trapezoidal gradient event. Must be one of 'x', 'y' or 'z'.
    grad_start : float
        Starting non-zero gradient value.
    grad_end : float
        Ending non-zero gradient value.
    area : float
        Area of extended trapezoid.
    system: Opts
        System limits.

    Returns
    -------
    grad : SimpleNamespace
        Extended trapezoid event.
    times : numpy.ndarray
        Time points of the trapezoid vertices.
    amplitude : numpy.ndarray
        Gradient amplitudes at the vertices.
    """
    # Stay slightly below the hardware slew limit to leave headroom for
    # raster rounding of the ramp durations.
    SR = system.max_slew * 0.99

    Tp = 0
    # Objective 1: find a plateau amplitude Gp that hits the target area with
    # zero plateau time (pure triangle / ramp-only solution).
    obj1 = (
        lambda x: (
            area - __testGA(x, 0, SR, system.grad_raster_time, grad_start, grad_end)
        )
        ** 2
    )
    # Multi-start Nelder-Mead (at -max_grad, 0, +max_grad) to avoid local minima.
    arr_res = [
        minimize(fun=obj1, x0=-system.max_grad, method="Nelder-Mead"),
        minimize(fun=obj1, x0=0, method="Nelder-Mead"),
        minimize(fun=obj1, x0=system.max_grad, method="Nelder-Mead"),
    ]
    arr_res = np.array([(*res.x, res.fun) for res in arr_res])
    Gp, obj1val = arr_res[:, 0], arr_res[:, 1]
    i_min = np.argmin(obj1val)
    Gp = Gp[i_min]
    obj1val = obj1val[i_min]

    if obj1val > 1e-3 or np.abs(Gp) > system.max_grad:  # Search did not converge
        # Fall back to a true trapezoid: clamp Gp to max_grad and search for
        # the plateau duration T instead.
        Gp = system.max_grad * np.sign(Gp)
        obj2 = (
            lambda x: (
                area
                - __testGA(Gp, x, SR, system.grad_raster_time, grad_start, grad_end)
            )
            ** 2
        )
        res2 = minimize(fun=obj2, x0=0, method="Nelder-Mead")
        T, obj2val = *res2.x, res2.fun
        # NOTE(review): asserts are stripped under `python -O`; these act as
        # convergence checks, not input validation.
        assert obj2val < 1e-2

        Tp = np.ceil(T / system.grad_raster_time) * system.grad_raster_time

        # Fix the ramps
        Tru = (
            np.ceil(np.abs(Gp - grad_start) / SR / system.grad_raster_time)
            * system.grad_raster_time
        )
        Trd = (
            np.ceil(np.abs(Gp - grad_end) / SR / system.grad_raster_time)
            * system.grad_raster_time
        )
        # Final refinement: with all durations fixed on the raster, re-solve
        # for the exact plateau amplitude that reproduces the requested area.
        obj3 = lambda x: (area - __testGA1(x, Tru, Tp, Trd, grad_start, grad_end)) ** 2

        res = minimize(fun=obj3, x0=Gp, method="Nelder-Mead")
        Gp, obj3val = *res.x, res.fun
        assert obj3val < 1e-3  # Did the final search converge?

    assert Tp >= 0

    if Tp > 0:
        # Trapezoid: ramp up, plateau, ramp down.
        times = np.cumsum([0, Tru, Tp, Trd])
        amplitudes = [grad_start, Gp, Gp, grad_end]
    else:
        # Triangle (no plateau): recompute raster-aligned ramp durations for
        # the converged Gp from the first search.
        Tru = (
            np.ceil(np.abs(Gp - grad_start) / SR / system.grad_raster_time)
            * system.grad_raster_time
        )
        Trd = (
            np.ceil(np.abs(Gp - grad_end) / SR / system.grad_raster_time)
            * system.grad_raster_time
        )

        if Trd > 0:
            if Tru > 0:
                times = np.cumsum([0, Tru, Trd])
                amplitudes = np.array([grad_start, Gp, grad_end])
            else:
                # Degenerate: only a down-ramp remains.
                times = np.cumsum([0, Trd])
                amplitudes = np.array([grad_start, grad_end])
        else:
            # Degenerate: only an up-ramp remains.
            times = np.cumsum([0, Tru])
            amplitudes = np.array([grad_start, grad_end])

    grad = make_extended_trapezoid(
        channel=channel, system=system, times=times, amplitudes=amplitudes
    )
    # Store the analytically computed area of the realized waveform.
    grad.area = __testGA1(Gp, Tru, Tp, Trd, grad_start, grad_end)

    assert np.abs(grad.area - area) < 1e-3

    return grad, times, amplitudes
+
+
def __testGA(Gp, Tp, SR, dT, Gs, Ge):
    """Area of a trapezoid with plateau `Gp`/`Tp` and slew-limited ramps from `Gs` and to `Ge`,
    with ramp durations rounded up to whole raster periods `dT`."""
    ramp_up = np.ceil(np.abs(Gp - Gs) / SR / dT) * dT
    ramp_down = np.ceil(np.abs(Gp - Ge) / SR / dT) * dT
    return __testGA1(Gp, ramp_up, Tp, ramp_down, Gs, Ge)
+
+
def __testGA1(Gp, Tru, Tp, Trd, Gs, Ge):
    """Area of an extended trapezoid: two trapezoidal ramp contributions plus the flat top."""
    ramp_up_area = 0.5 * (Gs + Gp) * Tru
    flat_area = Gp * Tp
    ramp_down_area = 0.5 * (Gp + Ge) * Trd
    return ramp_up_area + flat_area + ramp_down_area

+ 179 - 0
LF_scanner/pypulseq/make_gauss_pulse.py

@@ -0,0 +1,179 @@
+import math
+from types import SimpleNamespace
+from typing import Tuple, Union
+
+import numpy as np
+
+from LF_scanner.pypulseq.calc_duration import calc_duration
+from LF_scanner.pypulseq.make_delay import make_delay
+from LF_scanner.pypulseq.make_trapezoid import make_trapezoid
+from LF_scanner.pypulseq.opts import Opts
+
+
def make_gauss_pulse(
    flip_angle: float,
    apodization: float = 0,
    bandwidth: float = 0,
    center_pos: float = 0.5,
    delay: float = 0,
    dwell: float = 0,
    duration: float = 4e-3,
    freq_offset: float = 0,
    max_grad: float = 0,
    max_slew: float = 0,
    phase_offset: float = 0,
    return_gz: bool = False,
    return_delay: bool = False,
    slice_thickness: float = 0,
    system: Opts = Opts(),
    time_bw_product: float = 4,
    use: str = str(),
) -> Union[
    SimpleNamespace,
    Tuple[SimpleNamespace, SimpleNamespace, SimpleNamespace],
    Tuple[SimpleNamespace, SimpleNamespace, SimpleNamespace, SimpleNamespace],
]:
    """
    Create a [optionally slice selective] Gauss pulse.

    See also `pypulseq.Sequence.sequence.Sequence.add_block()`.

    Parameters
    ----------
    flip_angle : float
        Flip angle in radians.
    apodization : float, default=0
        Apodization.
    bandwidth : float, default=0
        Bandwidth in Hertz (Hz). If zero, derived from `time_bw_product / duration`.
    center_pos : float, default=0.5
        Position of peak within the pulse; 0.5 corresponds to midway.
    delay : float, default=0
        Delay in seconds (s).
    dwell : float, default=0
        Sample spacing of the RF shape in seconds (s). If zero, `system.rf_raster_time` is used.
    duration : float, default=4e-3
        Duration in seconds (s).
    freq_offset : float, default=0
        Frequency offset in Hertz (Hz).
    max_grad : float, default=0
        Maximum gradient strength of accompanying slice select trapezoidal event.
    max_slew : float, default=0
        Maximum slew rate of accompanying slice select trapezoidal event.
    phase_offset : float, default=0
        Phase offset in Hertz (Hz).
    return_delay : bool, default=False
        Boolean flag to indicate if the delay event has to be returned.
    return_gz : bool, default=False
        Boolean flag to indicate if the slice-selective gradient has to be returned.
    slice_thickness : float, default=0
        Slice thickness of accompanying slice select trapezoidal event. The slice thickness determines the area of the
        slice select event.
    system : Opts, default=Opts()
        System limits.
    time_bw_product : float, default=4
        Time-bandwidth product.
    use : str, default=str()
        Use of radio-frequency gauss pulse event. Must be one of 'excitation', 'refocusing' or 'inversion'.

    Returns
    -------
    rf : SimpleNamespace
        Radio-frequency gauss pulse event.
    gz : SimpleNamespace, optional
        Accompanying slice select trapezoidal gradient event. Returned only if `return_gz=True`.
    gzr : SimpleNamespace, optional
        Accompanying slice select rephasing trapezoidal gradient event. Returned only if `return_gz=True`.
    delay : SimpleNamespace, optional
        Delay event. Returned only if `return_delay=True`.

    Raises
    ------
    ValueError
        If invalid `use` is passed. Must be one of 'excitation', 'refocusing' or 'inversion'.
        If `return_gz=True` and `slice_thickness` was not passed.
    """
    valid_use_pulses = ["excitation", "refocusing", "inversion"]
    if use != "" and use not in valid_use_pulses:
        raise ValueError(
            f"Invalid use parameter. Must be one of 'excitation', 'refocusing' or 'inversion'. Passed: {use}"
        )

    if dwell == 0:
        dwell = system.rf_raster_time

    if bandwidth == 0:
        BW = time_bw_product / duration
    else:
        BW = bandwidth
    alpha = apodization
    N = int(np.round(duration / dwell))
    # Sample times at mid-dwell positions; tt is centered around the peak.
    t = (np.arange(1, N + 1) - 0.5) * dwell
    tt = t - (duration * center_pos)
    # Cosine apodization window applied to the Gaussian envelope.
    window = 1 - alpha + alpha * np.cos(2 * np.pi * tt / duration)
    signal = window * __gauss(BW * tt)
    # Scale so the integral of the shape yields the requested flip angle.
    flip = np.sum(signal) * dwell * 2 * np.pi
    signal = signal * flip_angle / flip

    rf = SimpleNamespace()
    rf.type = "rf"
    rf.signal = signal
    rf.t = t
    rf.shape_dur = N * dwell
    rf.freq_offset = freq_offset
    rf.phase_offset = phase_offset
    rf.dead_time = system.rf_dead_time
    rf.ringdown_time = system.rf_ringdown_time
    rf.delay = delay
    if use != "":
        rf.use = use

    # The RF event may not start before the transmitter dead time has elapsed.
    if rf.dead_time > rf.delay:
        rf.delay = rf.dead_time

    if return_gz:
        if slice_thickness == 0:
            raise ValueError("Slice thickness must be provided")

        # NOTE(review): these assignments mutate the shared `system` Opts
        # object passed by the caller -- confirm this side effect is intended.
        if max_grad > 0:
            system.max_grad = max_grad

        if max_slew > 0:
            system.max_slew = max_slew

        amplitude = BW / slice_thickness
        area = amplitude * duration
        gz = make_trapezoid(
            channel="z", system=system, flat_time=duration, flat_area=area
        )
        # Rephaser compensates the post-peak slice-select area plus half the ramp area.
        gzr = make_trapezoid(
            channel="z",
            system=system,
            area=-area * (1 - center_pos) - 0.5 * (gz.area - area),
        )

        # Align RF and gradient: delay the gradient (on the raster) if the RF
        # starts later, otherwise delay the RF to the gradient flat top.
        if rf.delay > gz.rise_time:
            gz.delay = (
                np.ceil((rf.delay - gz.rise_time) / system.grad_raster_time)
                * system.grad_raster_time
            )

        if rf.delay < (gz.rise_time + gz.delay):
            rf.delay = gz.rise_time + gz.delay

    if rf.ringdown_time > 0 and return_delay:
        delay = make_delay(calc_duration(rf) + rf.ringdown_time)

    # Following 2 lines of code are workarounds for numpy returning 3.14... for np.angle(-0.00...)
    negative_zero_indices = np.where(rf.signal == -0.0)
    rf.signal[negative_zero_indices] = 0

    if return_gz and return_delay:
        return rf, gz, gzr, delay
    elif return_gz:
        return rf, gz, gzr
    else:
        return rf
+
+
def __gauss(x: np.ndarray) -> np.ndarray:
    """Element-wise Gaussian kernel exp(-pi * x**2)."""
    return np.exp(-np.pi * x * x)

+ 56 - 0
LF_scanner/pypulseq/make_label.py

@@ -0,0 +1,56 @@
+from types import SimpleNamespace
+from typing import Union
+
+from LF_scanner.pypulseq.supported_labels_rf_use import get_supported_labels
+
+
def make_label(
    label: str, type: str, value: Union[bool, float, int]
) -> SimpleNamespace:
    """
    Create an ADC Label.

    Parameters
    ----------
    type : str
        Label type. Must be one of 'SET' or 'INC'.
    label : str
        Must be one of the labels returned by
        `pypulseq.supported_labels_rf_use.get_supported_labels()`, e.g. 'SLC', 'SEG', 'REP', 'AVG', 'SET',
        'ECO', 'PHS', 'LIN', 'PAR', 'NAV', 'REV', or 'SMS'.
    value : bool, float or int
        Label value.

    Returns
    -------
    out : SimpleNamespace
        Label object with `type` ('labelset' or 'labelinc'), `label` and `value`.

    Raises
    ------
    ValueError
        If an unsupported `label` was passed.
        If `type` is not one of 'SET' or 'INC'.
        If `value` is not a valid numerical or logical value.
    """
    arr_supported_labels = get_supported_labels()

    if label not in arr_supported_labels:
        # Report the actual supported set instead of a hard-coded list: the
        # previous message was mistyped (missing quote before NAV) and could
        # drift out of sync with get_supported_labels().
        raise ValueError(f"Invalid label. Must be one of {arr_supported_labels}.")
    if type not in ("SET", "INC"):
        raise ValueError("Invalid type. Must be one of 'SET' or 'INC'.")
    # bool is accepted explicitly for readability, although it subclasses int.
    if not isinstance(value, (bool, float, int)):
        raise ValueError("Must supply a valid numerical or logical value.")

    out = SimpleNamespace()
    out.type = "labelset" if type == "SET" else "labelinc"
    out.label = label
    out.value = value

    return out

+ 268 - 0
LF_scanner/pypulseq/make_sigpy_pulse.py

@@ -0,0 +1,268 @@
+import math
+from types import SimpleNamespace
+from typing import Tuple, Union
+
+import numpy as np
+import sigpy.mri.rf as rf
+import sigpy.plot as pl
+
+from LF_scanner.pypulseq.make_trapezoid import make_trapezoid
+from LF_scanner.pypulseq.opts import Opts
+from LF_scanner.pypulseq.sigpy_pulse_opts import SigpyPulseOpts
+
+
def sigpy_n_seq(
    flip_angle: float,
    delay: float = 0,
    duration: float = 4e-3,
    freq_offset: float = 0,
    center_pos: float = 0.5,
    max_grad: float = 0,
    max_slew: float = 0,
    phase_offset: float = 0,
    return_gz: bool = True,
    slice_thickness: float = 0,
    system: Opts = Opts(),
    time_bw_product: float = 4,
    pulse_cfg: SigpyPulseOpts = SigpyPulseOpts(),
    use: str = str(),
) -> Union[
    SimpleNamespace,
    Tuple[SimpleNamespace, SimpleNamespace, SimpleNamespace, SimpleNamespace],
]:
    """
    Creates a radio-frequency pulse event using the sigpy rf pulse library (SLR or SMS design) and optionally
    accompanying slice select and slice select rephasing trapezoidal gradient events.

    Parameters
    ----------
    flip_angle : float
        Flip angle in radians.
    center_pos : float, optional, default=0.5
        Position of peak within the pulse; 0.5 corresponds to midway.
    delay : float, optional, default=0
        Delay in seconds (s).
    duration : float, optional, default=4e-3
        Duration in seconds (s).
    freq_offset : float, optional, default=0
        Frequency offset in Hertz (Hz).
    max_grad : float, optional, default=0
        Maximum gradient strength of accompanying slice select trapezoidal event.
    max_slew : float, optional, default=0
        Maximum slew rate of accompanying slice select trapezoidal event.
    phase_offset : float, optional, default=0
        Phase offset in Hertz (Hz).
    return_gz : bool, default=True
        Boolean flag to indicate if slice-selective gradients have to be returned.
    slice_thickness : float, optional, default=0
        Slice thickness of accompanying slice select trapezoidal event. The slice thickness determines the area of the
        slice select event.
    system : Opts, optional
        System limits. Default is a system limits object initialised to default values.
    time_bw_product : float, optional, default=4
        Time-bandwidth product.
    pulse_cfg : SigpyPulseOpts, optional
        Pulse design configuration; `pulse_cfg.pulse_type` selects the design ('slr' or 'sms').
    use : str, optional, default=str()
        Use of radio-frequency pulse. Must be one of 'excitation', 'refocusing' or 'inversion'.

    Returns
    -------
    rfp : SimpleNamespace
        Radio-frequency pulse event.
    gz : SimpleNamespace, optional
        Accompanying slice select trapezoidal gradient event. Returned only if `return_gz=True`.
    gzr : SimpleNamespace, optional
        Accompanying slice select rephasing trapezoidal gradient event. Returned only if `return_gz=True`.
    pulse : numpy.ndarray, optional
        Raw sigpy pulse design. Returned only if `return_gz=True`.

    Raises
    ------
    ValueError
        If invalid `use` parameter was passed. Must be one of 'excitation', 'refocusing' or 'inversion'.
        If `pulse_cfg.pulse_type` is not one of 'slr' or 'sms'.
        If `return_gz=True` and `slice_thickness` was not provided.
    """

    valid_use_pulses = ["excitation", "refocusing", "inversion"]
    if use != "" and use not in valid_use_pulses:
        raise ValueError(
            f"Invalid use parameter. Must be one of 'excitation', 'refocusing' or 'inversion'. Passed: {use}"
        )

    # Dispatch on the configured design; an unknown pulse type previously fell
    # through and crashed later with a NameError on `signal` -- fail fast here.
    if pulse_cfg.pulse_type == "slr":
        [signal, t, pulse] = make_slr(
            flip_angle=flip_angle,
            time_bw_product=time_bw_product,
            duration=duration,
            system=system,
            pulse_cfg=pulse_cfg,
            disp=True,
        )
    elif pulse_cfg.pulse_type == "sms":
        [signal, t, pulse] = make_sms(
            flip_angle=flip_angle,
            time_bw_product=time_bw_product,
            duration=duration,
            system=system,
            pulse_cfg=pulse_cfg,
            disp=True,
        )
    else:
        raise ValueError(
            f"Invalid pulse type. Must be one of 'slr' or 'sms'. Passed: {pulse_cfg.pulse_type}"
        )

    rfp = SimpleNamespace()
    rfp.type = "rf"
    rfp.signal = signal
    rfp.t = t
    # NOTE(review): unlike the other make_* pulse helpers, no `shape_dur` is
    # set on this event -- confirm downstream code does not require it.
    rfp.freq_offset = freq_offset
    rfp.phase_offset = phase_offset
    rfp.dead_time = system.rf_dead_time
    rfp.ringdown_time = system.rf_ringdown_time
    rfp.delay = delay

    if use != "":
        rfp.use = use

    # The RF event may not start before the transmitter dead time has elapsed.
    if rfp.dead_time > rfp.delay:
        rfp.delay = rfp.dead_time

    if return_gz:
        if slice_thickness == 0:
            raise ValueError("Slice thickness must be provided")

        # NOTE(review): these assignments mutate the shared `system` Opts
        # object passed by the caller -- confirm this side effect is intended.
        if max_grad > 0:
            system.max_grad = max_grad

        if max_slew > 0:
            system.max_slew = max_slew
        BW = time_bw_product / duration
        amplitude = BW / slice_thickness
        area = amplitude * duration
        gz = make_trapezoid(
            channel="z", system=system, flat_time=duration, flat_area=area
        )
        # Rephaser compensates the post-peak slice-select area plus half the ramp area.
        gzr = make_trapezoid(
            channel="z",
            system=system,
            area=-area * (1 - center_pos) - 0.5 * (gz.area - area),
        )

        # Align RF and gradient: delay the gradient (on the raster) if the RF
        # starts later, otherwise delay the RF to the gradient flat top.
        if rfp.delay > gz.rise_time:
            gz.delay = (
                math.ceil((rfp.delay - gz.rise_time) / system.grad_raster_time)
                * system.grad_raster_time
            )

        if rfp.delay < (gz.rise_time + gz.delay):
            rfp.delay = gz.rise_time + gz.delay

    # Append zero samples on a 1 us grid to cover the transmitter ringdown.
    if rfp.ringdown_time > 0:
        t_fill = np.arange(1, round(rfp.ringdown_time / 1e-6) + 1) * 1e-6
        rfp.t = np.concatenate((rfp.t, rfp.t[-1] + t_fill))
        rfp.signal = np.concatenate((rfp.signal, np.zeros(len(t_fill))))

    # Following 2 lines of code are workarounds for numpy returning 3.14... for np.angle(-0.00...)
    negative_zero_indices = np.where(rfp.signal == -0.0)
    rfp.signal[negative_zero_indices] = 0

    if return_gz:
        return rfp, gz, gzr, pulse
    else:
        return rfp
+
+
def make_slr(
    flip_angle: float,
    time_bw_product: float = 4,
    duration: float = 0,
    system: Opts = Opts(),
    pulse_cfg: SigpyPulseOpts = SigpyPulseOpts(),
    disp: bool = False,
):
    """
    Design an SLR pulse with `sigpy.mri.rf.slr.dzrf` and scale it to the requested flip angle.

    Parameters
    ----------
    flip_angle : float
        Flip angle in radians.
    time_bw_product : float, default=4
        Time-bandwidth product.
    duration : float, default=0
        Pulse duration in seconds (s).
    system : Opts, default=Opts()
        System limits (supplies `rf_raster_time`).
    pulse_cfg : SigpyPulseOpts, default=SigpyPulseOpts()
        SLR design parameters (ptype, ftype, d1, d2, cancel_alpha_phs).
    disp : bool, default=False
        If True, plot the design and a simulated profile via sigpy.

    Returns
    -------
    signal : numpy.ndarray
        Flip-angle-scaled RF samples.
    t : numpy.ndarray
        Sample time points in seconds (s).
    pulse : numpy.ndarray
        Raw sigpy SLR design (unscaled).
    """
    # NOTE(review): N assumes a fixed 1 us sample spacing while t is built from
    # system.rf_raster_time; these disagree if rf_raster_time != 1e-6 -- confirm.
    N = int(round(duration / 1e-6))
    t = np.arange(1, N + 1) * system.rf_raster_time

    # Insert sigpy
    ptype = pulse_cfg.ptype
    ftype = pulse_cfg.ftype
    d1 = pulse_cfg.d1
    d2 = pulse_cfg.d2
    cancel_alpha_phs = pulse_cfg.cancel_alpha_phs

    pulse = rf.slr.dzrf(
        n=N,
        tb=time_bw_product,
        ptype=ptype,
        ftype=ftype,
        d1=d1,
        d2=d2,
        cancel_alpha_phs=cancel_alpha_phs,
    )
    # Scale so the integral of the shape yields the requested flip angle.
    flip = np.sum(pulse) * system.rf_raster_time * 2 * np.pi
    signal = pulse * flip_angle / flip

    if disp:
        pl.LinePlot(pulse)
        pl.LinePlot(signal)

        # Simulate it
        [a, b] = rf.sim.abrm(
            pulse,
            np.arange(
                -20 * time_bw_product, 20 * time_bw_product, 40 * time_bw_product / 2000
            ),
            True,
        )
        Mxy = 2 * np.multiply(np.conj(a), b)
        pl.LinePlot(Mxy)

    return signal, t, pulse
+
+
def make_sms(
    flip_angle: float,
    time_bw_product: float = 4,
    duration: float = 0,
    system: Opts = Opts(),
    pulse_cfg: SigpyPulseOpts = SigpyPulseOpts(),
    disp: bool = False,
):
    """
    Design a simultaneous-multi-slice (multiband) pulse with sigpy: an SLR base
    pulse modulated by `sigpy.mri.rf.multiband.mb_rf`, scaled to the requested flip angle.

    Parameters
    ----------
    flip_angle : float
        Flip angle in radians.
    time_bw_product : float, default=4
        Time-bandwidth product.
    duration : float, default=0
        Pulse duration in seconds (s).
    system : Opts, default=Opts()
        System limits (supplies `rf_raster_time`).
    pulse_cfg : SigpyPulseOpts, default=SigpyPulseOpts()
        Design parameters (ptype, ftype, d1, d2, cancel_alpha_phs, n_bands, band_sep, phs_0_pt).
    disp : bool, default=False
        If True, plot the designs and a simulated profile via sigpy.

    Returns
    -------
    signal : numpy.ndarray
        Flip-angle-scaled multiband RF samples.
    t : numpy.ndarray
        Sample time points in seconds (s).
    pulse : numpy.ndarray
        Raw sigpy multiband design (unscaled).
    """
    # NOTE(review): N assumes a fixed 1 us sample spacing while t is built from
    # system.rf_raster_time; these disagree if rf_raster_time != 1e-6 -- confirm.
    N = int(round(duration / 1e-6))
    t = np.arange(1, N + 1) * system.rf_raster_time

    # Insert sigpy
    ptype = pulse_cfg.ptype
    ftype = pulse_cfg.ftype
    d1 = pulse_cfg.d1
    d2 = pulse_cfg.d2
    cancel_alpha_phs = pulse_cfg.cancel_alpha_phs
    n_bands = pulse_cfg.n_bands
    band_sep = pulse_cfg.band_sep
    phs_0_pt = pulse_cfg.phs_0_pt

    pulse_in = rf.slr.dzrf(
        n=N,
        tb=time_bw_product,
        ptype=ptype,
        ftype=ftype,
        d1=d1,
        d2=d2,
        cancel_alpha_phs=cancel_alpha_phs,
    )
    # Multiband-modulate the single-band SLR design.
    pulse = rf.multiband.mb_rf(
        pulse_in, n_bands=n_bands, band_sep=band_sep, phs_0_pt=phs_0_pt
    )

    # Scale so the integral of the shape yields the requested flip angle.
    flip = np.sum(pulse) * system.rf_raster_time * 2 * np.pi
    signal = pulse * flip_angle / flip

    if disp:
        pl.LinePlot(pulse_in)
        pl.LinePlot(pulse)
        pl.LinePlot(signal)
        # Simulate it
        [a, b] = rf.sim.abrm(
            pulse,
            np.arange(
                -20 * time_bw_product, 20 * time_bw_product, 40 * time_bw_product / 2000
            ),
            True,
        )
        Mxy = 2 * np.multiply(np.conj(a), b)
        pl.LinePlot(Mxy)

    return signal, t, pulse

+ 172 - 0
LF_scanner/pypulseq/make_sinc_pulse.py

@@ -0,0 +1,172 @@
+import math
+from types import SimpleNamespace
+from typing import Tuple, Union
+
+import numpy as np
+
+from LF_scanner.pypulseq import make_delay, calc_duration
+from LF_scanner.pypulseq.make_trapezoid import make_trapezoid
+from LF_scanner.pypulseq.opts import Opts
+from LF_scanner.pypulseq.supported_labels_rf_use import get_supported_rf_uses
+
+
def make_sinc_pulse(
    flip_angle: float,
    apodization: float = 0,
    delay: float = 0,
    duration: float = 4e-3,
    dwell: float = 0,
    center_pos: float = 0.5,
    freq_offset: float = 0,
    max_grad: float = 0,
    max_slew: float = 0,
    phase_offset: float = 0,
    return_delay: bool = False,
    return_gz: bool = False,
    slice_thickness: float = 0,
    system: Opts = Opts(),
    time_bw_product: float = 4,
    use: str = str(),
) -> Union[
    SimpleNamespace,
    Tuple[SimpleNamespace, SimpleNamespace, SimpleNamespace],
    Tuple[SimpleNamespace, SimpleNamespace, SimpleNamespace, SimpleNamespace],
]:
    """
    Creates a radio-frequency sinc pulse event and optionally accompanying slice select and slice select rephasing
    trapezoidal gradient events.

    Parameters
    ----------
    flip_angle : float
        Flip angle in radians.
    apodization : float, default=0
        Apodization.
    center_pos : float, default=0.5
        Position of peak within the pulse; 0.5 corresponds to midway.
    delay : float, default=0
        Delay in seconds (s).
    duration : float, default=4e-3
        Duration in seconds (s).
    dwell : float, default=0
        Sample spacing of the RF shape in seconds (s). If zero, `system.rf_raster_time` is used.
    freq_offset : float, default=0
        Frequency offset in Hertz (Hz).
    max_grad : float, default=0
        Maximum gradient strength of accompanying slice select trapezoidal event.
    max_slew : float, default=0
        Maximum slew rate of accompanying slice select trapezoidal event.
    phase_offset : float, default=0
        Phase offset in Hertz (Hz).
    return_delay : bool, default=False
        Boolean flag to indicate if the delay event has to be returned.
    return_gz : bool, default=False
        Boolean flag to indicate if slice-selective gradient has to be returned.
    slice_thickness : float, default=0
        Slice thickness of accompanying slice select trapezoidal event. The slice thickness determines the area of the
        slice select event.
    system : Opts, default=Opts()
        System limits. Default is a system limits object initialised to default values.
    time_bw_product : float, default=4
        Time-bandwidth product.
    use : str, default=str()
        Use of radio-frequency sinc pulse. Must be one of 'excitation', 'refocusing' or 'inversion'.

    See also `pypulseq.Sequence.sequence.Sequence.add_block()`.

    Returns
    -------
    rf : SimpleNamespace
        Radio-frequency sinc pulse event.
    gz : SimpleNamespace, optional
        Accompanying slice select trapezoidal gradient event. Returned only if `return_gz=True`.
    gzr : SimpleNamespace, optional
        Accompanying slice select rephasing trapezoidal gradient event. Returned only if `return_gz=True`.

    Raises
    ------
    ValueError
        If invalid `use` parameter was passed. Must be one of 'excitation', 'refocusing' or 'inversion'.
        If `duration` is not positive.
        If `return_gz=True` and `slice_thickness` was not provided.
    """
    valid_pulse_uses = get_supported_rf_uses()
    if use != "" and use not in valid_pulse_uses:
        raise ValueError(
            f"Invalid use parameter. Must be one of {valid_pulse_uses}. Passed: {use}"
        )

    if dwell == 0:
        dwell = system.rf_raster_time

    if duration <= 0:
        raise ValueError("RF pulse duration must be positive.")

    BW = time_bw_product / duration
    alpha = apodization
    N = int(np.round(duration / dwell))
    # Sample times at mid-dwell positions; tt is centered around the peak.
    t = (np.arange(1, N + 1) - 0.5) * dwell
    tt = t - (duration * center_pos)
    # Cosine apodization window applied to the sinc envelope.
    window = 1 - alpha + alpha * np.cos(2 * np.pi * tt / duration)
    signal = np.multiply(window, np.sinc(BW * tt))
    # Scale so the integral of the shape yields the requested flip angle.
    flip = np.sum(signal) * dwell * 2 * np.pi
    signal = signal * flip_angle / flip

    rf = SimpleNamespace()
    rf.type = "rf"
    rf.signal = signal
    rf.t = t
    rf.shape_dur = N * dwell
    rf.freq_offset = freq_offset
    rf.phase_offset = phase_offset
    rf.dead_time = system.rf_dead_time
    rf.ringdown_time = system.rf_ringdown_time
    rf.delay = delay

    if use != str():
        rf.use = use

    # The RF event may not start before the transmitter dead time has elapsed.
    if rf.dead_time > rf.delay:
        rf.delay = rf.dead_time

    if return_gz:
        if slice_thickness == 0:
            raise ValueError("Slice thickness must be provided")

        # NOTE(review): these assignments mutate the shared `system` Opts
        # object passed by the caller -- confirm this side effect is intended.
        if max_grad > 0:
            system.max_grad = max_grad

        if max_slew > 0:
            system.max_slew = max_slew

        amplitude = BW / slice_thickness
        area = amplitude * duration
        gz = make_trapezoid(
            channel="z", system=system, flat_time=duration, flat_area=area
        )
        # Rephaser compensates the post-peak slice-select area plus half the ramp area.
        gzr = make_trapezoid(
            channel="z",
            system=system,
            area=-area * (1 - center_pos) - 0.5 * (gz.area - area),
        )

        # Align RF and gradient: delay the gradient (on the raster) if the RF
        # starts later, otherwise delay the RF to the gradient flat top.
        if rf.delay > gz.rise_time:
            gz.delay = (
                np.ceil((rf.delay - gz.rise_time) / system.grad_raster_time)
                * system.grad_raster_time
            )

        if rf.delay < (gz.rise_time + gz.delay):
            rf.delay = gz.rise_time + gz.delay

    # NOTE(review): if return_delay=True but ringdown_time == 0, the returned
    # `delay` is the input float, not a delay event -- confirm callers handle this.
    if rf.ringdown_time > 0 and return_delay:
        delay = make_delay(calc_duration(rf) + rf.ringdown_time)

    # Following 2 lines of code are workarounds for numpy returning 3.14... for np.angle(-0.00...)
    negative_zero_indices = np.where(rf.signal == -0.0)
    rf.signal[negative_zero_indices] = 0

    if return_gz and return_delay:
        return rf, gz, gzr, delay
    elif return_gz:
        return rf, gz, gzr
    else:
        return rf

+ 203 - 0
LF_scanner/pypulseq/make_trapezoid.py

@@ -0,0 +1,203 @@
+from types import SimpleNamespace
+
+import numpy as np
+
+from LF_scanner.pypulseq.opts import Opts
+
+
+def make_trapezoid(
+    channel: str,
+    amplitude: float = 0,
+    area: float = None,
+    delay: float = 0,
+    duration: float = 0,
+    fall_time: float = 0,
+    flat_area: float = 0,
+    flat_time: float = -1,
+    max_grad: float = 0,
+    max_slew: float = 0,
+    rise_time: float = 0,
+    system: Opts = Opts(),
+) -> SimpleNamespace:
+    """
+    Create a trapezoidal gradient event.
+
+    See also:
+    - `pypulseq.Sequence.sequence.Sequence.add_block()`
+    - `pypulseq.opts.Opts`
+
+    Parameters
+    ----------
+    channel : str
+        Orientation of trapezoidal gradient event. Must be one of `x`, `y` or `z`.
+    amplitude : float, default=0
+        Amplitude.
+    area : float, default=None
+        Area.
+    delay : float, default=0
+        Delay in seconds (s).
+    duration : float, default=0
+        Duration in seconds (s).
+    flat_area : float, default=0
+        Flat area.
+    flat_time : float, default=-1
+        Flat duration in seconds (s). Default is -1 to account for triangular pulses.
+    max_grad : float, default=0
+        Maximum gradient strength.
+    max_slew : float, default=0
+        Maximum slew rate.
+    rise_time : float, default=0
+        Rise time in seconds (s).
+    system : Opts, default=Opts()
+        System limits.
+
+    Returns
+    -------
+    grad : SimpleNamespace
+        Trapezoidal gradient event created based on the supplied parameters.
+
+    Raises
+    ------
+    ValueError
+        If none of `area`, `flat_area` and `amplitude` are passed
+        If requested area is too large for this gradient
+        If `flat_time`, `duration` and `area` are not supplied.
+        Amplitude violation
+    """
+    if channel not in ["x", "y", "z"]:
+        raise ValueError(
+            f"Invalid channel. Must be one of `x`, `y` or `z`. Passed: {channel}"
+        )
+
+    if max_grad <= 0:
+        max_grad = system.max_grad
+
+    if max_slew <= 0:
+        max_slew = system.max_slew
+
+    if rise_time <= 0:
+        rise_time = 0.0
+
+    if fall_time > 0:
+        if rise_time == 0:
+            raise ValueError(
+                "Invalid arguments. Must always supply `rise_time` if `fall_time` is specified explicitly."
+            )
+    else:
+        fall_time = 0.0
+
+    if area is None and flat_area == 0 and amplitude == 0:
+        raise ValueError("Must supply either 'area', 'flat_area' or 'amplitude'.")
+
+    if flat_time != -1:
+        if amplitude != 0:
+            amplitude2 = amplitude
+        elif (area is not None) and (
+            rise_time > 0
+        ):  # We have rise_time, flat_time and area.
+            amplitude2 = area / (rise_time + flat_time)
+        else:
+            if flat_area == 0:
+                raise ValueError(
+                    "When `flat_time` is provided, either `flat_area` or `amplitude` must be provided as well; you may "
+                    "consider providing `duration`, `area` and optionally ramp times instead."
+                )
+            amplitude2 = flat_area / flat_time
+
+        if rise_time == 0:
+            rise_time = np.abs(amplitude2) / max_slew
+            rise_time = (
+                np.ceil(rise_time / system.grad_raster_time) * system.grad_raster_time
+            )
+            if rise_time == 0:
+                rise_time = system.grad_raster_time
+        if fall_time == 0:
+            fall_time = rise_time
+    elif duration > 0:
+        if amplitude == 0:
+            if rise_time == 0:
+                dC = 1 / np.abs(2 * max_slew) + 1 / np.abs(2 * max_slew)
+                possible = duration**2 > 4 * np.abs(area) * dC
+                assert possible, (
+                    f"Requested area is too large for this gradient. Minimum required duration is "
+                    f"{np.round(np.sqrt(4 * np.abs(area) * dC) * 1e6)} uss"
+                )
+                amplitude2 = (
+                    duration - np.sqrt(duration**2 - 4 * np.abs(area) * dC)
+                ) / (2 * dC)
+            else:
+                if fall_time == 0:
+                    fall_time = rise_time
+                amplitude2 = area / (duration - 0.5 * rise_time - 0.5 * fall_time)
+                possible = (
+                    duration > (rise_time + fall_time) and np.abs(amplitude2) < max_grad
+                )
+                assert possible, (
+                    f"Requested area is too large for this gradient. Probably amplitude is violated "
+                    f"{np.round(np.abs(amplitude) / max_grad * 100)}"
+                )
+
+        if rise_time == 0:
+            rise_time = (
+                np.ceil(np.abs(amplitude2) / max_slew / system.grad_raster_time)
+                * system.grad_raster_time
+            )
+            if rise_time == 0:
+                rise_time = system.grad_raster_time
+
+        if fall_time == 0:
+            fall_time = rise_time
+        flat_time = duration - rise_time - fall_time
+
+        if amplitude == 0:
+            # Adjust amplitude (after rounding) to match area
+            amplitude2 = area / (rise_time / 2 + fall_time / 2 + flat_time)
+    else:
+        if area == 0:
+            raise ValueError("Must supply area or duration.")
+        else:
+            # Find the shortest possible duration. First check if the area can be realized as a triangle.
+            # If not, then it must be a trapezoid.
+            rise_time = (
+                np.ceil(np.sqrt(np.abs(area) / max_slew) / system.grad_raster_time)
+                * system.grad_raster_time
+            )
+            if rise_time < system.grad_raster_time:  # Area was almost 0 maybe
+                rise_time = system.grad_raster_time
+            amplitude2 = np.divide(area, rise_time)  # To handle nan
+            t_eff = rise_time
+
+            if np.abs(amplitude2) > max_grad:
+                t_eff = (
+                    np.ceil(np.abs(area) / max_grad / system.grad_raster_time)
+                    * system.grad_raster_time
+                )
+                amplitude2 = area / t_eff
+                rise_time = (
+                    np.ceil(np.abs(amplitude2) / max_slew / system.grad_raster_time)
+                    * system.grad_raster_time
+                )
+
+                if rise_time == 0:
+                    rise_time = system.grad_raster_time
+
+            flat_time = t_eff - rise_time
+            fall_time = rise_time
+
+    if np.abs(amplitude2) > max_grad:
+        raise ValueError("Amplitude violation.")
+
+    grad = SimpleNamespace()
+    grad.type = "trap"
+    grad.channel = channel
+    grad.amplitude = amplitude2
+    grad.rise_time = rise_time
+    grad.flat_time = flat_time
+    grad.fall_time = fall_time
+    grad.area = amplitude2 * (flat_time + rise_time / 2 + fall_time / 2)
+    grad.flat_area = amplitude2 * flat_time
+    grad.delay = delay
+    grad.first = 0
+    grad.last = 0
+
+    return grad

+ 53 - 0
LF_scanner/pypulseq/make_trigger.py

@@ -0,0 +1,53 @@
+# inserted for trigger support by mveldmann
+
+from types import SimpleNamespace
+
+from LF_scanner.pypulseq.opts import Opts
+
+
+def make_trigger(
+    channel: str, delay: float = 0, duration: float = 0, system: Opts = Opts()
+) -> SimpleNamespace:
+    """
+     Create a trigger halt event for a synchronisation with an external signal from a given channel with an optional
+     given delay prio to the sync and duration after the sync. Possible channel values: 'physio1','physio2'
+     (Siemens specific).
+
+    See also `pypulseq.Sequence.sequence.Sequence.add_block()`.
+
+     Parameters
+     ----------
+     channel : str
+         Must be one of 'physio1' or 'physio2'.
+     delay : float, default=0
+         Delay in seconds
+     duration: float, default=0
+         Duration in seconds.
+     system : Opts, default=Opts()
+         System limits.
+
+     Returns
+     -------
+     trigger : SimpleNamespace
+         Trigger event.
+
+     Raises
+     ------
+     ValueError
+         If invalid `channel` is passed. Must be one of 'physio1' or 'physio2'.
+    """
+
+    if channel not in ["physio1", "physio2"]:
+        raise ValueError(
+            f"Channel {channel} is invalid. Must be one of 'physio1' or 'physio2'."
+        )
+
+    trigger = SimpleNamespace()
+    trigger.type = "trigger"
+    trigger.channel = channel
+    trigger.delay = delay
+    trigger.duration = duration
+    if trigger.duration <= system.grad_raster_time:
+        trigger.duration = system.grad_raster_time
+
+    return trigger

+ 110 - 0
LF_scanner/pypulseq/opts.py

@@ -0,0 +1,110 @@
+from LF_scanner.pypulseq.convert import convert
+
+
+class Opts:
+    """
+    System limits of an MR scanner.
+
+    Attributes
+    ----------
+    adc_dead_time : float, default=0
+        Dead time for ADC readout pulses.
+    gamma : float, default=42.576e6
+        Gyromagnetic ratio. Default gamma is specified for Hydrogen.
+    grad_raster_time : float, default=10e-6
+        Raster time for gradient waveforms.
+    grad_unit : str, default='Hz/m'
+        Unit of maximum gradient amplitude. Must be one of 'Hz/m', 'mT/m' or 'rad/ms/mm'.
+    max_grad : float, default=0
+        Maximum gradient amplitude.
+    max_slew : float, default=0
+        Maximum slew rate.
+    rf_dead_time : float, default=0
+        Dead time for radio-frequency pulses.
+    rf_raster_time : float, default=1e-6
+        Raster time for radio-frequency pulses.
+    rf_ringdown_time : float, default=0
+        Ringdown time for radio-frequency pulses.
+    rise_time : float, default=0
+        Rise time for gradients.
+    slew_unit : str, default='Hz/m/s'
+        Unit of maximum slew rate. Must be one of 'Hz/m/s', 'mT/m/ms', 'T/m/s' or 'rad/ms/mm/ms'.
+
+    Raises
+    ------
+    ValueError
+        If invalid `grad_unit` is passed. Must be one of 'Hz/m', 'mT/m' or 'rad/ms/mm'.
+        If invalid `slew_unit` is passed. Must be one of 'Hz/m/s', 'mT/m/ms', 'T/m/s' or 'rad/ms/mm/ms'.
+    """
+
+    def __init__(
+        self,
+        adc_dead_time: float = 0,
+        adc_raster_time: float = 100e-9,
+        block_duration_raster: float = 10e-6,
+        gamma: float = 42.576e6,
+        grad_raster_time: float = 10e-6,
+        grad_unit: str = "Hz/m",
+        max_grad: float = 0,
+        max_slew: float = 0,
+        rf_dead_time: float = 0,
+        rf_raster_time: float = 1e-6,
+        rf_ringdown_time: float = 0,
+        rise_time: float = 0,
+        slew_unit: str = "Hz/m/s",
+        B0: float = 1.5,
+    ):
+        valid_grad_units = ["Hz/m", "mT/m", "rad/ms/mm"]
+        valid_slew_units = ["Hz/m/s", "mT/m/ms", "T/m/s", "rad/ms/mm/ms"]
+
+        if grad_unit not in valid_grad_units:
+            raise ValueError(
+                f"Invalid gradient unit. Must be one of 'Hz/m', 'mT/m' or 'rad/ms/mm'. "
+                f"Passed: {grad_unit}"
+            )
+
+        if slew_unit not in valid_slew_units:
+            raise ValueError(
+                f"Invalid slew rate unit. Must be one of 'Hz/m/s', 'mT/m/ms', 'T/m/s' or 'rad/ms/mm/ms'. "
+                f"Passed: {slew_unit}"
+            )
+
+        if max_grad == 0:
+            max_grad = convert(from_value=40, from_unit="mT/m", gamma=gamma)
+        else:
+            max_grad = convert(
+                from_value=max_grad, from_unit=grad_unit, to_unit="Hz/m", gamma=gamma
+            )
+
+        if max_slew == 0:
+            max_slew = convert(from_value=170, from_unit="T/m/s", gamma=gamma)
+        else:
+            max_slew = convert(
+                from_value=max_slew, from_unit=slew_unit, to_unit="Hz/m", gamma=gamma
+            )
+
+        if rise_time != 0:
+            max_slew = max_grad / rise_time
+
+        self.max_grad = max_grad
+        self.max_slew = max_slew
+        self.rise_time = rise_time
+        self.rf_dead_time = rf_dead_time
+        self.rf_ringdown_time = rf_ringdown_time
+        self.adc_dead_time = adc_dead_time
+        self.adc_raster_time = adc_raster_time
+        self.rf_raster_time = rf_raster_time
+        self.grad_raster_time = grad_raster_time
+        self.block_duration_raster = block_duration_raster
+        self.gamma = gamma
+        self.B0 = B0
+
+    def __str__(self) -> str:
+        """
+        Print a string representation of the system limits objects.
+        """
+        variables = vars(self)
+        s = [f"{key}: {value}" for key, value in variables.items()]
+        s = "\n".join(s)
+        s = "System limits:\n" + s
+        return s

+ 41 - 0
LF_scanner/pypulseq/points_to_waveform.py

@@ -0,0 +1,41 @@
+import numpy as np
+
+
+def points_to_waveform(
+    amplitudes: np.ndarray, grad_raster_time: float, times: np.ndarray
+) -> np.ndarray:
+    """
+    1D interpolate amplitude values `amplitudes` at time indices `times` as per the gradient raster time
+    `grad_raster_time` to generate a gradient waveform.
+
+    Parameters
+    ----------
+    amplitudes : numpy.ndarray
+        Amplitude values at time indices `times`.
+    grad_raster_time : float
+        Gradient raster time.
+    times : numpy.ndarray
+        Time indices.
+
+    Returns
+    -------
+    waveform : numpy.ndarray
+        Gradient waveform.
+    """
+
+    amplitudes = np.asarray(amplitudes)
+    times = np.asarray(times)
+
+    if amplitudes.size == 0:
+        return np.array([0])
+
+    grd = (
+        np.arange(
+            start=np.round(np.min(times) / grad_raster_time),
+            stop=np.round(np.max(times) / grad_raster_time),
+        )
+        * grad_raster_time
+    )
+    waveform = np.interp(x=grd + grad_raster_time / 2, xp=times, fp=amplitudes)
+
+    return waveform

+ 20 - 0
LF_scanner/pypulseq/recon_examples/2dFFT.py

@@ -0,0 +1,20 @@
+import numpy as np
+from matplotlib import pyplot as plt
+
+from dat2py import dat2py_main
+
+# NOTE(review): everything below runs at import time and reads a raw scanner
+# .dat file from a hard-coded local path — this looks like a throwaway
+# reconstruction example; consider moving it under main().
+path = r"C:\Users\sravan953\Downloads\FINAL_meas_MID00169_FID00800_pulseq_3D_mprage.dat"
+kspace, img = dat2py_main.main(dat_file_path=path)
+# Root-sum-of-squares combination over the last axis (presumably the coil
+# dimension — TODO confirm against dat2py_main's output layout).
+img = np.abs(np.sqrt(np.sum(np.square(img), -1)))
+
+plt.imshow(img)
+plt.show()
+
+
+def main():
+    # NOTE(review): this `path` shadows the module-level one, and the actual
+    # reconstruction call is commented out — main() currently does nothing.
+    path = r"C:\Users\sravan953\Desktop\20210424_7datas\gre_meas_MID00176_FID00172_pulseq.dat"
+    # kspace, img = dat2py_main.main(dat_file_path=path)
+
+
+if __name__ == '__main__':
+    main()

+ 0 - 0
LF_scanner/pypulseq/recon_examples/__init__.py


+ 123 - 0
LF_scanner/pypulseq/rotate.py

@@ -0,0 +1,123 @@
+from types import SimpleNamespace
+from typing import List
+
+import numpy as np
+
+from LF_scanner.pypulseq.add_gradients import add_gradients
+from LF_scanner.pypulseq.scale_grad import scale_grad
+
+
+def __get_grad_abs_mag(grad: SimpleNamespace) -> np.ndarray:
+    if grad.type == "trap":
+        return np.abs(grad.amplitude)
+    return np.max(np.abs(grad.waveform))
+
+
+def rotate(
+    *args: SimpleNamespace,
+    angle: float,
+    axis: str,
+) -> List[SimpleNamespace]:
+    """
+    Rotates the corresponding gradient(s) about the given axis by the specified amount. Gradients parallel to the
+    rotation axis and non-gradient(s) are not affected. Possible rotation axes are 'x', 'y' or 'z'.
+
+    See also `pypulseq.Sequence.sequence.add_block()`.
+
+    Parameters
+    ----------
+    axis : str
+        Axis about which the gradient(s) will be rotated.
+    angle : float
+        Angle by which the gradient(s) will be rotated, in radians.
+    args : SimpleNamespace
+        Gradient(s).
+
+    Returns
+    -------
+    rotated_grads : [SimpleNamespace]
+        Rotated gradient(s).
+
+    Raises
+    ------
+    ValueError
+        If `axis` is not one of 'x', 'y' or 'z' (raised by `axes.remove`),
+        or if the axes specification is otherwise inconsistent.
+    """
+    axes = ["x", "y", "z"]
+
+    # Cycle through the objects and rotate gradients non-parallel to the given rotation axis. Rotated gradients
+    # assigned to the same axis are then added together.
+
+    # First create indexes of the objects to be bypassed or rotated
+    i_rotate1 = []
+    i_rotate2 = []
+    i_bypass = []
+
+    # The two remaining axes span the rotation plane.
+    axes.remove(axis)
+    axes_to_rotate = axes
+    if len(axes_to_rotate) != 2:
+        raise ValueError("Incorrect axes specification.")
+
+    for i in range(len(args)):
+        event = args[i]
+
+        # Non-gradient events and gradients parallel to the rotation axis
+        # pass through unchanged.
+        if (event.type != "grad" and event.type != "trap") or event.channel == axis:
+            i_bypass.append(i)
+        else:
+            if event.channel == axes_to_rotate[0]:
+                i_rotate1.append(i)
+            else:
+                if event.channel == axes_to_rotate[1]:
+                    i_rotate2.append(i)
+                else:
+                    i_bypass.append(i)  # Should never happen
+
+    # Now every gradient to be rotated generates two new gradients: one on the original axis and one on the other from
+    # the axes_to_rotate list
+    rotated1 = []
+    rotated2 = []
+    max_mag = 0  # Measure of relevant amplitude
+    # In-plane 2D rotation: a gradient on the first axis contributes
+    # cos(angle) on its own axis and +sin(angle) on the other axis.
+    for i in range(len(i_rotate1)):
+        g = args[i_rotate1[i]]
+        max_mag = np.max((max_mag, __get_grad_abs_mag(g)))
+        rotated1.append(scale_grad(grad=g, scale=np.cos(angle)))
+        g = scale_grad(grad=g, scale=np.sin(angle))
+        g.channel = axes_to_rotate[1]
+        rotated2.append(g)
+
+    # A gradient on the second axis contributes cos(angle) on its own axis
+    # and -sin(angle) on the first axis.
+    for i in range(len(i_rotate2)):
+        g = args[i_rotate2[i]]
+        max_mag = np.max((max_mag, __get_grad_abs_mag(g)))
+        rotated2.append(scale_grad(grad=g, scale=np.cos(angle)))
+        g = scale_grad(grad=g, scale=-np.sin(angle))
+        g.channel = axes_to_rotate[1]
+        rotated1.append(g)
+
+    # Eliminate zero-amplitude gradients (relative cutoff: 1e-6 of the
+    # largest rotated amplitude). Iterate backwards so pop() is safe.
+    threshold = 1e-6 * max_mag
+    for i in range(len(rotated1) - 1, -1, -1):
+        if __get_grad_abs_mag(rotated1[i]) < threshold:
+            rotated1.pop(i)
+    for i in range(len(rotated2) - 1, -1, -1):
+        if __get_grad_abs_mag(rotated2[i]) < threshold:
+            rotated2.pop(i)
+
+    # Add gradients on the corresponding axis together
+    g = []
+    if len(rotated1) > 1:
+        g.append(add_gradients(grads=rotated1))
+    else:
+        if len(rotated1) != 0:
+            g.append(rotated1[0])
+
+    if len(rotated2) > 1:
+        g.append(add_gradients(grads=rotated2))
+    else:
+        if len(rotated2) != 0:
+            g.append(rotated2[0])
+
+    # Eliminate zero amplitude gradients
+    for i in range(len(g) - 1, -1, -1):
+        if __get_grad_abs_mag(g[i]) < threshold:
+            g.pop(i)
+
+    # Export: bypassed events keep their original order, followed by the
+    # combined rotated gradients.
+    bypass = np.take(args, i_bypass)
+    rotated_grads = [*bypass, *g]
+
+    return rotated_grads

+ 35 - 0
LF_scanner/pypulseq/scale_grad.py

@@ -0,0 +1,35 @@
+from copy import copy
+from types import SimpleNamespace
+
+
+def scale_grad(grad: SimpleNamespace, scale: float) -> SimpleNamespace:
+    """
+    Scales the gradient with the scalar.
+
+    Parameters
+    ----------
+    grad : SimpleNamespace
+        Gradient event to be scaled.
+    scale : float
+        Scaling factor.
+
+    Returns
+    -------
+    grad : SimpleNamespace
+        Scaled gradient.
+    """
+    # copy() to emulate pass-by-value; otherwise passed grad event is modified
+    scaled_grad = copy(grad)
+    if scaled_grad.type == "trap":
+        scaled_grad.amplitude = scaled_grad.amplitude * scale
+        scaled_grad.area = scaled_grad.area * scale
+        scaled_grad.flat_area = scaled_grad.flat_area * scale
+    else:
+        scaled_grad.waveform = scaled_grad.waveform * scale
+        scaled_grad.first = scaled_grad.first * scale
+        scaled_grad.last = scaled_grad.last * scale
+
+    if hasattr(scaled_grad, "id"):
+        delattr(scaled_grad, "id")
+
+    return scaled_grad

+ 0 - 0
LF_scanner/pypulseq/seq_examples/__init__.py


+ 0 - 0
LF_scanner/pypulseq/seq_examples/new_scripts/__init__.py


+ 339 - 0
LF_scanner/pypulseq/seq_examples/new_scripts/write_FS_TSE_T1_T2_PD.py

@@ -0,0 +1,339 @@
+#---------------------------------------------------------------------
+# imports of the libraries
+#---------------------------------------------------------------------
+from math import pi
+import numpy as np
+import math
+import json as j
+
+from pypulseq.Sequence.sequence import Sequence
+from pypulseq.calc_rf_center import calc_rf_center
+from pypulseq.calc_duration import calc_duration
+from pypulseq.make_adc import make_adc
+from pypulseq.make_delay import make_delay
+from pypulseq.make_sinc_pulse import make_sinc_pulse
+from pypulseq.make_gauss_pulse import make_gauss_pulse
+from pypulseq.make_trapezoid import make_trapezoid
+from pypulseq.make_extended_trapezoid import make_extended_trapezoid
+from pypulseq.opts import Opts
+from pypulseq.align import align
+from pypulseq.traj_to_grad import traj_to_grad
+
+from pypulseq.utilities import phase_grad_utils as pgu
+
+from py2jemris.seq2xml import seq2xml
+
+def FS_CHESS_block(params, scanner_parameters, g_rf_area, flip_fs):
+    # Build a CHESS fat-saturation block: a spectrally selective Gaussian RF
+    # pulse at the fat resonance, followed by gx/gy spoiler gradients that
+    # dephase the excited fat signal.
+    # NOTE(review): this mutates the caller's `params` dict (adds 'B0',
+    # 'FS_sat_ppm', 'FS_pulse_duration') — confirm that is intended.
+    params['B0'] = 1.5  # TODO add to GUI
+    params['FS_sat_ppm'] = -3.45  # TODO add to GUI  (fat chemical shift w.r.t. water)
+    params['FS_pulse_duration'] = 8e-3  # TODO add to GUI
+    # Off-resonance frequency of fat at this field strength: B0 * ppm * gamma.
+    FS_sat_frequency = params['B0'] * 1e-6 * params['FS_sat_ppm'] * params['gamma']
+
+    rf_fs = make_gauss_pulse(flip_angle=flip_fs, system=scanner_parameters, duration=params['FS_pulse_duration'],
+                             bandwidth=abs(FS_sat_frequency), freq_offset=FS_sat_frequency)
+    # Spoilers are delayed so they start only after the RF pulse has played out.
+    gx_fs = make_trapezoid(channel="x", system=scanner_parameters, delay=calc_duration(rf_fs),
+                           area= g_rf_area, rise_time=params['dG'])
+    gy_fs = make_trapezoid(channel="y", system=scanner_parameters, delay=calc_duration(rf_fs),
+                           area= g_rf_area, rise_time=params['dG'])
+
+    return rf_fs, gx_fs, gy_fs
+
+
+def TSE_k_space_fill(n_ex, ETL, k_steps, TE_eff_number):
+    # function defines phase encoding steps for k space filling in liner order
+    # with shifting according to the TE effective number
+
+    k_space_list_with_zero = []
+    for i in range(ETL):
+        k_space_list_with_zero.append((ETL - 1) * n_ex - i * n_ex)
+    # print(k_space_list_with_zero)
+    central_num = np.int32(k_steps / 2)
+    # print(central_num)
+    index_central_line = k_space_list_with_zero.index(central_num)
+    shift = index_central_line - TE_eff_number + 1
+
+    if shift > 0:
+        a = k_space_list_with_zero[:shift]
+        b = k_space_list_with_zero[shift:]
+        k_space_list_with_zero = b + a
+    elif shift < 0:
+        a = k_space_list_with_zero[:shift]
+        b = k_space_list_with_zero[shift:]
+        k_space_list_with_zero = b + a
+
+    k_space_order_filing = [k_space_list_with_zero]
+    for i in range(n_ex - 1):
+        k_space_list_temp = []
+        for k in k_space_list_with_zero:
+            k_space_list_temp.append(k + i + 1)
+        k_space_order_filing.append(k_space_list_temp)
+
+    return k_space_order_filing
+
+
+def main(plot: bool, write_seq: bool, weightning, FS: bool):
+
+    # Reading json file according to the weightning of the image
+    if weightning == 'T1': #TODO: create general path
+        if FS:
+            with open('C:\MRI_seq_files_mess\FS_TSE_T1.json', 'rb') as f:
+                params = j.load(f)
+        else:
+            with open('C:\MRI_seq_files_mess\TSE_T1.json', 'rb') as f:
+                params = j.load(f)
+
+    elif weightning == 'T2':
+        if FS:
+            with open('C:\MRI_seq_files_mess\FS_TSE_T2.json', 'rb') as f:
+                params = j.load(f)
+        else:
+            with open('C:\MRI_seq_files_mess\TSE_T2.json', 'rb') as f:
+                params = j.load(f)
+
+    elif weightning == 'PD':
+        if FS:
+            with open('C:\MRI_seq_files_mess\FS_TSE_PD.json', 'rb') as f:
+                params = j.load(f)
+        else:
+            with open('C:\MRI_seq_files_mess\TSE_PD.json', 'rb') as f:
+                params = j.load(f)
+    else:
+        print('Please choose image weightning')
+
+    readout_time = round(1 / params['BW_pixel'], 8)
+
+    # --------------------------
+    # Set system limits
+    # --------------------------
+
+    scanner_parameters = Opts(
+        max_grad=37.8,
+        grad_unit="mT/m",
+        max_slew=121,
+        slew_unit="T/m/s",
+        rf_ringdown_time=params['rf_ringdown_time'][0], #TODO: change format from list to float in GUI
+        rf_dead_time=params['rf_dead_time'][0], #TODO: change format from list to float in GUI
+        adc_dead_time=params['adc_dead_time'][0], #TODO: change format from list to float in GUI
+        rf_raster_time=params['rf_raster_time'],
+        grad_raster_time=params['grad_raster_time'],
+        block_duration_raster=params['grad_raster_time'],
+        adc_raster_time=1 / (params['BW_pixel'] * params['Nf'])
+    )
+    seq = Sequence(scanner_parameters)
+
+    #--------------------------
+    # RF & Gradients
+    #--------------------------
+
+    rf90_phase = np.pi / 2
+    rf180_phase = 0
+
+    flip90 = round(params['FA'] * pi / 180, 3)
+    flip180 = round(180 * pi / 180)
+    flip_fs = round(110 * pi / 180)
+    rf90, gz_ex, _ = make_sinc_pulse(flip_angle=flip90, system=scanner_parameters, duration=params['t_ex'],
+                                     slice_thickness=params['sl_thkn'], apodization=0.3,
+                                     time_bw_product=round(params['t_BW_product_ex'], 8), return_gz=True)
+    rf90.delay = params['dG']
+
+    rf180, gz_ref, _ = make_sinc_pulse(flip_angle=flip180, system=scanner_parameters, duration=params['t_ref'],
+                                       slice_thickness=params['sl_thkn'], apodization=0.3,
+                                       time_bw_product=round(params['t_BW_product_ref'], 8), phase_offset=90 * pi / 180,
+                                       return_gz=True)
+    rf180.delay = params['dG']
+
+    if FS: #TODO add to GUI choise of including or not Fat Sat block
+        g_rf_area = gz_ex.area * 10
+        rf_fs, gx_fs, gy_fs = FS_CHESS_block(params, scanner_parameters, g_rf_area, flip_fs)
+
+    # Prepare RF offsets. This is required for multi-slice acquisition
+    pulse_offsets90 = (np.linspace(0.0, params['sl_nb'] - 1.0, np.int16(params['sl_nb'])) - 0.5 * (
+                np.double(params['sl_nb']) - 1.0)) * (
+                                  params['sl_thkn'] * (100.0 + params['sl_gap']) / 100.0) * gz_ex.amplitude
+    pulse_offsets180 = (np.linspace(0.0, params['sl_nb'] - 1.0, np.int16(params['sl_nb'])) - 0.5 * (
+                np.double(params['sl_nb']) - 1.0)) * (
+                                   params['sl_thkn'] * (100.0 + params['sl_gap']) / 100.0) * gz_ref.amplitude
+
+    # slice selective gradient drafts for complex gradient blocks
+    t_exwd = params['t_ex'] + scanner_parameters.rf_ringdown_time + scanner_parameters.rf_dead_time
+    t_refwd = params['t_ref'] + scanner_parameters.rf_ringdown_time + scanner_parameters.rf_dead_time
+
+    gz90 = make_trapezoid(channel="z", system=scanner_parameters, amplitude=gz_ex.amplitude,
+                          flat_time=t_exwd, rise_time=params['dG'])
+    gz180 = make_trapezoid(channel="z", system=scanner_parameters, amplitude=gz_ref.amplitude,
+                           flat_time=t_refwd, rise_time=params['dG'])
+
+    # generate basic gx readout gradient - G_read
+    k_read = np.double(params['Nf']) / np.double(params['FoV_f'])
+    t_gx = np.ceil(readout_time / scanner_parameters.grad_raster_time) * scanner_parameters.grad_raster_time
+    gx = make_trapezoid(channel='x', system=scanner_parameters, flat_area=k_read,
+                        flat_time=t_gx + 2 * scanner_parameters.adc_dead_time)
+
+    # generate gx spoiler gradient - G_crr
+    gx_spoil = make_trapezoid(channel='x', system=scanner_parameters, area=gx.area, flat_time=params['dG'],
+                              rise_time=params['dG'])
+
+    # read prephase gradient - G_pre
+    gx_pre = make_trapezoid(channel="x", system=scanner_parameters, area=gx.area * 1.50,
+                            rise_time=params['dG'])
+
+    # rephase gradient draft after 90 RF pulse  - G_reph
+    gz_reph = make_trapezoid(channel="z", system=scanner_parameters, area=gz_ex.area * 0.25,
+                             flat_time=calc_duration(gx_pre), rise_time=params['dG'])
+
+    # spoil gradient around 180 RF pulse - G_crs
+    t_gz_spoil = np.ceil(
+        params['t_ref'] / 2 / scanner_parameters.grad_raster_time) * scanner_parameters.grad_raster_time
+    gz_spoil = make_trapezoid(channel='z', system=scanner_parameters, area=gz90.area * 0.75, rise_time=params['dG'],
+                              flat_time=params['dG'])
+
+    # spoil gradient G_sps
+    gz_cr = make_trapezoid(channel='z', system=scanner_parameters, area=gz90.area * 4, rise_time=params['dG'])
+
+    # Creation of ADC
+    adc = make_adc(num_samples=params['Nf'], duration=t_gx, delay=scanner_parameters.adc_dead_time,
+                   system=scanner_parameters)
+
+    #--------------------------
+    # k-space filling quantification
+    #--------------------------
+
+    k_phase = np.double(params['Np']) / np.double(params['FoV_ph'])
+    k_steps_PE = pgu.create_k_steps(k_phase, np.int16(params['Np']))  # list of phase encoding gradients
+
+    n_ex = math.floor(params['Np'] / params['ETL'])  # number of excitations
+    k_space_order_filing = TSE_k_space_fill(n_ex, np.int32(params['ETL']), np.int32(params['Np']), np.int32(
+        params['N_TE']))  # TODO: to create additiolal functions on different k space order filling
+    k_space_order_filing
+
+    #--------------------------
+    # DELAYS
+    #--------------------------
+
+    block_duration = 0
+    block_duration = max(calc_duration(rf90), calc_duration(gz90)) / 2
+    block_duration += max(calc_duration(gx_pre), calc_duration(gz_spoil))
+    for i in range(np.int32(params['ETL']) - 1):
+        block_duration += max(calc_duration(rf180), calc_duration(gz180))
+        block_duration += max(calc_duration(gx_spoil), calc_duration(gz_spoil))
+        block_duration += max(calc_duration(gx_spoil), calc_duration(adc))
+        block_duration += calc_duration(gz_spoil)
+    block_duration += max(calc_duration(rf180), calc_duration(gz_spoil))
+    block_duration += max(calc_duration(gx_spoil), calc_duration(gz_spoil))
+    block_duration += max(calc_duration(gx_spoil), calc_duration(adc))
+    block_duration += calc_duration(gz_cr)
+    if FS:
+        block_duration += calc_duration(gx_fs)
+
+    #--------------------------
+    # CONSTRUCT CONCATINATIONS timings
+    #--------------------------
+
+    # Quantification of Effective TE loop
+    # eff_time = TE + calc_duration(gx) / 2 + max(calc_duration(gy_pre),calc_duration(gz_spoil)) + calc_duration(gx_spoil) + calc_duration(gz90) / 2
+    eff_time = block_duration  # equal to previous!
+
+    max_slices_per_TR = np.floor(params['TR'] / eff_time)
+    required_concats = np.int32(np.ceil(params['sl_nb'] / max_slices_per_TR))
+    slice_list = list(range(np.int32(params['sl_nb'])))
+    slice_list = [slice_list[x::required_concats] for x in range(required_concats)]
+
+    # Calculate the TR fillers
+    tr_pauses = [(params['TR'] / np.double(len(x))) - eff_time for x in slice_list]
+    tr_pauses = [
+        max(seq.grad_raster_time, seq.rf_raster_time) * np.floor(x / max(seq.grad_raster_time, seq.rf_raster_time)) for
+        x in tr_pauses]
+
+    # Generate the TR fillers
+    tr_fillers = [make_delay(x) for x in tr_pauses]
+
+    # --------------------------
+    # CONSTRUCT SEQUENCE
+    # --------------------------
+
+    for k in range(params['Average']):  # Averages
+        for curr_concat in range(required_concats):
+            for phase_steps in k_space_order_filing:  # in stead of phase steps list of phase steps
+                for curr_slice in range(np.int32(params['sl_nb'])):  # Slices
+                    # Apply RF offsets
+                    n_echo_temp = 0
+                    rf90.freq_offset = pulse_offsets90[curr_slice]
+                    rf180.freq_offset = pulse_offsets180[curr_slice]
+                    # rf90.phase_offset = (rf90_phase - 2 * np.pi * rf90.freq_offset * calc_rf_center(rf90)[0])
+                    # rf180.phase_offset = (rf180_phase - 2 * np.pi * rf180.freq_offset * calc_rf_center(rf180)[0])
+                    print('curr_concat_' + str(curr_concat))
+                    print('curr_slice_' + str(curr_slice))
+                    if FS:
+                        seq.add_block(gx_fs, gy_fs, rf_fs)
+                    seq.add_block(gz90, rf90)
+                    seq.add_block(gz_reph, gx_pre)
+                    for phase_step in phase_steps:
+                        print('phase step_' + str(phase_step))
+                        seq.add_block(gz180, rf180)
+                        gy_pre = make_trapezoid(channel='y', system=scanner_parameters,
+                                                area=-k_steps_PE[phase_step], duration=calc_duration(gz_spoil),
+                                                rise_time=params['dG'])
+                        print(k_steps_PE[phase_step])
+
+                        seq.add_block(gz_spoil, gx_spoil, gy_pre)
+                        seq.add_block(gx, adc)
+                        n_echo_temp += 1
+                        gy_pre = make_trapezoid(channel='y', system=scanner_parameters,
+                                                area=k_steps_PE[phase_step], duration=calc_duration(gz_spoil),
+                                                rise_time=params['dG'])
+                        if n_echo_temp == np.int32(params['ETL']):
+                            seq.add_block(gz_cr, gx_spoil, gy_pre)
+                        else:
+                            seq.add_block(gz_spoil, gx_spoil, gy_pre)
+                    seq.add_block(tr_fillers[curr_concat])
+
+    ok, error_report = seq.check_timing()
+    if ok:
+        print("Timing check passed successfully")
+    else:
+        print("Timing check failed. Error listing follows:")
+        [print(e) for e in error_report]
+
+    # ======
+    # VISUALIZATION
+    # ======
+    if plot:
+        seq.plot()
+
+    # =========
+    # WRITE .SEQ
+    # =========
+    if write_seq: #TODO: create general path
+        if FS:
+            if weightning == 'T1':
+                seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\t1_TSE\\t1_TSE_matrx16x16.seq')  # Save to disk
+                seq2xml(seq, seq_name='t1_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t1_TSE')
+
+            elif weightning == 'T2':
+                seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\t2_TSE\\t2_TSE_matrx16x16.seq')  # Save to disk
+                seq2xml(seq, seq_name='t2_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t2_TSE')
+
+            elif weightning == 'PD':
+                seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\pd_TSE\\pd_TSE_matrx16x16.seq')  # Save to disk
+                seq2xml(seq, seq_name='pd_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\pd_TSE')
+
+            else:
+                print('Please choose image weightning')
+        else:
+            if weightning == 'T1':
+                seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\FS_t1_TSE\\FS_t1_TSE_matrx16x16.seq')  # Save to disk
+                seq2xml(seq, seq_name='FS_t1_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t1_FS_TSE')
+
+            elif weightning == 'T2':
+                seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\FS_t2_TSE\\FS_t2_TSE_matrx16x16.seq')  # Save to disk
+                seq2xml(seq, seq_name='FS_t2_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t2_FS_TSE')
+
+            elif weightning == 'PD':
+                seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\FS_pd_TSE\\FS_pd_TSE_matrx16x16.seq')  # Save to disk
+                seq2xml(seq, seq_name='FS_pd_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\pd_FS_TSE')
+            else:
+                print('Please choose image weightning')
+
# Script entry point: build the TSE sequence with plotting and file export
# enabled, T1 weighting selected and fat saturation (FS) on.
if __name__ == "__main__":
    main(plot=True, write_seq=True, weightning='T1', FS=True)

+ 287 - 0
LF_scanner/pypulseq/seq_examples/new_scripts/write_HASTE_T2.py

@@ -0,0 +1,287 @@
+#---------------------------------------------------------------------
+# imports of the libraries
+#---------------------------------------------------------------------
+from math import pi
+import numpy as np
+import math
+import json as j
+
+from pypulseq.Sequence.sequence import Sequence
+from pypulseq.calc_rf_center import calc_rf_center
+from pypulseq.calc_duration import calc_duration
+from pypulseq.make_adc import make_adc
+from pypulseq.make_delay import make_delay
+from pypulseq.make_sinc_pulse import make_sinc_pulse
+from pypulseq.make_trapezoid import make_trapezoid
+from pypulseq.make_extended_trapezoid import make_extended_trapezoid
+from pypulseq.opts import Opts
+from pypulseq.align import align
+from pypulseq.traj_to_grad import traj_to_grad
+
+from pypulseq.utilities import phase_grad_utils as pgu
+
+from py2jemris.seq2xml import seq2xml
+
+
def TSE_k_space_fill(n_ex, ETL, k_steps, TE_eff_number, part_fourier_factor):
    """Build the phase-encoding acquisition order for a TSE k-space fill.

    Lines are filled in linear (descending, interleaved) order, rotated so
    that the central k-space line is acquired at echo number
    ``TE_eff_number`` (the effective TE), and truncated according to the
    partial-Fourier factor.

    Parameters
    ----------
    n_ex : int
        Number of excitations (shots).
    ETL : int
        Echo train length (lines acquired per shot).
    k_steps : int
        Total number of phase-encoding steps (Np).
    TE_eff_number : int
        1-based echo index that should acquire the k-space center.
    part_fourier_factor : float
        Partial-Fourier fraction in (0, 1]; 1 means full k-space.

    Returns
    -------
    list[list[int]]
        One list of phase-encoding indices per excitation.
    """
    # Descending base order for the first shot: every n_ex-th line.
    base_order = [int((ETL - 1) * n_ex - i * n_ex) for i in range(ETL)]

    # Echo index currently holding the central line, and the rotation needed
    # to move it onto the requested effective echo.
    central_index = base_order.index(int(k_steps / 2))
    shift = central_index - TE_eff_number + 1

    # Truncate for partial Fourier BEFORE rotating (matches original order).
    last_line_num = partial_Fourier(part_fourier_factor, base_order, n_ex * ETL)
    base_order = base_order[:last_line_num]

    # Bug fix: the original `if shift > 0` / `elif shift < 0` branches were
    # byte-identical dead duplication. A single rotation handles both signs,
    # and shift == 0 is a no-op, so behavior is unchanged.
    if shift:
        base_order = base_order[shift:] + base_order[:shift]

    # Each subsequent excitation acquires the neighbouring interleaved lines
    # (offset 0 is the first shot, matching the original construction).
    return [[line + offset for line in base_order] for offset in range(n_ex)]
+
def partial_Fourier(part_fourier_factor, k_space_order_filing, Np):
    """Return the (even) number of k-space lines to acquire under partial Fourier.

    Parameters
    ----------
    part_fourier_factor : float
        Fraction of k-space to acquire, in (0, 1].
    k_space_order_filing : list
        Unused; kept only for interface compatibility with existing callers.
    Np : int
        Total number of phase-encoding lines.

    Returns
    -------
    numpy.int32
        ``Np * part_fourier_factor`` truncated to an integer and rounded up
        to the nearest even value (an even line count is expected downstream).
    """
    # Bug fix: the original had a dead `np.int32(num_k_lines)` statement whose
    # result was discarded; the truncation now happens once, explicitly.
    num_k_lines = np.int32(Np * part_fourier_factor)
    if num_k_lines % 2:
        num_k_lines += 1  # force an even number of lines
    return num_k_lines
+
+
def main(plot: bool, write_seq: bool):
    """Build and optionally export a HASTE (single-shot TSE) T2-weighted sequence.

    Parameters
    ----------
    plot : bool
        If True, display the sequence timing diagram via ``seq.plot()``.
    write_seq : bool
        If True, write the ``.seq`` file and a JEMRIS XML export to the
        hard-coded output paths (TODO: make the paths configurable).
    """

    # Load protocol parameters for the T2-weighted HASTE acquisition.
    # NOTE(review): hard-coded Windows path -- TODO create a general path.
    with open('C:\MRI_seq_files_mess\HASTE_T2.json', 'rb') as f:
        params = j.load(f)

    # Readout (ADC flat-top) duration per line: 1 / bandwidth-per-pixel.
    readout_time = round(1 / params['BW_pixel'], 8)

    # --------------------------
    # Set system limits
    # --------------------------

    scanner_parameters = Opts(
        max_grad=37.8,
        grad_unit="mT/m",
        max_slew=121,
        slew_unit="T/m/s",
        rf_ringdown_time=params['rf_ringdown_time'],
        rf_dead_time=params['rf_dead_time'],
        adc_dead_time=params['adc_dead_time'],
        rf_raster_time=params['rf_raster_time'],
        grad_raster_time=params['grad_raster_time'],
        block_duration_raster=params['grad_raster_time'],
        adc_raster_time=1 / (params['BW_pixel'] * params['Nf'])
    )
    seq = Sequence(scanner_parameters)

    #--------------------------
    # RF & Gradients
    #--------------------------

    rf90_phase = np.pi / 2  # currently unused: phase-offset lines are commented out below
    rf180_phase = 0         # currently unused

    flip90 = round(params['FA'] * pi / 180, 3)  # excitation flip angle [rad]
    flip180 = round(180 * pi / 180)             # refocusing flip angle [rad]
    rf90, gz_ex, _ = make_sinc_pulse(flip_angle=flip90, system=scanner_parameters, duration=params['t_ex'],
                                     slice_thickness=params['sl_thkn'], apodization=0.3,
                                     time_bw_product=round(params['t_BW_product_ex'], 8), return_gz=True)

    rf180, gz_ref, _ = make_sinc_pulse(flip_angle=flip180, system=scanner_parameters, duration=params['t_ref'],
                                       slice_thickness=params['sl_thkn'], apodization=0.3,
                                       time_bw_product=round(params['t_BW_product_ref'], 8), phase_offset=90 * pi / 180,
                                       return_gz=True)

    # Prepare RF offsets. This is required for multi-slice acquisition:
    # slice positions are centered around zero and spaced by thickness+gap.
    pulse_offsets90 = (np.linspace(0.0, params['sl_nb'] - 1.0, np.int16(params['sl_nb'])) - 0.5 * (
                np.double(params['sl_nb']) - 1.0)) * (
                                  params['sl_thkn'] * (100.0 + params['sl_gap']) / 100.0) * gz_ex.amplitude
    pulse_offsets180 = (np.linspace(0.0, params['sl_nb'] - 1.0, np.int16(params['sl_nb'])) - 0.5 * (
                np.double(params['sl_nb']) - 1.0)) * (
                                   params['sl_thkn'] * (100.0 + params['sl_gap']) / 100.0) * gz_ref.amplitude

    # slice selective gradient drafts for complex gradient blocks:
    # widen the flat top by ringdown + dead time so the RF fits inside it
    t_exwd = params['t_ex'] + scanner_parameters.rf_ringdown_time + scanner_parameters.rf_dead_time
    t_refwd = params['t_ref'] + scanner_parameters.rf_ringdown_time + scanner_parameters.rf_dead_time

    gz90 = make_trapezoid(channel="z", system=scanner_parameters, amplitude=gz_ex.amplitude,
                          flat_time=t_exwd, rise_time=params['dG'])
    gz180 = make_trapezoid(channel="z", system=scanner_parameters, amplitude=gz_ref.amplitude,
                           flat_time=t_refwd, rise_time=params['dG'])

    # generate basic gx readout gradient - G_read
    k_read = np.double(params['Nf']) / np.double(params['FoV_f'])
    t_gx = np.ceil(readout_time / scanner_parameters.grad_raster_time) * scanner_parameters.grad_raster_time
    gx = make_trapezoid(channel='x', system=scanner_parameters, flat_area=k_read,
                        flat_time=t_gx + 2 * scanner_parameters.adc_dead_time)

    # generate gx spoiler gradient - G_crr
    gx_spoil = make_trapezoid(channel='x', system=scanner_parameters, area=gx.area, flat_time=params['dG'],
                              rise_time=params['dG'])

    # read prephase gradient - G_pre (1.5x the readout area)
    gx_pre = make_trapezoid(channel="x", system=scanner_parameters, area=gx.area * 1.50,
                            rise_time=params['dG'])

    # rephase gradient draft after 90 RF pulse  - G_reph
    gz_reph = make_trapezoid(channel="z", system=scanner_parameters, area=gz_ex.area * 0.25,
                             flat_time=calc_duration(gx_pre), rise_time=params['dG'])

    # spoil gradient around 180 RF pulse - G_crs
    # NOTE(review): t_gz_spoil is computed but never used below -- confirm.
    t_gz_spoil = np.ceil(
        params['t_ref'] / 2 / scanner_parameters.grad_raster_time) * scanner_parameters.grad_raster_time
    gz_spoil = make_trapezoid(channel='z', system=scanner_parameters, area=gz90.area * 0.75, rise_time=params['dG'],
                              flat_time=params['dG'])

    # spoil gradient G_sps (end-of-train spoiler, 4x slice-select area)
    gz_cr = make_trapezoid(channel='z', system=scanner_parameters, area=gz90.area * 4, rise_time=params['dG'])

    # Creation of ADC
    adc = make_adc(num_samples=params['Nf'], duration=t_gx, delay=scanner_parameters.adc_dead_time,
                   system=scanner_parameters)


    #--------------------------
    # k-space filling quantification
    #--------------------------

    k_phase = np.double(params['Np']) / np.double(params['FoV_ph'])
    k_steps_PE = pgu.create_k_steps(k_phase, np.int16(params['Np']))  # list of phase encoding gradients

    params['part_fourier_factor'] = 1 # TODO add to GUI
    n_ex = math.floor(params['Np'] / params['ETL'])  # number of excitations
    k_space_order_filing = TSE_k_space_fill(n_ex, np.int32(params['ETL']), np.int32(params['Np']), np.int32(
        params['N_TE']), params['part_fourier_factor'])  # TODO: create additional functions for different k-space filling orders
    k_space_save = {'k_space_order': k_space_order_filing}

    output_filename = "k_space_order_filing"  # save for reconstruction outside the jemris
    # output_filename = "TSE_T1" + datetime.now().strftime("%Y%m%d_%H%M%S")
    with open(output_filename + ".json", 'w') as outfile:
        j.dump(k_space_save, outfile)

    #--------------------------
    # DELAYS
    #--------------------------

    # Manual tally of one excitation's total duration; reused as eff_time
    # below to distribute slices over the TR.
    block_duration = 0
    block_duration = max(calc_duration(rf90), calc_duration(gz90)) / 2
    block_duration += max(calc_duration(gx_pre), calc_duration(gz_spoil))
    for i in range(np.int32(params['ETL']) - 1):
        block_duration += max(calc_duration(rf180), calc_duration(gz180))
        block_duration += max(calc_duration(gx_spoil), calc_duration(gz_spoil))
        block_duration += max(calc_duration(gx_spoil), calc_duration(adc))
        block_duration += calc_duration(gz_spoil)
    block_duration += max(calc_duration(rf180), calc_duration(gz_spoil))
    block_duration += max(calc_duration(gx_spoil), calc_duration(gz_spoil))
    block_duration += max(calc_duration(gx_spoil), calc_duration(adc))
    block_duration += calc_duration(gz_cr)
    # Fixed per-shot recovery delay TD (200 ms), appended after each echo train.
    params['delay_TD'] = 0.200 # delay    # TODO add to GUI
                                          # TODO
    TD_delay = make_delay(params['delay_TD'])


    #--------------------------
    # CONSTRUCT CONCATENATIONS timings
    #--------------------------

    # Quantification of Effective TE loop
    # eff_time = TE + calc_duration(gx) / 2 + max(calc_duration(gy_pre),calc_duration(gz_spoil)) + calc_duration(gx_spoil) + calc_duration(gz90) / 2
    eff_time = block_duration  # equal to previous!

    # Split the slices over as many concatenations as needed to fit the TR;
    # each concatenation gets every required_concats-th slice.
    max_slices_per_TR = np.floor(params['TR'] / eff_time)
    required_concats = np.int32(np.ceil(params['sl_nb'] / max_slices_per_TR))
    slice_list = list(range(np.int32(params['sl_nb'])))
    slice_list = [slice_list[x::required_concats] for x in range(required_concats)]

    # Calculate the TR fillers (floored down to the coarser raster time)
    tr_pauses = [(params['TR'] / np.double(len(x))) - eff_time for x in slice_list]
    tr_pauses = [
        max(seq.grad_raster_time, seq.rf_raster_time) * np.floor(x / max(seq.grad_raster_time, seq.rf_raster_time)) for
        x in tr_pauses]

    # Generate the TR fillers
    tr_fillers = [make_delay(x) for x in tr_pauses]

    # --------------------------
    # CONSTRUCT SEQUENCE
    # --------------------------

    for k in range(params['Average']):  # Averages
        for curr_concat in range(required_concats):
            for phase_steps in k_space_order_filing:  # each item is the list of phase steps for one shot
                for curr_slice in range(np.int32(params['sl_nb'])):  # Slices
                    # Apply RF offsets
                    n_echo_temp = 0
                    rf90.freq_offset = pulse_offsets90[curr_slice]
                    rf180.freq_offset = pulse_offsets180[curr_slice]
                    # rf90.phase_offset = (rf90_phase - 2 * np.pi * rf90.freq_offset * calc_rf_center(rf90)[0])
                    # rf180.phase_offset = (rf180_phase - 2 * np.pi * rf180.freq_offset * calc_rf_center(rf180)[0])
                    print('curr_concat_' + str(curr_concat))
                    print('curr_slice_' + str(curr_slice))

                    # Excitation + readout prephasing
                    seq.add_block(gz90, rf90)
                    seq.add_block(gz_reph, gx_pre)
                    for phase_step in phase_steps:
                        print('phase step_' + str(phase_step))
                        # Refocusing pulse, then pre-readout phase encode
                        seq.add_block(gz180, rf180)
                        gy_pre = make_trapezoid(channel='y', system=scanner_parameters,
                                                area=-k_steps_PE[phase_step], duration=calc_duration(gz_spoil),
                                                rise_time=params['dG'])
                        print(k_steps_PE[phase_step])

                        seq.add_block(gz_spoil, gx_spoil, gy_pre)
                        seq.add_block(gx, adc)
                        n_echo_temp += 1
                        # Rewinder with opposite-sign phase-encode area
                        gy_pre = make_trapezoid(channel='y', system=scanner_parameters,
                                                area=k_steps_PE[phase_step], duration=calc_duration(gz_spoil),
                                                rise_time=params['dG'])
                        if n_echo_temp == np.int32(params['ETL']):
                            # Last echo of the train: use the big gz_cr spoiler
                            seq.add_block(gz_cr, gx_spoil, gy_pre)
                        else:
                            seq.add_block(gz_spoil, gx_spoil, gy_pre)
                    seq.add_block(TD_delay)
                    seq.add_block(tr_fillers[curr_concat])

    ok, error_report = seq.check_timing()
    if ok:
        print("Timing check passed successfully")
    else:
        print("Timing check failed. Error listing follows:")
        [print(e) for e in error_report]

    # ======
    # VISUALIZATION
    # ======
    if plot:
        seq.plot()

    # =========
    # WRITE .SEQ
    # =========
    if write_seq: #TODO: create general path
        seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\t2_HASTE\\t2_HASTE_matrix16x16.seq')  # Save to disk
        seq2xml(seq, seq_name='t2_HASTE_matrix16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t2_HASTE')
+
# Script entry point: build the HASTE T2 sequence, plot it and write outputs.
if __name__ == "__main__":
    main(plot=True, write_seq=True)

+ 396 - 0
LF_scanner/pypulseq/seq_examples/new_scripts/write_IR_TSE_T1_T2.py

@@ -0,0 +1,396 @@
+#---------------------------------------------------------------------
+# imports of the libraries
+#---------------------------------------------------------------------
+from math import pi
+import numpy as np
+import math
+import json as j
+
+from pypulseq.Sequence.sequence import Sequence
+from pypulseq.calc_rf_center import calc_rf_center
+from pypulseq.calc_duration import calc_duration
+from pypulseq.make_adc import make_adc
+from pypulseq.make_delay import make_delay
+from pypulseq.make_sinc_pulse import make_sinc_pulse
+from pypulseq.make_gauss_pulse import make_gauss_pulse
+from pypulseq.make_trapezoid import make_trapezoid
+from pypulseq.make_extended_trapezoid import make_extended_trapezoid
+from pypulseq.opts import Opts
+from pypulseq.align import align
+from pypulseq.traj_to_grad import traj_to_grad
+
+from pypulseq.utilities import phase_grad_utils as pgu
+
+from py2jemris.seq2xml import seq2xml
+
def FS_CHESS_block(params, scanner_parameters, g_rf_area, flip_fs):
    """Create a CHESS fat-saturation block.

    Builds a spectrally selective Gauss pulse at the fat resonance offset plus
    x/y spoiler gradients; both spoilers are delayed by the pulse duration so
    they play out after the RF.

    NOTE(review): mutates `params` in place -- 'B0', 'FS_sat_ppm' and
    'FS_pulse_duration' are overwritten with hard-coded values (TODO add to
    GUI). Assumes params['gamma'] is the gyromagnetic ratio in Hz/T so the
    offset comes out in Hz -- TODO confirm.

    Parameters
    ----------
    params : dict
        Sequence parameter dictionary (modified in place, see note above).
    scanner_parameters : Opts
        System limits.
    g_rf_area : float
        Area of each spoiler gradient.
    flip_fs : float
        Fat-saturation flip angle in radians.

    Returns
    -------
    tuple
        (rf_fs, gx_fs, gy_fs): saturation pulse and the two spoilers.
    """
    params['B0'] = 1.5  # TODO add to GUI
    params['FS_sat_ppm'] = -3.45  # TODO add to GUI
    params['FS_pulse_duration'] = 8e-3  # TODO add to GUI
    # Chemical-shift offset of fat relative to water: B0 * ppm * gamma.
    FS_sat_frequency = params['B0'] * 1e-6 * params['FS_sat_ppm'] * params['gamma']

    rf_fs = make_gauss_pulse(flip_angle=flip_fs, system=scanner_parameters, duration=params['FS_pulse_duration'],
                             bandwidth=abs(FS_sat_frequency), freq_offset=FS_sat_frequency)
    gx_fs = make_trapezoid(channel="x", system=scanner_parameters, delay=calc_duration(rf_fs),
                           area= g_rf_area, rise_time=params['dG'])
    gy_fs = make_trapezoid(channel="y", system=scanner_parameters, delay=calc_duration(rf_fs),
                           area= g_rf_area, rise_time=params['dG'])

    return rf_fs, gx_fs, gy_fs
+
def inversion_block(params, scanner_parameters):
    """Create an inversion-recovery preparation block.

    Builds a 180-degree sinc pulse with its slice-select gradient, plus the
    inversion-time (TI) delay rounded up to the gradient raster.

    NOTE(review): mutates `params` in place -- 'IR_time' is overwritten with
    the hard-coded STIR value (0.140 s); a FLAIR value (2.250 s) is left
    commented out (TODO add to GUI).

    Parameters
    ----------
    params : dict
        Sequence parameter dictionary (modified in place, see note above).
    scanner_parameters : Opts
        System limits.

    Returns
    -------
    tuple
        (rf_ir, gz_ir, delay_IR): inversion pulse, its slice-select gradient,
        and the TI delay object.
    """
    params['IR_time'] = 0.140  # STIR # TODO add to GUI
    #params['IR_time'] = 2.250  # FLAIR # TODO add to GUI
    flip_ir = round(180 * pi / 180)
    rf_ir, gz_ir, _ = make_sinc_pulse(flip_angle=flip_ir, system=scanner_parameters, duration=params['t_ref'],
                                      slice_thickness=params['sl_thkn'], apodization=0.3,
                                      time_bw_product=round(params['t_BW_product_ref'], 8), phase_offset=90 * pi / 180,
                                      return_gz=True)
    # Round TI up to an integer number of gradient raster periods.
    delay_IR = np.ceil(params['IR_time'] / scanner_parameters.grad_raster_time) * scanner_parameters.grad_raster_time
    delay_IR = make_delay(delay_IR)

    return rf_ir, gz_ir, delay_IR
+
def SPAIR_block(params, scanner_parameters, g_rf_area, flip_fs=round(110 * pi / 180)):
    """Create a SPAIR fat-suppression block.

    Builds a spectrally selective Gauss pulse at the fat resonance offset plus
    x/y spoiler gradients delayed until the pulse has played out.

    Bug fix: the original body referenced the undefined names ``flip_fs``,
    ``rf_fs``, ``gx_fs`` and ``gy_fs`` (guaranteed NameError), and returned
    objects it never built. ``flip_fs`` is now a defaulted parameter (the same
    ~110-degree angle the scripts use for CHESS) and the function returns the
    SPAIR pulse and spoilers it actually creates.

    NOTE(review): mutates `params` in place -- 'B0', 'FS_sat_ppm',
    'FS_pulse_duration' and 'IR_time' are overwritten with hard-coded values
    (TODO add to GUI). Assumes params['gamma'] is the gyromagnetic ratio in
    Hz/T -- TODO confirm.

    Parameters
    ----------
    params : dict
        Sequence parameter dictionary (modified in place, see note above).
    scanner_parameters : Opts
        System limits.
    g_rf_area : float
        Area of each spoiler gradient.
    flip_fs : float, optional
        Saturation flip angle in radians (default: round(110 * pi / 180)).

    Returns
    -------
    tuple
        (rf_SPAIR, gx_SPAIR, gy_SPAIR): saturation pulse and the two spoilers.
    """
    params['B0'] = 1.5  # TODO add to GUI
    params['FS_sat_ppm'] = -3.45  # TODO add to GUI
    params['FS_pulse_duration'] = 8e-3  # TODO add to GUI
    params['IR_time'] = 0.140  # SPAIR # TODO add to GUI

    # Chemical-shift offset of fat relative to water: B0 * ppm * gamma.
    FS_sat_frequency = params['B0'] * 1e-6 * params['FS_sat_ppm'] * params['gamma']

    rf_SPAIR = make_gauss_pulse(flip_angle=flip_fs, system=scanner_parameters,
                                duration=params['FS_pulse_duration'],
                                bandwidth=abs(FS_sat_frequency), freq_offset=FS_sat_frequency)
    gx_SPAIR = make_trapezoid(channel="x", system=scanner_parameters, delay=calc_duration(rf_SPAIR),
                              area=g_rf_area, rise_time=params['dG'])
    gy_SPAIR = make_trapezoid(channel="y", system=scanner_parameters, delay=calc_duration(rf_SPAIR),
                              area=g_rf_area, rise_time=params['dG'])

    # NOTE(review): delay_IR is built for the SPAIR inversion time but was
    # never part of the return value; callers currently obtain the TI delay
    # from inversion_block(). Consider returning it as well.
    delay_IR = np.ceil(params['IR_time'] / scanner_parameters.grad_raster_time) * scanner_parameters.grad_raster_time
    delay_IR = make_delay(delay_IR)

    return rf_SPAIR, gx_SPAIR, gy_SPAIR
+
+
def TSE_k_space_fill(n_ex, ETL, k_steps, TE_eff_number):
    """Build the phase-encoding acquisition order for a TSE k-space fill.

    Lines are filled in linear (descending, interleaved) order and rotated so
    that the central k-space line is acquired at echo number
    ``TE_eff_number`` (the effective TE).

    Parameters
    ----------
    n_ex : int
        Number of excitations (shots).
    ETL : int
        Echo train length (lines acquired per shot).
    k_steps : int
        Total number of phase-encoding steps (Np).
    TE_eff_number : int
        1-based echo index that should acquire the k-space center.

    Returns
    -------
    list[list[int]]
        One list of phase-encoding indices per excitation.
    """
    # Descending base order for the first shot: every n_ex-th line.
    base_order = [(ETL - 1) * n_ex - i * n_ex for i in range(ETL)]

    # Echo index currently holding the central line, and the rotation needed
    # to move it onto the requested effective echo.
    central_index = base_order.index(np.int32(k_steps / 2))
    shift = central_index - TE_eff_number + 1

    # Bug fix: the original `if shift > 0` / `elif shift < 0` branches were
    # byte-identical dead duplication. A single rotation handles both signs,
    # and shift == 0 is a no-op, so behavior is unchanged.
    if shift:
        base_order = base_order[shift:] + base_order[:shift]

    # Each subsequent excitation acquires the neighbouring interleaved lines
    # (offset 0 is the first shot, matching the original construction).
    return [[line + offset for line in base_order] for offset in range(n_ex)]
+
+
+def main(plot: bool, write_seq: bool, weightning, FS: bool, IR: bool):
+
+    # Reading json file according to the weightning of the image
+    if weightning == 'T1': #TODO: create general path
+        if FS:
+            with open('C:\MRI_seq_files_mess\FS_TSE_T1.json', 'rb') as f:
+                params = j.load(f)
+        elif IR:
+            with open('C:\MRI_seq_files_mess\IR_TSE_T1.json', 'rb') as f:
+                params = j.load(f)
+        else:
+            with open('C:\MRI_seq_files_mess\TSE_T1.json', 'rb') as f:
+                params = j.load(f)
+
+    elif weightning == 'T2':
+        if FS:
+            with open('C:\MRI_seq_files_mess\FS_TSE_T2.json', 'rb') as f:
+                params = j.load(f)
+        elif IR:
+            with open('C:\MRI_seq_files_mess\IR_TSE_T2.json', 'rb') as f:
+                params = j.load(f)
+        else:
+            with open('C:\MRI_seq_files_mess\TSE_T2.json', 'rb') as f:
+                params = j.load(f)
+
+    elif weightning == 'PD':
+        if FS:
+            with open('C:\MRI_seq_files_mess\FS_TSE_PD.json', 'rb') as f:
+                params = j.load(f)
+        else:
+            with open('C:\MRI_seq_files_mess\TSE_PD.json', 'rb') as f:
+                params = j.load(f)
+    else:
+        print('Please choose image weightning')
+
+    readout_time = round(1 / params['BW_pixel'], 8)
+
+    # --------------------------
+    # Set system limits
+    # --------------------------
+
+    scanner_parameters = Opts(
+        max_grad=37.8,
+        grad_unit="mT/m",
+        max_slew=121,
+        slew_unit="T/m/s",
+        rf_ringdown_time=params['rf_ringdown_time'][0], #TODO: change format from list to float in GUI
+        rf_dead_time=params['rf_dead_time'][0], #TODO: change format from list to float in GUI
+        adc_dead_time=params['adc_dead_time'][0], #TODO: change format from list to float in GUI
+        rf_raster_time=params['rf_raster_time'],
+        grad_raster_time=params['grad_raster_time'],
+        block_duration_raster=params['grad_raster_time'],
+        adc_raster_time=1 / (params['BW_pixel'] * params['Nf'])
+    )
+    seq = Sequence(scanner_parameters)
+
+    #--------------------------
+    # RF & Gradients
+    #--------------------------
+
+    rf90_phase = np.pi / 2
+    rf180_phase = 0
+
+    flip90 = round(params['FA'] * pi / 180, 3)
+    flip180 = round(180 * pi / 180)
+    flip_fs = round(110 * pi / 180)
+    rf90, gz_ex, _ = make_sinc_pulse(flip_angle=flip90, system=scanner_parameters, duration=params['t_ex'],
+                                     slice_thickness=params['sl_thkn'], apodization=0.3,
+                                     time_bw_product=round(params['t_BW_product_ex'], 8), return_gz=True)
+
+    rf180, gz_ref, _ = make_sinc_pulse(flip_angle=flip180, system=scanner_parameters, duration=params['t_ref'],
+                                       slice_thickness=params['sl_thkn'], apodization=0.3,
+                                       time_bw_product=round(params['t_BW_product_ref'], 8), phase_offset=90 * pi / 180,
+                                       return_gz=True)
+
+    if FS: #TODO add to GUI choise of including or not Fat Sat block
+        g_rf_area = gz_ex.area * 10
+        rf_fs, gx_fs, gy_fs = FS_CHESS_block(params, scanner_parameters, g_rf_area, flip_fs)
+
+    if IR: #TODO add to GUI choise of including or not Inversion block
+        rf_ir, gz_ir, delay_IR = inversion_block(params, scanner_parameters)
+
+    # Prepare RF offsets. This is required for multi-slice acquisition
+    pulse_offsets90 = (np.linspace(0.0, params['sl_nb'] - 1.0, np.int16(params['sl_nb'])) - 0.5 * (
+                np.double(params['sl_nb']) - 1.0)) * (
+                                  params['sl_thkn'] * (100.0 + params['sl_gap']) / 100.0) * gz_ex.amplitude
+    pulse_offsets180 = (np.linspace(0.0, params['sl_nb'] - 1.0, np.int16(params['sl_nb'])) - 0.5 * (
+                np.double(params['sl_nb']) - 1.0)) * (
+                                   params['sl_thkn'] * (100.0 + params['sl_gap']) / 100.0) * gz_ref.amplitude
+
+    # slice selective gradient drafts for complex gradient blocks
+    t_exwd = params['t_ex'] + scanner_parameters.rf_ringdown_time + scanner_parameters.rf_dead_time
+    t_refwd = params['t_ref'] + scanner_parameters.rf_ringdown_time + scanner_parameters.rf_dead_time
+
+    gz90 = make_trapezoid(channel="z", system=scanner_parameters, amplitude=gz_ex.amplitude,
+                          flat_time=t_exwd, rise_time=params['dG'])
+    gz180 = make_trapezoid(channel="z", system=scanner_parameters, amplitude=gz_ref.amplitude,
+                           flat_time=t_refwd, rise_time=params['dG'])
+
+    # generate basic gx readout gradient - G_read
+    k_read = np.double(params['Nf']) / np.double(params['FoV_f'])
+    t_gx = np.ceil(readout_time / scanner_parameters.grad_raster_time) * scanner_parameters.grad_raster_time
+    gx = make_trapezoid(channel='x', system=scanner_parameters, flat_area=k_read,
+                        flat_time=t_gx + 2 * scanner_parameters.adc_dead_time)
+
+    # generate gx spoiler gradient - G_crr
+    gx_spoil = make_trapezoid(channel='x', system=scanner_parameters, area=gx.area, flat_time=params['dG'],
+                              rise_time=params['dG'])
+
+    # read prephase gradient - G_pre
+    gx_pre = make_trapezoid(channel="x", system=scanner_parameters, area=gx.area * 1.50,
+                            rise_time=params['dG'])
+
+    # rephase gradient draft after 90 RF pulse  - G_reph
+    gz_reph = make_trapezoid(channel="z", system=scanner_parameters, area=gz_ex.area * 0.25,
+                             flat_time=calc_duration(gx_pre), rise_time=params['dG'])
+
+    # spoil gradient around 180 RF pulse - G_crs
+    t_gz_spoil = np.ceil(
+        params['t_ref'] / 2 / scanner_parameters.grad_raster_time) * scanner_parameters.grad_raster_time
+    gz_spoil = make_trapezoid(channel='z', system=scanner_parameters, area=gz90.area * 0.75, rise_time=params['dG'],
+                              flat_time=params['dG'])
+
+    # spoil gradient G_sps
+    gz_cr = make_trapezoid(channel='z', system=scanner_parameters, area=gz90.area * 4, rise_time=params['dG'])
+
+    # Creation of ADC
+    adc = make_adc(num_samples=params['Nf'], duration=t_gx, delay=scanner_parameters.adc_dead_time,
+                   system=scanner_parameters)
+
+    #--------------------------
+    # k-space filling quantification
+    #--------------------------
+
+    k_phase = np.double(params['Np']) / np.double(params['FoV_ph'])
+    k_steps_PE = pgu.create_k_steps(k_phase, np.int16(params['Np']))  # list of phase encoding gradients
+
+    n_ex = math.floor(params['Np'] / params['ETL'])  # number of excitations
+    k_space_order_filing = TSE_k_space_fill(n_ex, np.int32(params['ETL']), np.int32(params['Np']), np.int32(
+        params['N_TE']))  # TODO: to create additiolal functions on different k space order filling
+    k_space_order_filing
+
+    #--------------------------
+    # DELAYS
+    #--------------------------
+
+    block_duration = 0
+    block_duration = max(calc_duration(rf90), calc_duration(gz90)) / 2
+    block_duration += max(calc_duration(gx_pre), calc_duration(gz_spoil))
+    for i in range(np.int32(params['ETL']) - 1):
+        block_duration += max(calc_duration(rf180), calc_duration(gz180))
+        block_duration += max(calc_duration(gx_spoil), calc_duration(gz_spoil))
+        block_duration += max(calc_duration(gx_spoil), calc_duration(adc))
+        block_duration += calc_duration(gz_spoil)
+    block_duration += max(calc_duration(rf180), calc_duration(gz_spoil))
+    block_duration += max(calc_duration(gx_spoil), calc_duration(gz_spoil))
+    block_duration += max(calc_duration(gx_spoil), calc_duration(adc))
+    block_duration += calc_duration(gz_cr)
+    if FS:
+        block_duration += calc_duration(gx_fs)
+    if IR:
+        block_duration += max(calc_duration(rf_ir), calc_duration(gz_ir))
+        block_duration += calc_duration(delay_IR)
+    #--------------------------
+    # CONSTRUCT CONCATINATIONS timings
+    #--------------------------
+
+    # Quantification of Effective TE loop
+    # eff_time = TE + calc_duration(gx) / 2 + max(calc_duration(gy_pre),calc_duration(gz_spoil)) + calc_duration(gx_spoil) + calc_duration(gz90) / 2
+    eff_time = block_duration  # equal to previous!
+
+    max_slices_per_TR = np.floor(params['TR'] / eff_time)
+    required_concats = np.int32(np.ceil(params['sl_nb'] / max_slices_per_TR))
+    slice_list = list(range(np.int32(params['sl_nb'])))
+    slice_list = [slice_list[x::required_concats] for x in range(required_concats)]
+
+    # Calculate the TR fillers
+    tr_pauses = [(params['TR'] / np.double(len(x))) - eff_time for x in slice_list]
+    tr_pauses = [
+        max(seq.grad_raster_time, seq.rf_raster_time) * np.floor(x / max(seq.grad_raster_time, seq.rf_raster_time)) for
+        x in tr_pauses]
+
+    # Generate the TR fillers
+    tr_fillers = [make_delay(x) for x in tr_pauses]
+
+    # --------------------------
+    # CONSTRUCT SEQUENCE
+    # --------------------------
+
+    for k in range(params['Average']):  # Averages
+        for curr_concat in range(required_concats):
+            for phase_steps in k_space_order_filing:  # in stead of phase steps list of phase steps
+                for curr_slice in range(np.int32(params['sl_nb'])):  # Slices
+                    # Apply RF offsets
+                    n_echo_temp = 0
+                    rf90.freq_offset = pulse_offsets90[curr_slice]
+                    rf180.freq_offset = pulse_offsets180[curr_slice]
+                    # rf90.phase_offset = (rf90_phase - 2 * np.pi * rf90.freq_offset * calc_rf_center(rf90)[0])
+                    # rf180.phase_offset = (rf180_phase - 2 * np.pi * rf180.freq_offset * calc_rf_center(rf180)[0])
+                    print('curr_concat_' + str(curr_concat))
+                    print('curr_slice_' + str(curr_slice))
+                    if FS:
+                        seq.add_block(gx_fs, gy_fs, rf_fs)
+                    if IR:
+                        seq.add_block(gz_ir, rf_ir)
+                        seq.add_block(delay_IR)
+                    seq.add_block(gz90, rf90)
+                    seq.add_block(gz_reph, gx_pre)
+                    for phase_step in phase_steps:
+                        print('phase step_' + str(phase_step))
+                        seq.add_block(gz180, rf180)
+                        gy_pre = make_trapezoid(channel='y', system=scanner_parameters,
+                                                area=-k_steps_PE[phase_step], duration=calc_duration(gz_spoil),
+                                                rise_time=params['dG'])
+                        print(k_steps_PE[phase_step])
+
+                        seq.add_block(gz_spoil, gx_spoil, gy_pre)
+                        seq.add_block(gx, adc)
+                        n_echo_temp += 1
+                        gy_pre = make_trapezoid(channel='y', system=scanner_parameters,
+                                                area=k_steps_PE[phase_step], duration=calc_duration(gz_spoil),
+                                                rise_time=params['dG'])
+                        if n_echo_temp == np.int32(params['ETL']):
+                            seq.add_block(gz_cr, gx_spoil, gy_pre)
+                        else:
+                            seq.add_block(gz_spoil, gx_spoil, gy_pre)
+                    seq.add_block(tr_fillers[curr_concat])
+
+    ok, error_report = seq.check_timing()
+    if ok:
+        print("Timing check passed successfully")
+    else:
+        print("Timing check failed. Error listing follows:")
+        [print(e) for e in error_report]
+
+    # ======
+    # VISUALIZATION
+    # ======
+    if plot:
+        seq.plot()
+
+    # =========
+    # WRITE .SEQ
+    # =========
+    if write_seq: #TODO: create general path
+        if FS:
+            if weightning == 'T1':
+                seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\t1_FS_TSE\\FS_t1_TSE_matrx16x16.seq')  # Save to disk
+                seq2xml(seq, seq_name='FS_t1_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t1_FS_TSE')
+
+            elif weightning == 'T2':
+                seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\t2_FS_TSE\\FS_t2_TSE_matrx16x16.seq')  # Save to disk
+                seq2xml(seq, seq_name='FS_t2_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t2_FS_TSE')
+
+            elif weightning == 'PD':
+                seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\pd_FS_TSE\\FS_pd_TSE_matrx16x16.seq')  # Save to disk
+                seq2xml(seq, seq_name='FS_pd_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\pd_FS_TSE')
+
+            else:
+                print('Please choose image weightning')
+        elif IR:
+            if weightning == 'T1':
+                seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\t1_IR_TSE\\IR_t1_TSE_matrx16x16.seq')  # Save to disk
+                seq2xml(seq, seq_name='IR_t1_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t1_IR_TSE')
+
+            elif weightning == 'T2':
+                seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\t2_IR_TSE\\IR_t2_TSE_matrx16x16.seq')  # Save to disk
+                seq2xml(seq, seq_name='IR_t2_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t2_IR_TSE')
+            else:
+                print('Please choose image weightning')
+        else:
+            if weightning == 'T1':
+                seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\t1_TSE\\t1_TSE_matrx16x16.seq')  # Save to disk
+                seq2xml(seq, seq_name='t1_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t1_TSE')
+
+            elif weightning == 'T2':
+                seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\t2_TSE\\t2_TSE_matrix16x16.seq')  # Save to disk
+                seq2xml(seq, seq_name='t2_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t2_TSE')
+
+            elif weightning == 'PD':
+                seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\pd_TSE\\pd_TSE_matrix16x16.seq')  # Save to disk
+                seq2xml(seq, seq_name='pd_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\pd_TSE')
+            else:
+                print('Please choose image weightning')
+
# Script entry point: build the T2-weighted TSE sequence with both the
# fat-saturation (FS) and inversion-recovery (IR) preparation blocks disabled.
if __name__ == "__main__":
    main(plot=True, write_seq=True, weightning='T2', FS=False, IR=False)

+ 213 - 0
LF_scanner/pypulseq/seq_examples/new_scripts/write_SE.py

@@ -0,0 +1,213 @@
+#---------------------------------------------------------------------
+# imports of the libraries
+#----------------------------------------------
+from math import pi
+
+import numpy as np
+import math
+import json as j
+
+from MRI_seq.pypulseq.Sequence.sequence import Sequence
+from MRI_seq.pypulseq.calc_rf_center import calc_rf_center
+from MRI_seq.pypulseq.calc_duration import calc_duration
+from MRI_seq.pypulseq.make_adc import make_adc
+from MRI_seq.pypulseq.make_delay import make_delay
+from MRI_seq.pypulseq.make_sinc_pulse import make_sinc_pulse
+from MRI_seq.pypulseq.make_trapezoid import make_trapezoid
+from MRI_seq.pypulseq.opts import Opts
+
+from MRI_seq.py2jemris.seq2xml import seq2xml
+from utilities import phase_grad_utils as pgu
+
def main(plot: bool, write_seq: bool):
    """Build a multi-slice spin-echo (SE) sequence and optionally plot/export it.

    Reads acquisition parameters from a hard-coded JSON file, constructs the
    RF pulses, gradients, delays and the slice-interleaving ("concatenation")
    scheme, assembles the sequence block by block, and finally writes it as a
    Pulseq .seq file plus a JEMRIS XML export.

    Parameters
    ----------
    plot : bool
        If True, display the sequence diagram via ``seq.plot()``.
    write_seq : bool
        If True, save the sequence to disk (.seq and JEMRIS .xml).
    """
    # Read parameters.
    # Fixed invalid escape '\M' in the original literal (same runtime path,
    # but '\M' triggers a SyntaxWarning on modern Python).
    with open('C:\\MRI_seq_files_mess\\TSE\\SE_T1.json', 'rb') as f:  # TODO: create general path
        params = j.load(f)

    tau = params['TE'] / 2  # half echo time: 90->180 and 180->echo spacing
    readout_time = round(1 / params['BW_pixel'], 8)

    # Set the hardware limits and initialize sequence object
    scanner_parameters = Opts(
        max_grad=37.8,
        grad_unit="mT/m",
        max_slew=121,
        slew_unit="T/m/s",
        rf_ringdown_time=params['rf_ringdown_time'],
        rf_dead_time=params['rf_dead_time'],
        adc_dead_time=params['adc_dead_time'],
        rf_raster_time=params['rf_raster_time'],
        grad_raster_time=params['grad_raster_time'],
        block_duration_raster=params['grad_raster_time'],
        adc_raster_time=1 / (params['BW_pixel'] * params['Nf'])
    )
    seq = Sequence(scanner_parameters)

    # RF objects
    # NOTE(review): these phase constants are only referenced by the
    # commented-out per-slice phase-offset lines in the sequence loop below.
    rf90_phase = np.pi / 2
    rf180_phase = 0

    flip90 = round(params['FA'] * pi / 180, 3)
    flip180 = round(180 * pi / 180)
    rf90, gz_ex, _ = make_sinc_pulse(flip_angle=flip90, system=scanner_parameters, duration=params['t_ex'],
                                     slice_thickness=params['sl_thkn'], apodization=0.3,
                                     time_bw_product=round(params['t_BW_product_ex'], 8), return_gz=True)

    rf180, gz_ref, _ = make_sinc_pulse(flip_angle=flip180, system=scanner_parameters, duration=params['t_ref'],
                                       slice_thickness=params['sl_thkn'], apodization=0.3,
                                       time_bw_product=round(params['t_BW_product_ref'], 8),
                                       phase_offset=90 * pi / 180,
                                       return_gz=True)

    # RF durations extended by the hardware dead/ringdown times.
    t_exwd = params['t_ex'] + scanner_parameters.rf_ringdown_time + scanner_parameters.rf_dead_time
    t_refwd = params['t_ref'] + scanner_parameters.rf_ringdown_time + scanner_parameters.rf_dead_time

    gz90 = make_trapezoid(channel="z", system=scanner_parameters, amplitude=gz_ex.amplitude,
                          flat_time=t_exwd, rise_time=params['dG'])
    gz180 = make_trapezoid(channel="z", system=scanner_parameters, amplitude=gz_ref.amplitude,
                           flat_time=t_refwd, rise_time=params['dG'])

    # Prepare RF frequency offsets. This is required for multi-slice
    # acquisition: slices are spread symmetrically around isocenter with a
    # gap of params['sl_gap'] percent of the slice thickness.
    pulse_offsets90 = (np.linspace(0.0, params['sl_nb'] - 1.0, np.int16(params['sl_nb'])) - 0.5 * (
                np.double(params['sl_nb']) - 1.0)) * (
                                  params['sl_thkn'] * (100.0 + params['sl_gap']) / 100.0) * gz90.amplitude
    pulse_offsets180 = (np.linspace(0.0, params['sl_nb'] - 1.0, np.int16(params['sl_nb'])) - 0.5 * (
                np.double(params['sl_nb']) - 1.0)) * (
                                   params['sl_thkn'] * (100.0 + params['sl_gap']) / 100.0) * gz180.amplitude

    # Slice selection gradients
    # gz_reph: rephase gradient after gz90
    t_gz_reph = np.ceil(params['t_ex'] / 2 / params['grad_raster_time']) * params['grad_raster_time']
    gz_reph = make_trapezoid(channel='z', system=scanner_parameters, flat_area=-gz90.area / 2,
                             flat_time=t_gz_reph, rise_time=params['dG'])
    t_gz_spoil = np.ceil(params['t_ref'] / 2 / params['grad_raster_time']) * params['grad_raster_time']

    # gz_spoil: spoiler gradients placed around the 180 pulse
    # gz_spoil = make_trapezoid(channel='z', system=scanner_parameters, area=gz90.area*3,
    #                         duration=t_gz_spoil*2.5)
    gz_spoil = make_trapezoid(channel='z', system=scanner_parameters, area=gz90.area * 0.75, rise_time=params['dG'])
    gz_sps = make_trapezoid(channel='z', system=scanner_parameters, area=gz90.area * 4)

    # READOUT gradients & events

    # k-space extent along the readout direction (1/m)
    k_read = np.double(params['Nf']) / np.double(params['FoV_f'])

    # generate gx readout gradient
    t_gx = np.ceil(readout_time / params['grad_raster_time']) * params['grad_raster_time']
    gx = make_trapezoid(channel='x', system=scanner_parameters, flat_area=k_read,
                        flat_time=t_gx)

    # generate gx_pre readout prephase gradient
    gx_pre = make_trapezoid(channel='x', system=scanner_parameters, area=gx.area * 1.5, rise_time=params['dG'])

    # generate gx spoiler gradient
    gx_spoil = make_trapezoid(channel='x', system=scanner_parameters, area=gx.area, rise_time=params['dG'])

    # Snap readout gradient timing onto the gradient raster before deriving
    # the ADC from it.
    gx.rise_time = np.ceil(gx.rise_time / params['grad_raster_time']) * params['grad_raster_time']
    gx.flat_time = np.ceil(gx.flat_time / params['grad_raster_time']) * params['grad_raster_time']

    adc = make_adc(num_samples=params['Nf'], duration=gx.flat_time, delay=gx.rise_time, system=scanner_parameters)

    # PHASE-ENCODING PREPHASE AND REPHASE
    k_phase = np.double(params['Np']) / np.double(params['FoV_ph'])
    k_steps_PE = pgu.create_k_steps(k_phase, np.int16(params['Np']))

    t_gy_pre = np.ceil(params['t_ex'] / 2 / params['grad_raster_time']) * params['grad_raster_time']
    # (removed a stray no-op `k_phase` expression statement here)
    # Worst-case (largest-area) phase-encode gradient, used for delay sizing.
    gy_pre = make_trapezoid(channel='y', system=scanner_parameters, area=k_steps_PE[-1],
                            duration=t_gy_pre)

    # DELAYS
    # delay1: pad from the end of the prephasers to the start of the 180 block
    # so that the 90-to-180 spacing equals TE/2.
    delay1 = tau - calc_duration(gz90) / 2 - max(calc_duration(gx_pre), calc_duration(gy_pre), calc_duration(gz_reph))
    delay1 -= calc_duration(gz_spoil)
    delay1 -= calc_duration(gz180) / 2
    delay1 = np.ceil(delay1 / params['grad_raster_time']) * params['grad_raster_time']
    delay1 = make_delay(delay1)

    # delay2: pad from the 180 block to the echo center so the 180-to-echo
    # spacing also equals TE/2.
    delay2 = tau - calc_duration(gz180) / 2 - calc_duration(gz_spoil)
    delay2 -= calc_duration(gx_spoil)
    delay2 -= calc_duration(gx) / 2
    delay2 = np.ceil(delay2 / params['grad_raster_time']) * params['grad_raster_time']
    delay2 = make_delay(delay2)

    # delay_TR: remaining time in one TR after one excitation block.
    delay_TR = params['TR'] - calc_duration(gz90) / 2 - calc_duration(gx) / 2 - params['TE']
    delay_TR -= max(calc_duration(gy_pre), calc_duration(gz_sps))
    delay_TR -= calc_duration(gx_spoil)
    delay_TR = np.ceil(delay_TR / params['grad_raster_time']) * params['grad_raster_time']
    delay_TR = make_delay(delay_TR)

    print(f'delay_1: {delay1}')
    # BUGFIX: the original printed delay1 twice; report delay2 here.
    print(f'delay_2: {delay2}')
    print(f'delay_TR: {delay_TR}')

    # CONSTRUCT CONCATENATION timings

    # Effective duration of one excitation block (everything in a TR except
    # the TR filler delay).
    eff_time = params['TR'] - delay_TR.delay

    max_slices_per_TR = np.floor(params['TR'] / eff_time)
    # Guard against a zero result (consistent with the SPAIR TSE script);
    # otherwise the division below would blow up for very long blocks.
    if max_slices_per_TR == 0:
        max_slices_per_TR = 1
    required_concats = np.int32(np.ceil(params['sl_nb'] / max_slices_per_TR))
    slice_list = list(range(np.int32(params['sl_nb'])))
    slice_list = [slice_list[x::required_concats] for x in range(required_concats)]

    # Calculate the TR fillers, floored onto the coarser raster.
    tr_pauses = [(params['TR'] / np.double(len(x))) - eff_time for x in slice_list]
    tr_pauses = [max(params['grad_raster_time'], params['rf_raster_time']) * np.floor(
        x / max(params['grad_raster_time'], params['rf_raster_time'])) for x in tr_pauses]

    # Generate the TR fillers
    tr_fillers = [make_delay(x) for x in tr_pauses]

    # CONSTRUCT SEQUENCE
    # averages -> concatenations -> phase-encode steps -> slices
    for k in range(np.int32(params['Average'])):  # Averages
        for curr_concat in range(required_concats):
            for phase_step in range(k_steps_PE.size):
                for curr_slice in range(np.int32(params['sl_nb'])):  # Slices
                    # Apply per-slice RF frequency offsets
                    rf90.freq_offset = pulse_offsets90[curr_slice]
                    rf180.freq_offset = pulse_offsets180[curr_slice]
                    # rf90.phase_offset = (rf90_phase - 2 * np.pi * rf90.freq_offset * calc_rf_center(rf90)[0])
                    # rf180.phase_offset = (rf180_phase - 2 * np.pi * rf180.freq_offset * calc_rf_center(rf180)[0])

                    seq.add_block(rf90, gz90)
                    t_gy_pre = np.ceil(params['t_ex'] / 2 / params['grad_raster_time']) * params['grad_raster_time']
                    gy_pre = make_trapezoid(channel='y', system=scanner_parameters,
                                            area=k_steps_PE[phase_step], duration=t_gy_pre)
                    seq.add_block(gx_pre, gy_pre, gz_reph)
                    seq.add_block(delay1)
                    seq.add_block(gz_spoil)
                    seq.add_block(rf180, gz180)
                    seq.add_block(gz_spoil)
                    seq.add_block(delay2)
                    seq.add_block(gx_spoil)
                    seq.add_block(gx, adc)
                    seq.add_block(gx_spoil)
                    # Rewind the phase encoding before the end-of-TR spoiler.
                    gy_pre = make_trapezoid(channel='y', system=scanner_parameters,
                                            area=-k_steps_PE[phase_step], duration=t_gy_pre)
                    seq.add_block(gy_pre, gz_sps)
                    seq.add_block(tr_fillers[curr_concat])

    ok, error_report = seq.check_timing()
    if ok:
        print("Timing check passed successfully")
    else:
        print("Timing check failed. Error listing follows:")
        for e in error_report:
            print(e)

    if plot:
        seq.plot()

    if write_seq:
        seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\t1_se\\t1_SE_matrx32x32.seq')
        seq2xml(seq, seq_name='t1_SE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t1_se')
+
# Script entry point: build and export the T1-weighted spin-echo sequence.
if __name__ == "__main__":
    main(plot=True, write_seq=True)

+ 452 - 0
LF_scanner/pypulseq/seq_examples/new_scripts/write_SPAIR_TSE_T2.py

@@ -0,0 +1,452 @@
+#---------------------------------------------------------------------
+# imports of the libraries
+#---------------------------------------------------------------------
+from math import pi
+import numpy as np
+import math
+import json as j
+
+from pypulseq.Sequence.sequence import Sequence
+from pypulseq.calc_rf_center import calc_rf_center
+from pypulseq.calc_duration import calc_duration
+from pypulseq.make_adc import make_adc
+from pypulseq.make_delay import make_delay
+from pypulseq.make_sinc_pulse import make_sinc_pulse
+from pypulseq.make_gauss_pulse import make_gauss_pulse
+from pypulseq.make_adiabatic_pulse import make_adiabatic_pulse
+from pypulseq.make_trapezoid import make_trapezoid
+from pypulseq.make_extended_trapezoid import make_extended_trapezoid
+from pypulseq.opts import Opts
+from pypulseq.align import align
+from pypulseq.traj_to_grad import traj_to_grad
+
+from pypulseq.utilities import phase_grad_utils as pgu
+
+from py2jemris.seq2xml import seq2xml
+
def FS_CHESS_block(params, scanner_parameters, g_rf_area, flip_fs):
    """Build a CHESS fat-saturation block.

    Returns a frequency-offset Gauss pulse tuned to the fat resonance plus
    spoiler gradients on x and y that start right after the pulse.

    NOTE(review): mutates ``params`` in place (sets 'B0').
    """
    params['B0'] = 1.5  # TODO add to GUI
    # Chemical-shift offset of fat relative to water, in Hz.
    fat_offset_hz = params['B0'] * 1e-6 * params['FS_sat_ppm'] * params['gamma']

    rf_fs = make_gauss_pulse(
        flip_angle=flip_fs,
        system=scanner_parameters,
        duration=params['FS_pulse_duration'],
        bandwidth=abs(params['BW_sat']),
        freq_offset=fat_offset_hz,
    )

    spoiler_area = 4 * g_rf_area
    spoiler_delay = calc_duration(rf_fs)
    gx_fs = make_trapezoid(channel="x", system=scanner_parameters,
                           delay=spoiler_delay, area=spoiler_area,
                           rise_time=params['dG'])
    gy_fs = make_trapezoid(channel="y", system=scanner_parameters,
                           delay=spoiler_delay, area=spoiler_area,
                           rise_time=params['dG'])

    return rf_fs, gx_fs, gy_fs
+
def inversion_block(params, scanner_parameters):
    """Build an inversion-recovery preparation block.

    Returns the 180-degree sinc inversion pulse, its slice-select gradient,
    and the TI delay that follows it (rounded up to the gradient raster).
    """
    inversion_flip = round(180 * pi / 180)
    rf_ir, gz_ir, _ = make_sinc_pulse(
        flip_angle=inversion_flip,
        system=scanner_parameters,
        duration=params['t_ref'],
        slice_thickness=params['sl_thkn'],
        apodization=0.3,
        time_bw_product=round(params['t_BW_product_ref'], 8),
        phase_offset=90 * pi / 180,
        return_gz=True,
    )

    # Quantize TI onto the gradient raster (rounding up).
    raster = scanner_parameters.grad_raster_time
    delay_IR = make_delay(np.ceil(params['TI'] / raster) * raster)

    return rf_ir, gz_ir, delay_IR
+
def SPAIR_block(params, scanner_parameters, g_rf_area):
    """Build a SPAIR fat-suppression block.

    Returns a 180-degree Gauss pulse at the fat frequency, x/y spoiler
    gradients starting after the pulse, and the TI delay.

    NOTE(review): mutates ``params`` in place (sets 'B0', 'FS_sat_ppm',
    'FS_pulse_duration' and 'BW_sat').
    """
    params['B0'] = 1.5  # TODO add to GUI
    params['FS_sat_ppm'] = -3.30  # TODO add to GUI
    params['FS_pulse_duration'] = 0.01  # TODO add to GUI
    params['BW_sat'] = -176.26464

    # Chemical-shift offset of fat relative to water, in Hz.
    fat_offset_hz = params['B0'] * 1e-6 * params['FS_sat_ppm'] * params['gamma']
    flip_SPAIR = round(180 * pi / 180)

    rf_SPAIR = make_gauss_pulse(
        flip_angle=flip_SPAIR,
        system=scanner_parameters,
        duration=params['FS_pulse_duration'],
        bandwidth=abs(params['BW_sat']),
        freq_offset=fat_offset_hz,
    )

    spoiler_delay = calc_duration(rf_SPAIR)
    gx_SPAIR = make_trapezoid(channel="x", system=scanner_parameters,
                              delay=spoiler_delay, area=g_rf_area,
                              rise_time=params['dG'])
    gy_SPAIR = make_trapezoid(channel="y", system=scanner_parameters,
                              delay=spoiler_delay, area=g_rf_area,
                              rise_time=params['dG'])

    # Quantize TI onto the gradient raster (rounding up).
    raster = scanner_parameters.grad_raster_time
    delay_IR = make_delay(np.ceil(params['TI'] / raster) * raster)

    return rf_SPAIR, gx_SPAIR, gy_SPAIR, delay_IR
+
+
def TSE_k_space_fill(n_ex, ETL, k_steps, TE_eff_number, order):
    """Build the phase-encoding order for TSE k-space filling.

    Produces one echo-train worth of phase-encode indices per excitation,
    rotated so the central k-space line is acquired at echo number
    ``TE_eff_number`` (effective TE).

    Parameters
    ----------
    n_ex : int
        Number of excitations (echo trains).
    ETL : int
        Echo train length (lines acquired per excitation).
    k_steps : int
        Total number of phase-encoding steps; the central line index is
        ``int(k_steps / 2)``.
    TE_eff_number : int
        1-based echo index at which the central line should be acquired.
    order : str
        'non_linear' interleaves the tail of the train from both ends;
        any other value keeps the linear (rotated) order.

    Returns
    -------
    list[list[int]]
        ``n_ex`` lists of ``ETL`` phase-encode indices each; excitation m
        uses the base train shifted by +m.

    Raises
    ------
    ValueError
        If the central line is not produced by the ``n_ex``/``ETL``
        combination (``list.index`` fails).
    """
    # Descending indices with stride n_ex, ending at 0 (the "zero" line).
    base_train = [(ETL - 1) * n_ex - i * n_ex for i in range(ETL)]

    # Rotate the train so the central line lands on echo TE_eff_number.
    # (The original had identical `shift > 0` / `shift < 0` branches; a
    # single slice-rotation covers both signs.)
    central_num = int(k_steps / 2)
    index_central_line = base_train.index(central_num)
    shift = index_central_line - TE_eff_number + 1
    if shift:
        base_train = base_train[shift:] + base_train[:shift]

    if order == 'non_linear':
        # Keep the head up to (and including) the central line, then take
        # the remaining lines alternately from the front and the back.
        split = (shift - index_central_line) * 2 + 1
        head = base_train[:split]
        tail = base_train[split:]
        for i in range(1, len(tail) // 2 + 1):
            head.append(tail[i - 1])
            head.append(tail[-i])
        # BUGFIX: the original appended tail[i] unconditionally, which
        # duplicated a line when len(tail) was even and raised NameError
        # when the tail was empty; only an odd-length tail leaves a
        # middle element to append.
        if len(tail) % 2 == 1:
            head.append(tail[len(tail) // 2])
        base_train = head

    # Excitation m acquires the base train shifted by +m.
    k_space_order_filing = [base_train]
    for m in range(n_ex - 1):
        k_space_order_filing.append([k + m + 1 for k in base_train])

    return k_space_order_filing
+
+
+def main(plot: bool, write_seq: bool, weightning, FS: bool, IR: bool, SPAIR):
+
+    # Reading json file according to the weightning of the image
+    if weightning == 'T1': #TODO: create general path
+        if FS:
+            with open('C:\MRI_seq_files_mess\TSE\FS_TSE_T1.json', 'rb') as f:
+                params = j.load(f)
+        elif IR:
+            with open('C:\MRI_seq_files_mess\TSE\IR_TSE_T1.json', 'rb') as f:
+                params = j.load(f)
+        else:
+            with open('C:\MRI_seq_files_mess\TSE\TSE_T1.json', 'rb') as f:
+                params = j.load(f)
+
+    elif weightning == 'T2':
+        if FS:
+            with open('C:\MRI_seq_files_mess\TSE\FS_TSE_T2.json', 'rb') as f:
+                params = j.load(f)
+        elif IR:
+            with open('C:\MRI_seq_files_mess\TSE\IR_TSE_T2.json', 'rb') as f:
+                params = j.load(f)
+        elif SPAIR:
+            with open('C:\MRI_seq_files_mess\TSE\SPAIR_TSE_T2.json', 'rb') as f:
+                params = j.load(f)
+        else:
+            with open('C:\MRI_seq_files_mess\TSE\TSE_T2.json', 'rb') as f:
+                params = j.load(f)
+
+    elif weightning == 'PD':
+        if FS:
+            with open('C:\MRI_seq_files_mess\TSE\FS_TSE_PD.json', 'rb') as f:
+                params = j.load(f)
+        else:
+            with open('C:\MRI_seq_files_mess\TSE\TSE_PD.json', 'rb') as f:
+                params = j.load(f)
+    else:
+        print('Please choose image weightning')
+
+    readout_time = round(1 / params['BW_pixel'], 8)
+
+    # --------------------------
+    # Set system limits
+    # --------------------------
+
+    scanner_parameters = Opts(
+        max_grad=37.8,
+        grad_unit="mT/m",
+        max_slew=121,
+        slew_unit="T/m/s",
+        rf_ringdown_time=params['rf_ringdown_time'],
+        rf_dead_time=params['rf_dead_time'],
+        adc_dead_time=params['adc_dead_time'],
+        rf_raster_time=params['rf_raster_time'],
+        grad_raster_time=params['grad_raster_time'],
+        block_duration_raster=params['grad_raster_time'],
+        adc_raster_time=1 / (params['BW_pixel'] * params['Nf'])
+    )
+    seq = Sequence(scanner_parameters)
+
+    #--------------------------
+    # RF & Gradients
+    #--------------------------
+
+    rf90_phase = np.pi / 2
+    rf180_phase = 0
+
+    flip90 = round(params['FA'] * pi / 180, 3)
+    flip180 = round(180 * pi / 180)
+    flip_fs = round(110 * pi / 180)
+    rf90, gz_ex, _ = make_sinc_pulse(flip_angle=flip90, system=scanner_parameters, duration=params['t_ex'],
+                                     slice_thickness=params['sl_thkn'], apodization=0.3,
+                                     time_bw_product=round(params['t_BW_product_ex'], 8), return_gz=True)
+
+    rf180, gz_ref, _ = make_sinc_pulse(flip_angle=flip180, system=scanner_parameters, duration=params['t_ref'],
+                                       slice_thickness=params['sl_thkn'], apodization=0.3,
+                                       time_bw_product=round(params['t_BW_product_ref'], 8), phase_offset=90 * pi / 180,
+                                       return_gz=True)
+
+    if FS: #TODO add to GUI choise of including or not Fat Sat block
+        g_rf_area = gz_ex.area * 10
+        rf_fs, gx_fs, gy_fs = FS_CHESS_block(params, scanner_parameters, g_rf_area, flip_fs)
+
+    if IR: #TODO add to GUI choise of including or not Inversion block
+        rf_ir, gz_ir, delay_IR = inversion_block(params, scanner_parameters)
+
+    if SPAIR: #TODO add to GUI choise of including or not Inversion block
+        g_rf_area = gz_ex.area * 10
+        rf_SPAIR, gx_SPAIR, gy_SPAIR, delay_IR = SPAIR_block(params, scanner_parameters, g_rf_area)
+
+    # Prepare RF offsets. This is required for multi-slice acquisition
+    pulse_offsets90 = (np.linspace(0.0, params['sl_nb'] - 1.0, np.int16(params['sl_nb'])) - 0.5 * (
+                np.double(params['sl_nb']) - 1.0)) * (
+                                  params['sl_thkn'] * (100.0 + params['sl_gap']) / 100.0) * gz_ex.amplitude
+    pulse_offsets180 = (np.linspace(0.0, params['sl_nb'] - 1.0, np.int16(params['sl_nb'])) - 0.5 * (
+                np.double(params['sl_nb']) - 1.0)) * (
+                                   params['sl_thkn'] * (100.0 + params['sl_gap']) / 100.0) * gz_ref.amplitude
+
+    # slice selective gradient drafts for complex gradient blocks
+    t_exwd = params['t_ex'] + scanner_parameters.rf_ringdown_time + scanner_parameters.rf_dead_time
+    t_refwd = params['t_ref'] + scanner_parameters.rf_ringdown_time + scanner_parameters.rf_dead_time
+
+    gz90 = make_trapezoid(channel="z", system=scanner_parameters, amplitude=gz_ex.amplitude,
+                          flat_time=t_exwd, rise_time=params['dG'])
+    gz180 = make_trapezoid(channel="z", system=scanner_parameters, amplitude=gz_ref.amplitude,
+                           flat_time=t_refwd, rise_time=params['dG'])
+
+    # generate basic gx readout gradient - G_read
+    k_read = np.double(params['Nf']) / np.double(params['FoV_f'])
+    t_gx = np.ceil(readout_time / scanner_parameters.grad_raster_time) * scanner_parameters.grad_raster_time
+    gx = make_trapezoid(channel='x', system=scanner_parameters, flat_area=k_read,
+                        flat_time=t_gx + 2 * scanner_parameters.adc_dead_time)
+
+    # generate gx spoiler gradient - G_crr
+    gx_spoil = make_trapezoid(channel='x', system=scanner_parameters, area=gx.area, flat_time=params['dG'],
+                              rise_time=params['dG'])
+
+    # read prephase gradient - G_pre
+    gx_pre = make_trapezoid(channel="x", system=scanner_parameters, area=gx.area * 1.50,
+                            rise_time=params['dG'])
+
+    # rephase gradient draft after 90 RF pulse  - G_reph
+    gz_reph = make_trapezoid(channel="z", system=scanner_parameters, area=gz_ex.area * 0.25,
+                             flat_time=calc_duration(gx_pre), rise_time=params['dG'])
+
+    # spoil gradient around 180 RF pulse - G_crs
+    t_gz_spoil = np.ceil(
+        params['t_ref'] / 2 / scanner_parameters.grad_raster_time) * scanner_parameters.grad_raster_time
+    gz_spoil = make_trapezoid(channel='z', system=scanner_parameters, area=gz90.area * 0.75, rise_time=params['dG'],
+                              flat_time=params['dG'])
+
+    # spoil gradient G_sps
+    gz_cr = make_trapezoid(channel='z', system=scanner_parameters, area=gz90.area * 4, rise_time=params['dG'])
+    gz_cr2 = make_trapezoid(channel='z', system=scanner_parameters, area=gz90.area * 12, rise_time=params['dG'])
+
+    # Creation of ADC
+    adc = make_adc(num_samples=params['Nf'], duration=t_gx, delay=scanner_parameters.adc_dead_time,
+                   system=scanner_parameters)
+
+    #--------------------------
+    # k-space filling quantification
+    #--------------------------
+
+    k_phase = np.double(params['Np']) / np.double(params['FoV_ph'])
+    k_steps_PE = pgu.create_k_steps(k_phase, np.int16(params['Np']))  # list of phase encoding gradients
+
+    n_ex = math.floor(params['Np'] / params['ETL'])  # number of excitations
+    k_space_order_filing = TSE_k_space_fill(n_ex, np.int32(params['ETL']), np.int32(params['Np']), np.int32(
+        params['N_TE']), 'non_linear')  # TODO: to create additiolal functions on different k space order filling
+    k_space_save = {'k_space_order': k_space_order_filing}
+
+    output_filename = "k_space_order_filing"  #save for reconstruction outside the jemris
+    # output_filename = "TSE_T1" + datetime.now().strftime("%Y%m%d_%H%M%S")
+    with open(output_filename + ".json", 'w') as outfile:
+        j.dump(k_space_save, outfile)
+
+
+
+    #--------------------------
+    # DELAYS
+    #--------------------------
+
+    block_duration = 0
+    block_duration = max(calc_duration(rf90), calc_duration(gz90)) / 2
+    block_duration += max(calc_duration(gx_pre), calc_duration(gz_spoil))
+    for i in range(np.int32(params['ETL']) - 1):
+        block_duration += max(calc_duration(rf180), calc_duration(gz180))
+        block_duration += max(calc_duration(gx_spoil), calc_duration(gz_spoil))
+        block_duration += max(calc_duration(gx_spoil), calc_duration(adc))
+        block_duration += calc_duration(gz_spoil)
+    block_duration += max(calc_duration(rf180), calc_duration(gz_spoil))
+    block_duration += max(calc_duration(gx_spoil), calc_duration(gz_spoil))
+    block_duration += max(calc_duration(gx_spoil), calc_duration(adc))
+    block_duration += calc_duration(gz_cr)
+    block_duration += calc_duration(gz_cr2)
+
+    if FS:
+        block_duration += calc_duration(gx_fs)
+    if IR:
+        block_duration += max(calc_duration(rf_ir), calc_duration(gz_ir))
+        block_duration += calc_duration(delay_IR)
+    if SPAIR:
+        block_duration += calc_duration(gx_SPAIR)
+        block_duration += calc_duration(delay_IR)
+    #--------------------------
+    # CONSTRUCT CONCATINATIONS timings
+    #--------------------------
+
+    # Quantification of Effective TE loop
+    # eff_time = TE + calc_duration(gx) / 2 + max(calc_duration(gy_pre),calc_duration(gz_spoil)) + calc_duration(gx_spoil) + calc_duration(gz90) / 2
+    eff_time = block_duration  # equal to previous!
+
+    max_slices_per_TR = np.floor(params['TR'] / eff_time)
+    if max_slices_per_TR == 0:
+        max_slices_per_TR = 1
+    required_concats = np.int32(np.ceil(params['sl_nb'] / max_slices_per_TR))
+    slice_list = list(range(np.int32(params['sl_nb'])))
+    slice_list = [slice_list[x::required_concats] for x in range(required_concats)]
+
+    # Calculate the TR fillers
+    tr_pauses = [(params['TR'] / np.double(len(x))) - eff_time for x in slice_list]
+    tr_pauses = [
+        max(seq.grad_raster_time, seq.rf_raster_time) * np.floor(x / max(seq.grad_raster_time, seq.rf_raster_time)) for
+        x in tr_pauses]
+
+    # Generate the TR fillers
+    tr_fillers = [make_delay(x) for x in tr_pauses]
+
+    # --------------------------
+    # CONSTRUCT SEQUENCE
+    # --------------------------
+
+    for k in range(params['Average']):  # Averages
+        for curr_concat in range(required_concats):
+            for phase_steps in k_space_order_filing:  # in stead of phase steps list of phase steps
+                for curr_slice in range(np.int32(params['sl_nb'])):  # Slices
+                    # Apply RF offsets
+                    n_echo_temp = 0
+                    rf90.freq_offset = pulse_offsets90[curr_slice]
+                    rf180.freq_offset = pulse_offsets180[curr_slice]
+                    rf90.phase_offset = (rf90_phase - 2 * np.pi * rf90.freq_offset * calc_rf_center(rf90)[0])
+                    rf180.phase_offset = (rf180_phase - 2 * np.pi * rf180.freq_offset * calc_rf_center(rf180)[0])
+                    print('curr_concat_' + str(curr_concat))
+                    print('curr_slice_' + str(curr_slice))
+                    crasher_flag = True
+                    if FS:
+                        seq.add_block(gz_cr2)
+                        seq.add_block(gx_fs, gy_fs, rf_fs)
+                        crasher_flag = False
+
+                    if IR:
+                        seq.add_block(gz_cr2)
+                        seq.add_block(gz_ir, rf_ir)
+                        seq.add_block(delay_IR)
+                        crasher_flag = False
+
+                    if SPAIR:
+                        seq.add_block(gz_cr2)
+                        seq.add_block(gx_SPAIR, gy_SPAIR, rf_SPAIR)
+                        seq.add_block(delay_IR)
+                        crasher_flag = False
+
+                    if crasher_flag:
+                        seq.add_block(gz_cr2)
+
+                    seq.add_block(gz90, rf90)
+                    seq.add_block(gz_reph, gx_pre)
+                    for phase_step in phase_steps:
+                        print('phase step_' + str(phase_step))
+                        seq.add_block(gz180, rf180)
+                        gy_pre = make_trapezoid(channel='y', system=scanner_parameters,
+                                                area=-k_steps_PE[phase_step], duration=calc_duration(gz_spoil),
+                                                rise_time=params['dG'])
+                        print(k_steps_PE[phase_step])
+
+                        seq.add_block(gz_spoil, gx_spoil, gy_pre)
+                        seq.add_block(gx, adc)
+                        n_echo_temp += 1
+                        gy_pre = make_trapezoid(channel='y', system=scanner_parameters,
+                                                area=k_steps_PE[phase_step], duration=calc_duration(gz_spoil),
+                                                rise_time=params['dG'])
+                        if n_echo_temp == np.int32(params['ETL']):
+                            seq.add_block(gz_cr, gx_spoil, gy_pre)
+                        else:
+                            seq.add_block(gz_spoil, gx_spoil, gy_pre)
+                    seq.add_block(tr_fillers[curr_concat])
+
+    ok, error_report = seq.check_timing()
+    if ok:
+        print("Timing check passed successfully")
+    else:
+        print("Timing check failed. Error listing follows:")
+        [print(e) for e in error_report]
+
+    # ======
+    # VISUALIZATION
+    # ======
+    if plot:
+        seq.plot()
+
+    # =========
+    # WRITE .SEQ
+    # =========
+    if write_seq: #TODO: create general path
+        if FS:
+            if weightning == 'T1':
+                seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\t1_FS_TSE\\FS_t1_TSE_matrix32x32.seq')  # Save to disk
+                seq2xml(seq, seq_name='FS_t1_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t1_FS_TSE')
+
+            elif weightning == 'T2':
+                seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\t2_FS_TSE\\FS_t2_TSE_matrix32x32.seq')  # Save to disk
+                seq2xml(seq, seq_name='FS_t2_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t2_FS_TSE')
+
+            elif weightning == 'PD':
+                seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\pd_FS_TSE\\FS_pd_TSE_matrix32x32.seq')  # Save to disk
+                seq2xml(seq, seq_name='FS_pd_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\pd_FS_TSE')
+
+            else:
+                print('Please choose image weightning')
+        elif IR:
+            if weightning == 'T1':
+                seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\t1_IR_TSE\\IR_t1_TSE_matrix32x32.seq')  # Save to disk
+                seq2xml(seq, seq_name='IR_t1_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t1_IR_TSE')
+
+            elif weightning == 'T2':
+                seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\t2_IR_TSE\\IR_t2_TSE_matrix32x32.seq')  # Save to disk
+                seq2xml(seq, seq_name='IR_t2_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t2_IR_TSE')
+            else:
+                print('Please choose image weightning')
+        elif SPAIR:
+            if weightning == 'T2':
+                seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\t2_SPAIR_TSE\\SPAIR_t2_TSE_matrix32x32.seq')  # Save to disk
+                seq2xml(seq, seq_name='SPAIR_t2_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t2_SPAIR_TSE')
+
+        else:
+            if weightning == 'T1':
+                seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\t1_TSE\\t1_TSE_matrix32x32.seq')  # Save to disk
+                seq2xml(seq, seq_name='t1_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t1_TSE')
+
+            elif weightning == 'T2':
+                seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\t2_TSE\\t2_TSE_matrix32x32.seq')  # Save to disk
+                seq2xml(seq, seq_name='t2_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t2_TSE')
+
+            elif weightning == 'PD':
+                seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\pd_TSE\\pd_TSE_matrix32x32.seq')  # Save to disk
+                seq2xml(seq, seq_name='pd_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\pd_TSE')
+            else:
+                print('Please choose image weightning')
+
# Script entry point: SPAIR fat-suppressed, T2-weighted TSE with plotting
# and .seq/.xml export enabled.
if __name__ == "__main__":
    main(plot=True, write_seq=True, weightning='T2', FS=False, IR=False, SPAIR=True)

+ 289 - 0
LF_scanner/pypulseq/seq_examples/new_scripts/write_TSE_T1_T2_PD.py

@@ -0,0 +1,289 @@
+#---------------------------------------------------------------------
+# imports of the libraries
+#---------------------------------------------------------------------
+from math import pi
+import numpy as np
+import math
+import json as j
+
+from pypulseq.Sequence.sequence import Sequence
+from pypulseq.calc_rf_center import calc_rf_center
+from pypulseq.calc_duration import calc_duration
+from pypulseq.make_adc import make_adc
+from pypulseq.make_delay import make_delay
+from pypulseq.make_sinc_pulse import make_sinc_pulse
+from pypulseq.make_trapezoid import make_trapezoid
+from pypulseq.make_extended_trapezoid import make_extended_trapezoid
+from pypulseq.opts import Opts
+from pypulseq.align import align
+from pypulseq.traj_to_grad import traj_to_grad
+
+from pypulseq.utilities import phase_grad_utils as pgu
+
+from py2jemris.seq2xml import seq2xml
+
+
def TSE_k_space_fill(n_ex, ETL, k_steps, TE_eff_number):
    """Return the phase-encoding order for linear TSE k-space filling.

    Builds one echo train of ``ETL`` line indices spaced ``n_ex`` apart
    (descending, ending at 0), rotates it so that the central k-space line
    is acquired at echo number ``TE_eff_number`` (the effective TE), then
    replicates the train for the remaining excitations with an offset of
    +1 line per excitation.

    Parameters
    ----------
    n_ex : int
        Number of excitations (echo trains).
    ETL : int
        Echo train length (lines acquired per excitation).
    k_steps : int
        Total number of phase-encoding steps (Np).
    TE_eff_number : int
        1-based echo index at which the central line must be acquired.

    Returns
    -------
    list[list[int]]
        ``n_ex`` lists of ``ETL`` phase-encoding line indices each.

    Raises
    ------
    ValueError
        If the central line ``k_steps // 2`` is not a multiple of ``n_ex``
        reachable by the base train.
    """
    # Base train: descending multiples of n_ex, ending at 0.
    base_train = [(ETL - 1 - i) * n_ex for i in range(ETL)]

    # Rotate left so the central line lands on the effective-TE echo.
    # The original code had identical branches for positive and negative
    # shifts; a single slice-rotation covers both (and shift == 0 is a no-op).
    central_line = int(k_steps / 2)
    shift = base_train.index(central_line) - TE_eff_number + 1
    base_train = base_train[shift:] + base_train[:shift]

    # One shifted copy per excitation: row i acquires base_train + i.
    return [[line + offset for line in base_train] for offset in range(n_ex)]
+
def partial_Fourier(a):
    """Placeholder for partial-Fourier reconstruction: returns *a* unchanged."""
    result = a  # no partial-Fourier handling implemented yet
    return result
+
+
def main(plot: bool, write_seq: bool, weightning: str):
    """Build a multi-slice Turbo Spin Echo (TSE) sequence with T1/T2/PD weighting.

    Loads scan parameters from a hard-coded JSON file selected by
    ``weightning``, constructs the RF pulses, gradients and ADC, assembles the
    echo trains over averages/concatenations/slices, and optionally plots the
    sequence and writes the ``.seq`` file plus a JEMRIS XML export.

    Parameters
    ----------
    plot : bool
        If True, display the sequence diagram via ``seq.plot()``.
    write_seq : bool
        If True, write the ``.seq`` and ``.xml`` outputs to disk.
    weightning : str
        Image weighting selector: 'T1', 'T2' or 'PD'.
        NOTE(review): any other value only prints a message and leaves
        ``params`` unassigned, so the next statement raises NameError.
    """

    # Reading json file according to the weightning of the image.
    # NOTE(review): backslashes in these paths are not escaped; raw strings
    # (r'...') would be safer.
    if weightning == 'T1': #TODO: create general path
        with open('C:\MRI_seq_files_mess\TSE_T1.json', 'rb') as f:
            params = j.load(f)

    elif weightning == 'T2':
        with open('C:\MRI_seq_files_mess\TSE_T2.json', 'rb') as f:
            params = j.load(f)

    elif weightning == 'PD':
        with open('C:\MRI_seq_files_mess\TSE_PD.json', 'rb') as f:
            params = j.load(f)
    else:
        print('Please choose image weightning')

    # Readout duration: one acquisition lasts 1 / (per-pixel bandwidth).
    readout_time = round(1 / params['BW_pixel'], 8)

    # --------------------------
    # Set system limits
    # --------------------------

    scanner_parameters = Opts(
        max_grad=37.8,
        grad_unit="mT/m",
        max_slew=121,
        slew_unit="T/m/s",
        rf_ringdown_time=params['rf_ringdown_time'][0],
        rf_dead_time=params['rf_dead_time'][0],
        adc_dead_time=params['adc_dead_time'][0],
        rf_raster_time=params['rf_raster_time'],
        grad_raster_time=params['grad_raster_time'],
        block_duration_raster=params['grad_raster_time'],
        adc_raster_time=1 / (params['BW_pixel'] * params['Nf'])
    )
    seq = Sequence(scanner_parameters)

    #--------------------------
    # RF & Gradients
    #--------------------------

    # NOTE(review): these two phases are unused in this variant because the
    # per-slice phase-offset lines in the sequence loop below are commented out.
    rf90_phase = np.pi / 2
    rf180_phase = 0

    flip90 = round(params['FA'] * pi / 180, 3)
    flip180 = round(180 * pi / 180)
    # 90-degree excitation pulse with its slice-selective gradient.
    rf90, gz_ex, _ = make_sinc_pulse(flip_angle=flip90, system=scanner_parameters, duration=params['t_ex'],
                                     slice_thickness=params['sl_thkn'], apodization=0.3,
                                     time_bw_product=round(params['t_BW_product_ex'], 8), return_gz=True)

    # 180-degree refocusing pulse; 90-degree phase offset gives the CPMG condition.
    rf180, gz_ref, _ = make_sinc_pulse(flip_angle=flip180, system=scanner_parameters, duration=params['t_ref'],
                                       slice_thickness=params['sl_thkn'], apodization=0.3,
                                       time_bw_product=round(params['t_BW_product_ref'], 8), phase_offset=90 * pi / 180,
                                       return_gz=True)

    # Prepare RF frequency offsets. This is required for multi-slice
    # acquisition: slices are spread symmetrically around isocentre with a
    # gap of sl_gap percent of the slice thickness.
    pulse_offsets90 = (np.linspace(0.0, params['sl_nb'] - 1.0, np.int16(params['sl_nb'])) - 0.5 * (
                np.double(params['sl_nb']) - 1.0)) * (
                                  params['sl_thkn'] * (100.0 + params['sl_gap']) / 100.0) * gz_ex.amplitude
    pulse_offsets180 = (np.linspace(0.0, params['sl_nb'] - 1.0, np.int16(params['sl_nb'])) - 0.5 * (
                np.double(params['sl_nb']) - 1.0)) * (
                                   params['sl_thkn'] * (100.0 + params['sl_gap']) / 100.0) * gz_ref.amplitude

    # Slice-selective gradient drafts for complex gradient blocks; flat
    # times are widened by the RF ringdown and dead times.
    t_exwd = params['t_ex'] + scanner_parameters.rf_ringdown_time + scanner_parameters.rf_dead_time
    t_refwd = params['t_ref'] + scanner_parameters.rf_ringdown_time + scanner_parameters.rf_dead_time

    gz90 = make_trapezoid(channel="z", system=scanner_parameters, amplitude=gz_ex.amplitude,
                          flat_time=t_exwd, rise_time=params['dG'])
    gz180 = make_trapezoid(channel="z", system=scanner_parameters, amplitude=gz_ref.amplitude,
                           flat_time=t_refwd, rise_time=params['dG'])

    # Generate basic gx readout gradient - G_read.
    # Flat time is rounded up to the gradient raster and padded by the ADC
    # dead time on both sides.
    k_read = np.double(params['Nf']) / np.double(params['FoV_f'])
    t_gx = np.ceil(readout_time / scanner_parameters.grad_raster_time) * scanner_parameters.grad_raster_time
    gx = make_trapezoid(channel='x', system=scanner_parameters, flat_area=k_read,
                        flat_time=t_gx + 2 * scanner_parameters.adc_dead_time)

    # generate gx spoiler gradient - G_crr
    gx_spoil = make_trapezoid(channel='x', system=scanner_parameters, area=gx.area, flat_time=params['dG'],
                              rise_time=params['dG'])

    # Read prephase gradient - G_pre (1.5x the readout area).
    gx_pre = make_trapezoid(channel="x", system=scanner_parameters, area=gx.area * 1.50,
                            rise_time=params['dG'])

    # rephase gradient draft after 90 RF pulse  - G_reph
    gz_reph = make_trapezoid(channel="z", system=scanner_parameters, area=gz_ex.area * 0.25,
                             flat_time=calc_duration(gx_pre), rise_time=params['dG'])

    # Spoil gradient around 180 RF pulse - G_crs.
    # NOTE(review): t_gz_spoil is computed but never used (gz_spoil uses
    # flat_time=params['dG'] instead) -- likely leftover; confirm intent.
    t_gz_spoil = np.ceil(
        params['t_ref'] / 2 / scanner_parameters.grad_raster_time) * scanner_parameters.grad_raster_time
    gz_spoil = make_trapezoid(channel='z', system=scanner_parameters, area=gz90.area * 0.75, rise_time=params['dG'],
                              flat_time=params['dG'])

    # End-of-train spoil gradient G_sps (4x the excitation gradient area).
    gz_cr = make_trapezoid(channel='z', system=scanner_parameters, area=gz90.area * 4, rise_time=params['dG'])

    # ADC aligned with the readout flat top, delayed by the ADC dead time.
    adc = make_adc(num_samples=params['Nf'], duration=t_gx, delay=scanner_parameters.adc_dead_time,
                   system=scanner_parameters)

    #--------------------------
    # k-space filling quantification
    #--------------------------

    k_phase = np.double(params['Np']) / np.double(params['FoV_ph'])
    k_steps_PE = pgu.create_k_steps(k_phase, np.int16(params['Np']))  # list of phase encoding gradients

    n_ex = math.floor(params['Np'] / params['ETL'])  # number of excitations
    k_space_order_filing = TSE_k_space_fill(n_ex, np.int32(params['ETL']), np.int32(params['Np']), np.int32(
        params['N_TE']))  # TODO: to create additional functions on different k space order filling
    k_space_order_filing  # NOTE(review): no-op expression, likely a notebook leftover; safe to delete

    #--------------------------
    # DELAYS
    #--------------------------

    # Accumulate the duration of one full excitation block (one slice, one
    # echo train) to derive how many slices fit into a single TR.
    block_duration = 0
    block_duration = max(calc_duration(rf90), calc_duration(gz90)) / 2
    block_duration += max(calc_duration(gx_pre), calc_duration(gz_spoil))
    for i in range(np.int32(params['ETL']) - 1):
        block_duration += max(calc_duration(rf180), calc_duration(gz180))
        block_duration += max(calc_duration(gx_spoil), calc_duration(gz_spoil))
        block_duration += max(calc_duration(gx_spoil), calc_duration(adc))
        block_duration += calc_duration(gz_spoil)
    block_duration += max(calc_duration(rf180), calc_duration(gz_spoil))
    block_duration += max(calc_duration(gx_spoil), calc_duration(gz_spoil))
    block_duration += max(calc_duration(gx_spoil), calc_duration(adc))
    block_duration += calc_duration(gz_cr)

    #--------------------------
    # CONSTRUCT CONCATENATIONS timings
    #--------------------------

    # Quantification of Effective TE loop
    # eff_time = TE + calc_duration(gx) / 2 + max(calc_duration(gy_pre),calc_duration(gz_spoil)) + calc_duration(gx_spoil) + calc_duration(gz90) / 2
    eff_time = block_duration  # equal to previous!

    # NOTE(review): unlike the FS/IR/SPAIR variant, there is no guard for
    # max_slices_per_TR == 0 here; if eff_time > TR this divides by zero.
    max_slices_per_TR = np.floor(params['TR'] / eff_time)
    required_concats = np.int32(np.ceil(params['sl_nb'] / max_slices_per_TR))
    slice_list = list(range(np.int32(params['sl_nb'])))
    slice_list = [slice_list[x::required_concats] for x in range(required_concats)]

    # Calculate the TR fillers (idle time per concatenation), rounded down
    # to the coarser of the gradient/RF raster times.
    tr_pauses = [(params['TR'] / np.double(len(x))) - eff_time for x in slice_list]
    tr_pauses = [
        max(seq.grad_raster_time, seq.rf_raster_time) * np.floor(x / max(seq.grad_raster_time, seq.rf_raster_time)) for
        x in tr_pauses]

    # Generate the TR fillers
    tr_fillers = [make_delay(x) for x in tr_pauses]

    # --------------------------
    # CONSTRUCT SEQUENCE
    # --------------------------

    for k in range(params['Average']):  # Averages
        for curr_concat in range(required_concats):
            for phase_steps in k_space_order_filing:  # instead of phase steps, a list of phase steps per excitation
                for curr_slice in range(np.int32(params['sl_nb'])):  # Slices
                    # Apply per-slice RF frequency offsets
                    n_echo_temp = 0
                    rf90.freq_offset = pulse_offsets90[curr_slice]
                    rf180.freq_offset = pulse_offsets180[curr_slice]
                    # rf90.phase_offset = (rf90_phase - 2 * np.pi * rf90.freq_offset * calc_rf_center(rf90)[0])
                    # rf180.phase_offset = (rf180_phase - 2 * np.pi * rf180.freq_offset * calc_rf_center(rf180)[0])
                    print('curr_concat_' + str(curr_concat))
                    print('curr_slice_' + str(curr_slice))

                    # Excitation and read prephasing / slice rephasing.
                    seq.add_block(gz90, rf90)
                    seq.add_block(gz_reph, gx_pre)
                    for phase_step in phase_steps:
                        print('phase step_' + str(phase_step))
                        seq.add_block(gz180, rf180)
                        # Phase-encode blip (negative lobe before readout).
                        gy_pre = make_trapezoid(channel='y', system=scanner_parameters,
                                                area=-k_steps_PE[phase_step], duration=calc_duration(gz_spoil),
                                                rise_time=params['dG'])
                        print(k_steps_PE[phase_step])

                        seq.add_block(gz_spoil, gx_spoil, gy_pre)
                        seq.add_block(gx, adc)
                        n_echo_temp += 1
                        # Rewinder: undo the phase encoding after the echo.
                        gy_pre = make_trapezoid(channel='y', system=scanner_parameters,
                                                area=k_steps_PE[phase_step], duration=calc_duration(gz_spoil),
                                                rise_time=params['dG'])
                        if n_echo_temp == np.int32(params['ETL']):
                            # Last echo of the train: use the big spoiler.
                            seq.add_block(gz_cr, gx_spoil, gy_pre)
                        else:
                            seq.add_block(gz_spoil, gx_spoil, gy_pre)
                    seq.add_block(tr_fillers[curr_concat])

    ok, error_report = seq.check_timing()
    if ok:
        print("Timing check passed successfully")
    else:
        print("Timing check failed. Error listing follows:")
        [print(e) for e in error_report]

    # ======
    # VISUALIZATION
    # ======
    if plot:
        seq.plot()

    # =========
    # WRITE .SEQ
    # =========
    if write_seq: #TODO: create general path
        if weightning == 'T1':
            seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\t1_TSE\\t1_TSE_matrx16x16.seq')  # Save to disk
            seq2xml(seq, seq_name='t1_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t1_TSE')

        elif weightning == 'T2':
            seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\t2_TSE\\t2_TSE_matrx16x16.seq')  # Save to disk
            seq2xml(seq, seq_name='t2_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t2_TSE')

        elif weightning == 'PD':
            seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\pd_TSE\\pd_TSE_matrx16x16.seq')  # Save to disk
            seq2xml(seq, seq_name='pd_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\pd_TSE')

        else:
            print('Please choose image weightning')
+
+
# Script entry point: generate a T1-weighted TSE sequence with plotting
# and .seq/.xml export enabled.
if __name__ == "__main__":
    main(plot=True, write_seq=True, weightning='T1')

+ 289 - 0
LF_scanner/pypulseq/seq_examples/new_scripts/write_TSE_T2_RESTORE.py

@@ -0,0 +1,289 @@
+#---------------------------------------------------------------------
+# imports of the libraries
+#---------------------------------------------------------------------
+from math import pi
+import numpy as np
+import math
+import json as j
+
+from pypulseq.Sequence.sequence import Sequence
+from pypulseq.calc_rf_center import calc_rf_center
+from pypulseq.calc_duration import calc_duration
+from pypulseq.make_adc import make_adc
+from pypulseq.make_delay import make_delay
+from pypulseq.make_sinc_pulse import make_sinc_pulse
+from pypulseq.make_trapezoid import make_trapezoid
+from pypulseq.make_extended_trapezoid import make_extended_trapezoid
+from pypulseq.opts import Opts
+from pypulseq.align import align
+from pypulseq.traj_to_grad import traj_to_grad
+
+from pypulseq.utilities import phase_grad_utils as pgu
+
+from py2jemris.seq2xml import seq2xml
+
+
def TSE_k_space_fill(n_ex, ETL, k_steps, TE_eff_number):
    """Return the phase-encoding order for linear TSE k-space filling.

    Builds one echo train of ``ETL`` line indices spaced ``n_ex`` apart
    (descending, ending at 0), rotates it so that the central k-space line
    is acquired at echo number ``TE_eff_number`` (the effective TE), then
    replicates the train for the remaining excitations with an offset of
    +1 line per excitation.

    Parameters
    ----------
    n_ex : int
        Number of excitations (echo trains).
    ETL : int
        Echo train length (lines acquired per excitation).
    k_steps : int
        Total number of phase-encoding steps (Np).
    TE_eff_number : int
        1-based echo index at which the central line must be acquired.

    Returns
    -------
    list[list[int]]
        ``n_ex`` lists of ``ETL`` phase-encoding line indices each.

    Raises
    ------
    ValueError
        If the central line ``k_steps // 2`` is not a multiple of ``n_ex``
        reachable by the base train.
    """
    # Base train: descending multiples of n_ex, ending at 0.
    base_train = [int((ETL - 1 - i) * n_ex) for i in range(ETL)]

    # Rotate left so the central line lands on the effective-TE echo.
    # The original code had identical branches for positive and negative
    # shifts; a single slice-rotation covers both (and shift == 0 is a no-op).
    central_line = int(k_steps / 2)
    shift = base_train.index(central_line) - TE_eff_number + 1
    base_train = base_train[shift:] + base_train[:shift]

    # One shifted copy per excitation: row i acquires base_train + i.
    return [[line + offset for line in base_train] for offset in range(n_ex)]
+
+
def main(plot: bool, write_seq: bool, weightning: str):
    """Build a T2-weighted multi-slice TSE sequence with a RESTORE module.

    Same structure as the plain TSE generator, but the last echo of each
    train is followed by an extra 180-degree refocusing pulse, a read
    prephaser, and a -90-degree "restore" pulse that drives residual
    transverse magnetization back to +z (driven-equilibrium / RESTORE).

    Parameters
    ----------
    plot : bool
        If True, display the sequence diagram via ``seq.plot()``.
    write_seq : bool
        NOTE(review): accepted but ignored -- the write section below is
        gated only on ``weightning``, not on this flag.
    weightning : str
        Must be 'T2'; any other value only prints a message and leaves
        ``params`` unassigned, so the next statement raises NameError.
    """

    # Reading json file according to the weightning of the image.
    # NOTE(review): backslashes in the path are not escaped; a raw string
    # (r'...') would be safer.
    if weightning == 'T2':
        with open('C:\MRI_seq_files_mess\TSE\RESTORE_T2.json', 'rb') as f:
            params = j.load(f)
    else:
        print('exists only for T2')

    # Readout duration: one acquisition lasts 1 / (per-pixel bandwidth).
    readout_time = round(1 / params['BW_pixel'], 8)

    # --------------------------
    # Set system limits
    # --------------------------

    scanner_parameters = Opts(
        max_grad=37.8,
        grad_unit="mT/m",
        max_slew=121,
        slew_unit="T/m/s",
        rf_ringdown_time=params['rf_ringdown_time'],
        rf_dead_time=params['rf_dead_time'],
        adc_dead_time=params['adc_dead_time'],
        rf_raster_time=params['rf_raster_time'],
        grad_raster_time=params['grad_raster_time'],
        block_duration_raster=params['grad_raster_time'],
        adc_raster_time=1 / (params['BW_pixel'] * params['Nf'])
    )
    seq = Sequence(scanner_parameters)

    #--------------------------
    # RF & Gradients
    #--------------------------

    rf90_phase = np.pi / 2
    rf180_phase = 0

    flip90 = round(params['FA'] * pi / 180, 3)
    flip180 = round(180 * pi / 180)
    # 90-degree excitation pulse with its slice-selective gradient.
    rf90, gz_ex, _ = make_sinc_pulse(flip_angle=flip90, system=scanner_parameters, duration=params['t_ex'],
                                     slice_thickness=params['sl_thkn'], apodization=0.3,
                                     time_bw_product=round(params['t_BW_product_ex'], 8), return_gz=True)

    # 180-degree refocusing pulse; 90-degree phase offset gives the CPMG condition.
    rf180, gz_ref, _ = make_sinc_pulse(flip_angle=flip180, system=scanner_parameters, duration=params['t_ref'],
                                       slice_thickness=params['sl_thkn'], apodization=0.3,
                                       time_bw_product=round(params['t_BW_product_ref'], 8), phase_offset=90 * pi / 180,
                                       return_gz=True)

    # Restore RF pulse: -90 degrees, flips remaining transverse
    # magnetization back along +z at the end of each echo train.
    rf_restore, gz_resto, _ = make_sinc_pulse(flip_angle=-flip90, system=scanner_parameters, duration=params['t_ex'],
                                              slice_thickness=params['sl_thkn'], apodization=0.3,
                                              time_bw_product=round(params['t_BW_product_ex'], 8), return_gz=True)

    # Prepare RF frequency offsets. This is required for multi-slice
    # acquisition: slices spread symmetrically around isocentre with a gap
    # of sl_gap percent of the slice thickness.
    pulse_offsets90 = (np.linspace(0.0, params['sl_nb'] - 1.0, np.int16(params['sl_nb'])) - 0.5 * (
                np.double(params['sl_nb']) - 1.0)) * (
                                  params['sl_thkn'] * (100.0 + params['sl_gap']) / 100.0) * gz_ex.amplitude
    pulse_offsets180 = (np.linspace(0.0, params['sl_nb'] - 1.0, np.int16(params['sl_nb'])) - 0.5 * (
                np.double(params['sl_nb']) - 1.0)) * (
                                   params['sl_thkn'] * (100.0 + params['sl_gap']) / 100.0) * gz_ref.amplitude
    pulse_offsets_restore = (np.linspace(0.0, params['sl_nb'] - 1.0, np.int16(params['sl_nb'])) - 0.5 * (
                np.double(params['sl_nb']) - 1.0)) * (
                                        params['sl_thkn'] * (100.0 + params['sl_gap']) / 100.0) * gz_resto.amplitude

    # Slice-selective gradient drafts for complex gradient blocks; flat
    # times are widened by the RF ringdown and dead times.
    t_exwd = params['t_ex'] + scanner_parameters.rf_ringdown_time + scanner_parameters.rf_dead_time
    t_refwd = params['t_ref'] + scanner_parameters.rf_ringdown_time + scanner_parameters.rf_dead_time
    t_restore = t_exwd

    gz90 = make_trapezoid(channel="z", system=scanner_parameters, amplitude=gz_ex.amplitude,
                          flat_time=t_exwd, rise_time=params['dG'])
    gz180 = make_trapezoid(channel="z", system=scanner_parameters, amplitude=gz_ref.amplitude,
                           flat_time=t_refwd, rise_time=params['dG'])
    gz_restore = make_trapezoid(channel="z", system=scanner_parameters, amplitude=gz_resto.amplitude,
                                flat_time=t_restore, rise_time=params['dG'])

    # Generate basic gx readout gradient - G_read. Flat time rounded up to
    # the gradient raster and padded by the ADC dead time on both sides.
    k_read = np.double(params['Nf']) / np.double(params['FoV_f'])
    t_gx = np.ceil(readout_time / scanner_parameters.grad_raster_time) * scanner_parameters.grad_raster_time
    gx = make_trapezoid(channel='x', system=scanner_parameters, flat_area=k_read,
                        flat_time=t_gx + 2 * scanner_parameters.adc_dead_time)

    # generate gx spoiler gradient - G_crr
    gx_spoil = make_trapezoid(channel='x', system=scanner_parameters, area=gx.area, flat_time=params['dG'],
                              rise_time=params['dG'])

    # Read prephase gradient - G_pre (1.5x the readout area).
    gx_pre = make_trapezoid(channel="x", system=scanner_parameters, area=gx.area * 1.50,
                            rise_time=params['dG'])

    # rephase gradient draft after 90 RF pulse  - G_reph
    gz_reph = make_trapezoid(channel="z", system=scanner_parameters, area=gz_ex.area * 0.25,
                             flat_time=calc_duration(gx_pre), rise_time=params['dG'])

    # Spoil gradient around 180 RF pulse - G_crs.
    # NOTE(review): t_gz_spoil is computed but never used (gz_spoil uses
    # flat_time=params['dG'] instead) -- likely leftover; confirm intent.
    t_gz_spoil = np.ceil(
        params['t_ref'] / 2 / scanner_parameters.grad_raster_time) * scanner_parameters.grad_raster_time
    gz_spoil = make_trapezoid(channel='z', system=scanner_parameters, area=gz90.area * 0.75, rise_time=params['dG'],
                              flat_time=params['dG'])

    # End-of-train spoil gradient G_sps (4x the excitation gradient area).
    gz_cr = make_trapezoid(channel='z', system=scanner_parameters, area=gz90.area * 4, rise_time=params['dG'])

    # ADC aligned with the readout flat top, delayed by the ADC dead time.
    adc = make_adc(num_samples=params['Nf'], duration=t_gx, delay=scanner_parameters.adc_dead_time,
                   system=scanner_parameters)

    #--------------------------
    # k-space filling quantification
    #--------------------------

    k_phase = np.double(params['Np']) / np.double(params['FoV_ph'])
    k_steps_PE = pgu.create_k_steps(k_phase, np.int16(params['Np']))  # list of phase encoding gradients

    n_ex = math.floor(params['Np'] / params['ETL'])  # number of excitations
    k_space_order_filing = TSE_k_space_fill(n_ex, np.int32(params['ETL']), np.int32(params['Np']), np.int32(
        params['N_TE']))  # TODO: to create additional functions on different k space order filling
    # Persist the acquisition order so the data can be reordered during
    # reconstruction outside JEMRIS. Written to the current working directory.
    k_space_save = {'k_space_order': k_space_order_filing}
    output_filename = "k_space_order_filing"  # save for reconstruction outside the jemris
    # output_filename = "TSE_T1" + datetime.now().strftime("%Y%m%d_%H%M%S")
    with open(output_filename + ".json", 'w') as outfile:
        j.dump(k_space_save, outfile)

    #--------------------------
    # DELAYS
    #--------------------------

    # Accumulate the duration of one full excitation block (one slice, one
    # echo train plus the RESTORE tail) to derive slices-per-TR.
    block_duration = 0
    block_duration = max(calc_duration(rf90), calc_duration(gz90)) / 2
    block_duration += max(calc_duration(gx_pre), calc_duration(gz_spoil))
    for i in range(np.int32(params['ETL'])):
        block_duration += max(calc_duration(rf180), calc_duration(gz180))
        block_duration += max(calc_duration(gx_spoil), calc_duration(gz_spoil))
        block_duration += max(calc_duration(gx_spoil), calc_duration(adc))
        block_duration += calc_duration(gz_spoil)
    # RESTORE tail: extra refocusing pulse, read prephaser, -90 pulse, spoiler.
    block_duration += max(calc_duration(rf180), calc_duration(gz180))
    block_duration += max(calc_duration(gx_pre), calc_duration(gz_spoil))
    block_duration += max(calc_duration(rf90), calc_duration(gz90))
    block_duration += calc_duration(gz_cr)

    #--------------------------
    # CONSTRUCT CONCATENATIONS timings
    #--------------------------

    # Quantification of Effective TE loop
    # eff_time = TE + calc_duration(gx) / 2 + max(calc_duration(gy_pre),calc_duration(gz_spoil)) + calc_duration(gx_spoil) + calc_duration(gz90) / 2
    eff_time = block_duration  # equal to previous!

    # NOTE(review): no guard for max_slices_per_TR == 0 here; if
    # eff_time > TR this divides by zero.
    max_slices_per_TR = np.floor(params['TR'] / eff_time)
    required_concats = np.int32(np.ceil(params['sl_nb'] / max_slices_per_TR))
    slice_list = list(range(np.int32(params['sl_nb'])))
    slice_list = [slice_list[x::required_concats] for x in range(required_concats)]

    # Calculate the TR fillers (idle time per concatenation), rounded down
    # to the coarser of the gradient/RF raster times.
    tr_pauses = [(params['TR'] / np.double(len(x))) - eff_time for x in slice_list]
    tr_pauses = [
        max(seq.grad_raster_time, seq.rf_raster_time) * np.floor(x / max(seq.grad_raster_time, seq.rf_raster_time)) for
        x in tr_pauses]

    # Generate the TR fillers
    tr_fillers = [make_delay(x) for x in tr_pauses]

    # --------------------------
    # CONSTRUCT SEQUENCE
    # --------------------------

    for k in range(params['Average']):  # Averages
        for curr_concat in range(required_concats):
            for phase_steps in k_space_order_filing:  # instead of phase steps, a list of phase steps per excitation
                for curr_slice in range(np.int32(params['sl_nb'])):  # Slices
                    # Apply per-slice RF frequency and phase offsets.
                    # NOTE(review): rf_restore.phase_offset is never updated
                    # to compensate its frequency offset, unlike rf90/rf180
                    # -- confirm this is intentional.
                    n_echo_temp = 0
                    rf90.freq_offset = pulse_offsets90[curr_slice]
                    rf180.freq_offset = pulse_offsets180[curr_slice]
                    rf_restore.freq_offset = pulse_offsets_restore[curr_slice]
                    rf90.phase_offset = (rf90_phase - 2 * np.pi * rf90.freq_offset * calc_rf_center(rf90)[0])
                    rf180.phase_offset = (rf180_phase - 2 * np.pi * rf180.freq_offset * calc_rf_center(rf180)[0])
                    print('curr_concat_' + str(curr_concat))
                    print('curr_slice_' + str(curr_slice))

                    # Excitation and read prephasing / slice rephasing.
                    seq.add_block(gz90, rf90)
                    seq.add_block(gz_reph, gx_pre)
                    for phase_step in phase_steps:
                        print('phase step_' + str(phase_step))
                        seq.add_block(gz180, rf180)
                        # Phase-encode blip (negative lobe before readout).
                        gy_pre = make_trapezoid(channel='y', system=scanner_parameters,
                                                area=-k_steps_PE[phase_step], duration=calc_duration(gz_spoil),
                                                rise_time=params['dG'])
                        print(k_steps_PE[phase_step])

                        seq.add_block(gz_spoil, gx_spoil, gy_pre)
                        seq.add_block(gx, adc)
                        n_echo_temp += 1
                        # Rewinder: undo the phase encoding after the echo.
                        gy_pre = make_trapezoid(channel='y', system=scanner_parameters,
                                                area=k_steps_PE[phase_step], duration=calc_duration(gz_spoil),
                                                rise_time=params['dG'])
                        if n_echo_temp == np.int32(params['ETL']):
                            # Last echo: RESTORE module -- refocus, rephase,
                            # -90 restore pulse, then spoil.
                            seq.add_block(gz_spoil, gx_spoil, gy_pre)
                            seq.add_block(gz180, rf180)
                            seq.add_block(gz_reph, gx_pre)
                            seq.add_block(gz_restore, rf_restore)
                            seq.add_block(gz_cr)
                        else:
                            seq.add_block(gz_spoil, gx_spoil, gy_pre)
                    seq.add_block(tr_fillers[curr_concat])

    ok, error_report = seq.check_timing()
    if ok:
        print("Timing check passed successfully")
    else:
        print("Timing check failed. Error listing follows:")
        [print(e) for e in error_report]

    # ======
    # VISUALIZATION
    # ======
    if plot:
        seq.plot()

    # =========
    # WRITE .SEQ
    # =========
    # NOTE(review): the write_seq argument is ignored -- output is written
    # whenever weightning == 'T2'.
    if weightning == 'T2':
        seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\t2_TSE_RESTORE\\t2_TSE_RESTORE_matrix32x32.seq')  # Save to disk
        seq2xml(seq, seq_name='t2_TSE_RESTORE_matrx32x32',
                out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t2_TSE_RESTORE')

    else:
        print('works only with T2')
+
# Script entry point: generate the T2-weighted RESTORE TSE sequence with
# plotting and .seq/.xml export enabled.
if __name__ == "__main__":
    main(plot=True, write_seq=True, weightning='T2')

+ 264 - 0
LF_scanner/pypulseq/seq_examples/new_scripts/write_epi_SE_T2.py

@@ -0,0 +1,264 @@
+#---------------------------------------------------------------------
+# imports of the libraries
+#---------------------------------------------------------------------
+from math import pi
+import numpy as np
+import math
+import json as j
+import matplotlib as plt
+
+from pypulseq.Sequence.sequence import Sequence
+from pypulseq.calc_rf_center import calc_rf_center
+from pypulseq.calc_duration import calc_duration
+from pypulseq.make_adc import make_adc
+from pypulseq.make_delay import make_delay
+from pypulseq.make_sinc_pulse import make_sinc_pulse
+from pypulseq.make_gauss_pulse import make_gauss_pulse
+from pypulseq.make_trapezoid import make_trapezoid
+from pypulseq.make_extended_trapezoid import make_extended_trapezoid
+from pypulseq.opts import Opts
+from pypulseq.align import align
+from pypulseq.traj_to_grad import traj_to_grad
+
+from pypulseq.utilities import phase_grad_utils as pgu
+
+from py2jemris.seq2xml import seq2xml
+
def FS_CHESS_block(params, scanner_parameters, g_rf_area, flip_fs):
    """Build a CHESS fat-saturation block: one gauss RF pulse plus x/y spoilers.

    Side effect: overwrites ``params['B0']`` with the hard-coded field
    strength 1.5 T (original TODO: expose this in the GUI).

    Returns the tuple ``(rf_fs, gx_fs, gy_fs)``.
    """
    # Hard-coded main field strength; TODO add to GUI.
    params['B0'] = 1.5

    # Chemical-shift offset of the fat resonance [Hz]:
    # B0 [T] * ppm * gamma [Hz/T] * 1e-6.
    fat_offset_hz = params['B0'] * 1e-6 * params['FS_sat_ppm'] * params['gamma']

    rf_fs = make_gauss_pulse(flip_angle=flip_fs, system=scanner_parameters,
                             duration=params['FS_pulse_duration'],
                             bandwidth=abs(params['BW_sat']),
                             freq_offset=fat_offset_hz)
    # TODO (original note): phase offset of the saturation pulse is not yet
    # compensated for its frequency offset.

    # Identical spoiler gradients on x and y, delayed past the RF pulse.
    spoiler_delay = calc_duration(rf_fs)
    spoiler_area = 4 * g_rf_area
    gx_fs, gy_fs = (
        make_trapezoid(channel=axis, system=scanner_parameters,
                       delay=spoiler_delay, area=spoiler_area,
                       rise_time=params['dG'])
        for axis in ("x", "y")
    )

    return rf_fs, gx_fs, gy_fs
+
+
def main(plot: bool, write_seq: bool, FS: bool, kplot: bool, seq_filename: str = "epi_se_pypulseq.seq"):
    """Build a 2D multi-slice, diffusion-weighted spin-echo EPI sequence.

    Parameters
    ----------
    plot : bool
        Show the sequence diagram via ``seq.plot()``.
    write_seq : bool
        Write the assembled sequence to ``seq_filename``.
    FS : bool
        Prepend a CHESS fat-saturation block before every excitation.
    kplot : bool
        Plot the computed k-space trajectory.
    seq_filename : str
        Output path of the Pulseq ``.seq`` file.
    """
    # Protocol parameters come from a fixed JSON file (raw string so the
    # Windows path contains no accidental escape sequences).
    with open(r'C:\MRI_seq_files_mess\TSE\EPI_T2.json', 'rb') as f:
        params = j.load(f)

    # Total readout duration of one k-space line [s].
    readout_time = round(1 / params['BW_pixel'], 8)

    # --------------------------
    # Set system limits
    # --------------------------
    scanner_parameters = Opts(
        max_grad=37.8,
        grad_unit="mT/m",
        max_slew=121,
        slew_unit="T/m/s",
        rf_ringdown_time=params['rf_ringdown_time'],
        rf_dead_time=params['rf_dead_time'],
        adc_dead_time=params['adc_dead_time'],
        rf_raster_time=params['rf_raster_time'],
        grad_raster_time=params['grad_raster_time'],
        block_duration_raster=params['grad_raster_time'],
        adc_raster_time=1 / (params['BW_pixel'] * params['Nf'])
    )
    seq = Sequence(scanner_parameters)

    # --------------------------
    # RF & Gradients
    # --------------------------
    rf90_phase = np.pi / 2
    rf180_phase = 0

    flip90 = round(params['FA'] * pi / 180, 3)
    # BUGFIX: the original used round(180 * pi / 180), which rounds pi to the
    # integer 3 (~171.9 deg) instead of a true 180 deg refocusing pulse.
    flip180 = pi
    # BUGFIX: likewise round(110 * pi / 180) rounded to 2 rad (~114.6 deg).
    flip_fs = 110 * pi / 180

    rf90, gz_ex, _ = make_sinc_pulse(flip_angle=flip90, system=scanner_parameters, duration=params['t_ex'],
                                     slice_thickness=params['sl_thkn'], apodization=0.3,
                                     time_bw_product=round(params['t_BW_product_ex'], 8), return_gz=True)

    rf180, gz_ref, _ = make_sinc_pulse(flip_angle=flip180, system=scanner_parameters, duration=params['t_ref'],
                                       slice_thickness=params['sl_thkn'], apodization=0.3,
                                       time_bw_product=round(params['t_BW_product_ref'], 8), phase_offset=90 * pi / 180,
                                       return_gz=True)

    # Prepare RF frequency offsets; required for multi-slice acquisition.
    # Slices are centered around isocenter with `sl_gap` percent spacing.
    pulse_offsets90 = (np.linspace(0.0, params['sl_nb'] - 1.0, np.int16(params['sl_nb'])) - 0.5 * (
                np.double(params['sl_nb']) - 1.0)) * (
                                  params['sl_thkn'] * (100.0 + params['sl_gap']) / 100.0) * gz_ex.amplitude
    pulse_offsets180 = (np.linspace(0.0, params['sl_nb'] - 1.0, np.int16(params['sl_nb'])) - 0.5 * (
                np.double(params['sl_nb']) - 1.0)) * (
                                   params['sl_thkn'] * (100.0 + params['sl_gap']) / 100.0) * gz_ref.amplitude

    # Slice-selective gradients widened by ringdown + dead time.
    t_exwd = params['t_ex'] + scanner_parameters.rf_ringdown_time + scanner_parameters.rf_dead_time
    t_refwd = params['t_ref'] + scanner_parameters.rf_ringdown_time + scanner_parameters.rf_dead_time

    gz90 = make_trapezoid(channel="z", system=scanner_parameters, amplitude=gz_ex.amplitude,
                          flat_time=t_exwd, rise_time=params['dG'])
    gz180 = make_trapezoid(channel="z", system=scanner_parameters, amplitude=gz_ref.amplitude,
                           flat_time=t_refwd, rise_time=params['dG'])

    if FS:  # TODO add to GUI: choice of including a Fat Sat block or not
        g_rf_area = gz_ex.area * 10
        rf_fs, gx_fs, gy_fs = FS_CHESS_block(params, scanner_parameters, g_rf_area, flip_fs)

    # Basic phase blip gradient - G_blip.
    # NOTE(review): the original also computed a blip ramp time from
    # G_slew_max but never passed it to make_trapezoid; dropped here.
    k_blip = 1 / np.double(params['FoV_ph'])
    gy_blip = make_trapezoid(channel='y', system=scanner_parameters, area=k_blip)

    # Basic gx readout gradient - G_read.
    t_gx = np.ceil(readout_time / scanner_parameters.grad_raster_time) * scanner_parameters.grad_raster_time
    k_read = np.double(params['Nf']) / np.double(params['FoV_f'])
    gx = make_trapezoid(channel='x', system=scanner_parameters, flat_area=k_read,
                        flat_time=t_gx + 2 * scanner_parameters.adc_dead_time)

    # Read prephase gradient - G_pre_r (half readout area, reversed).
    gx_pre = make_trapezoid(channel="x", system=scanner_parameters, area=-gx.area * 0.50,
                            rise_time=params['dG'])

    # Phase prephase gradient - G_pre_ph (move to the edge of k-space).
    gy_pre = make_trapezoid(channel="y", system=scanner_parameters, area=-(params['Np'] / 2 - 1) / params['FoV_ph'])

    # Slice rephase gradient after the 90 deg pulse.
    gz_reph = make_trapezoid(channel='z', system=scanner_parameters, area=-gz90.area / 2)

    # Spoil gradients around the 180 RF pulse - G_crs.
    gz_spoil1 = make_trapezoid(channel='z', system=scanner_parameters, area=gz90.area, rise_time=params['dG'],
                               flat_time=params['dG'])
    gz_spoil2 = make_trapezoid(channel='z', system=scanner_parameters, area=gz90.area * 1.5, rise_time=params['dG'])

    # End-of-train spoiler gradient G_sps.
    gz_cr = make_trapezoid(channel='z', system=scanner_parameters, area=gz90.area * 4, rise_time=params['dG'])

    # Creation of ADC; delayed by the dead time plus a quarter-readout pad
    # rounded up to the gradient raster.
    adc_delay_pad = np.ceil(readout_time / 4 / scanner_parameters.grad_raster_time) * scanner_parameters.grad_raster_time
    adc = make_adc(num_samples=params['Nf'], duration=t_gx,
                   delay=scanner_parameters.adc_dead_time + adc_delay_pad, system=scanner_parameters)

    # ---------------------------
    # Diffusion Gradient (Stejskal-Tanner pair around the 180 pulse)
    # ---------------------------
    big_delta = params['TE'] / 2 - (max(calc_duration(rf90), calc_duration(gz90))) / 2 \
        + max(calc_duration(rf180), calc_duration(gz180)) / 2 \
        + max(calc_duration(gy_pre), calc_duration(gz_spoil2))
    # Time from the first readout to the k-space center (effective TE point).
    t_to_center = (params['Nf'] / 2 + 0.5) * calc_duration(gx) + params['Np'] / 2 * calc_duration(gy_blip)
    t_to_center = np.ceil(t_to_center / params['grad_raster_time']) * params['grad_raster_time']

    t_diff = params['TE'] / 2 - max(calc_duration(rf180), calc_duration(gz180)) / 2 - params['dG'] \
        - max(calc_duration(gy_pre), calc_duration(gz_spoil2)) - calc_duration(gx_pre) - t_to_center
    small_delta = t_diff - params['dG']
    G_amp_max = params['G_amp_max'] * 1e-3 * params['gamma']  # mT/m -> Hz/m
    # Diagnostic only (not used below): the shortest lobe duration that can
    # reach the requested b-value at the maximum gradient amplitude.
    small_delta_min = math.sqrt(params['b'] / (big_delta - small_delta / 3)) * 1 / (2 * pi * G_amp_max) * 1e3
    G_diff = math.sqrt(params['b'] / (big_delta - small_delta / 3)) * 1 / (2 * pi * small_delta) * 1e3
    gz_diffusion = make_trapezoid(channel='z', system=scanner_parameters, flat_time=small_delta, amplitude=G_diff)

    # ----------------------------
    # Calculate delays (TE split around the 180 pulse), floored to the raster.
    # ----------------------------
    delay_TE1 = params['TE'] / 2 - (max(calc_duration(rf90), calc_duration(gz90))) / 2 \
        - max(calc_duration(rf180), calc_duration(gz180)) / 2 - calc_duration(gz_spoil1) - calc_duration(gz_diffusion)
    delay_TE2 = params['TE'] / 2 - (max(calc_duration(rf180), calc_duration(gz180))) / 2 \
        - calc_duration(gz_spoil2) - calc_duration(gz_diffusion) - t_to_center - calc_duration(gy_pre)
    delay_TE1 = np.ceil(delay_TE1 / params['grad_raster_time']) * params['grad_raster_time']
    delay_TE2 = np.ceil(delay_TE2 / params['grad_raster_time']) * params['grad_raster_time']

    delay_TE1 = make_delay(delay_TE1)
    delay_TE2 = make_delay(delay_TE2)

    block_duration = params['TE'] + t_to_center + calc_duration(gz_cr)
    if FS:
        block_duration += calc_duration(gx_fs)

    # --------------------------
    # CONSTRUCT CONCATENATIONS timings: interleave as many slices per TR as
    # fit, then split the remainder into additional concatenations.
    # --------------------------
    eff_time = block_duration

    max_slices_per_TR = np.floor(params['TR'] / eff_time)
    required_concats = np.int32(np.ceil(params['sl_nb'] / max_slices_per_TR))
    slice_list = list(range(np.int32(params['sl_nb'])))
    slice_list = [slice_list[x::required_concats] for x in range(required_concats)]

    # Calculate the TR fillers (floored to the coarser raster).
    tr_pauses = [(params['TR'] / np.double(len(x))) - eff_time for x in slice_list]
    tr_pauses = [
        max(seq.grad_raster_time, seq.rf_raster_time) * np.floor(x / max(seq.grad_raster_time, seq.rf_raster_time)) for
        x in tr_pauses]

    # Generate the TR fillers
    tr_fillers = [make_delay(x) for x in tr_pauses]

    for _avg in range(params['Average']):  # Averages
        for curr_concat in range(required_concats):
            for curr_slice in range(np.int32(params['sl_nb'])):  # Slices
                rf90.freq_offset = pulse_offsets90[curr_slice]
                rf180.freq_offset = pulse_offsets180[curr_slice]
                # Compensate the phase accrued at the RF center by the
                # frequency offset.
                rf90.phase_offset = (rf90_phase - 2 * np.pi * rf90.freq_offset * calc_rf_center(rf90)[0])
                rf180.phase_offset = (rf180_phase - 2 * np.pi * rf180.freq_offset * calc_rf_center(rf180)[0])
                print('curr_concat_' + str(curr_concat))
                print('curr_slice_' + str(curr_slice))

                if FS:
                    seq.add_block(gx_fs, gy_fs, rf_fs)
                seq.add_block(gz90, rf90)
                seq.add_block(gz_diffusion)
                seq.add_block(delay_TE1)
                seq.add_block(gz_spoil1)
                seq.add_block(gz180, rf180)
                seq.add_block(gz_spoil2)
                seq.add_block(gz_diffusion)
                seq.add_block(delay_TE2)
                seq.add_block(gx_pre, gy_pre)
                # NOTE(review): gx polarity is flipped in place and not reset
                # per slice; with odd Np the next slice starts with a reversed
                # readout — confirm this is intended.
                for _ in range(params['Np']):
                    seq.add_block(gx, adc)  # Read one line of k-space
                    seq.add_block(gy_blip)  # Phase blip
                    gx.amplitude = -gx.amplitude  # Reverse polarity of read gradient
                seq.add_block(gz_cr)
                seq.add_block(tr_fillers[curr_concat])

    ok, error_report = seq.check_timing()
    if ok:
        print("Timing check passed successfully")
    else:
        print("Timing check failed! Error listing follows:")
        for e in error_report:
            print(e)

    # ======
    # VISUALIZATION
    # ======
    if plot:
        seq.plot()

    if kplot:
        # BUGFIX: the file-level `import matplotlib as plt` does not expose
        # the pyplot API; import pyplot locally so plotting actually works.
        import matplotlib.pyplot as plt

        ktraj_adc, ktraj, t_excitation, t_refocusing, t_adc = seq.calculate_kspace()

        time_axis = np.arange(1, ktraj.shape[1] + 1) * scanner_parameters.grad_raster_time
        plt.plot(time_axis, ktraj.T)
        plt.plot(t_adc, ktraj_adc[0, :], '.')
        plt.figure()
        plt.plot(ktraj[0, :], ktraj[1, :], 'b')
        plt.axis('equal')
        plt.plot(ktraj_adc[0, :], ktraj_adc[1, :], 'r.')
        plt.show()

    # =========
    # WRITE .SEQ
    # =========
    if write_seq:
        seq.write(seq_filename)


if __name__ == "__main__":
    main(plot=True, write_seq=True, kplot=False, FS=True)

+ 286 - 0
LF_scanner/pypulseq/seq_examples/new_scripts/write_tse.py

@@ -0,0 +1,286 @@
+#---------------------------------------------------------------------
+# imports of the libraries
+#---------------------------------------------------------------------
+from math import pi
+import numpy as np
+import math
+import json as j
+
+from pypulseq.Sequence.sequence import Sequence
+from pypulseq.calc_rf_center import calc_rf_center
+from pypulseq.calc_duration import calc_duration
+from pypulseq.make_adc import make_adc
+from pypulseq.make_delay import make_delay
+from pypulseq.make_sinc_pulse import make_sinc_pulse
+from pypulseq.make_trapezoid import make_trapezoid
+from pypulseq.make_extended_trapezoid import make_extended_trapezoid
+from pypulseq.opts import Opts
+from pypulseq.align import align
+from pypulseq.traj_to_grad import traj_to_grad
+
+from pypulseq.utilities import phase_grad_utils as pgu
+
+from py2jemris.seq2xml import seq2xml
+
+
def TSE_k_space_fill(n_ex, ETL, k_steps, TE_eff_number):
    """Return the phase-encoding order for linear TSE k-space filling.

    Builds, for each of the ``n_ex`` excitations, the list of ``ETL``
    phase-encode line indices acquired by its echo train. The base train
    descends in steps of ``n_ex`` and is rotated so that the central k-space
    line (index ``k_steps // 2``) is acquired by echo number
    ``TE_eff_number`` (1-based), which sets the effective TE.

    Parameters
    ----------
    n_ex : int
        Number of excitations (echo trains), i.e. ``Np // ETL``.
    ETL : int
        Echo train length (lines acquired per excitation).
    k_steps : int
        Total number of phase-encoding steps (``Np``).
    TE_eff_number : int
        1-based echo index that should land on the k-space center.

    Returns
    -------
    list[list[int]]
        ``n_ex`` lists of ``ETL`` phase-encode indices each.

    Raises
    ------
    ValueError
        If the central line ``k_steps // 2`` does not lie on the base train
        (i.e. ``k_steps`` is not compatible with ``ETL`` / ``n_ex``).
    """
    # Base echo train: (ETL-1)*n_ex, (ETL-2)*n_ex, ..., n_ex, 0.
    base_train = [(ETL - 1 - i) * n_ex for i in range(ETL)]

    central_line = int(k_steps / 2)
    index_central_line = base_train.index(central_line)  # ValueError if absent

    # Rotate left so the central line is acquired at echo TE_eff_number.
    # (The original had byte-identical code in separate shift > 0 / shift < 0
    # branches; slicing handles every sign uniformly, including shift == 0.)
    shift = index_central_line - TE_eff_number + 1
    base_train = base_train[shift:] + base_train[:shift]

    # Each subsequent excitation acquires the same train offset by +1, +2, ...
    return [[line + offset for line in base_train] for offset in range(n_ex)]
+
+
def main(plot: bool, write_seq: bool, weightning):
    """Build and (optionally) export a 2D multi-slice TSE sequence.

    Parameters
    ----------
    plot : bool
        Show the sequence diagram via ``seq.plot()``.
    write_seq : bool
        Write the ``.seq`` file and the JEMRIS ``.xml`` export.
    weightning : str
        Image weighting: 'T1' or 'T2' select the protocol JSON to load.
        (Parameter name keeps the original spelling for caller compatibility.)

    Raises
    ------
    ValueError
        If ``weightning`` is not a supported option.
    """
    # Select the protocol JSON per requested image weighting.
    # BUGFIX: the original had a duplicated, unreachable 'T1' elif branch and,
    # for an unknown weighting, only printed a message before crashing later
    # with a NameError on the unbound `params` — fail fast instead.
    # NOTE(review): the write section below supports 'PD' but no PD protocol
    # JSON exists yet — TODO confirm and add one.
    if weightning == 'T1':
        param_path = r'C:\MRI_seq_files_mess\TSE_T1.json'
    elif weightning == 'T2':
        param_path = r'C:\MRI_seq_files_mess\TSE_T2.json'
    else:
        raise ValueError('Please choose image weightning')

    with open(param_path, 'rb') as f:
        params = j.load(f)

    # Total readout duration of one k-space line [s].
    readout_time = round(1 / params['BW_pixel'], 8)

    # --------------------------
    # Set system limits
    # --------------------------
    scanner_parameters = Opts(
        max_grad=37.8,
        grad_unit="mT/m",
        max_slew=121,
        slew_unit="T/m/s",
        rf_ringdown_time=params['rf_ringdown_time'],
        rf_dead_time=params['rf_dead_time'],
        adc_dead_time=params['adc_dead_time'],
        rf_raster_time=params['rf_raster_time'],
        grad_raster_time=params['grad_raster_time'],
        block_duration_raster=params['grad_raster_time'],
        adc_raster_time=1 / (params['BW_pixel'] * params['Nf'])
    )
    seq = Sequence(scanner_parameters)

    # --------------------------
    # RF & Gradients
    # --------------------------
    flip90 = round(params['FA'] * pi / 180, 3)
    # BUGFIX: the original used round(180 * pi / 180), which rounds pi to the
    # integer 3 (~171.9 deg) instead of a true 180 deg refocusing pulse.
    flip180 = pi

    rf90, gz_ex, _ = make_sinc_pulse(flip_angle=flip90, system=scanner_parameters, duration=params['t_ex'],
                                     slice_thickness=params['sl_thkn'], apodization=0.3,
                                     time_bw_product=round(params['t_BW_product_ex'], 8), return_gz=True)

    rf180, gz_ref, _ = make_sinc_pulse(flip_angle=flip180, system=scanner_parameters, duration=params['t_ref'],
                                       slice_thickness=params['sl_thkn'], apodization=0.3,
                                       time_bw_product=round(params['t_BW_product_ref'], 8), phase_offset=90 * pi / 180,
                                       return_gz=True)

    # Prepare RF frequency offsets; required for multi-slice acquisition.
    # Slices are centered around isocenter with `sl_gap` percent spacing.
    pulse_offsets90 = (np.linspace(0.0, params['sl_nb'] - 1.0, np.int16(params['sl_nb'])) - 0.5 * (
                np.double(params['sl_nb']) - 1.0)) * (
                                  params['sl_thkn'] * (100.0 + params['sl_gap']) / 100.0) * gz_ex.amplitude
    pulse_offsets180 = (np.linspace(0.0, params['sl_nb'] - 1.0, np.int16(params['sl_nb'])) - 0.5 * (
                np.double(params['sl_nb']) - 1.0)) * (
                                   params['sl_thkn'] * (100.0 + params['sl_gap']) / 100.0) * gz_ref.amplitude

    # Slice-selective gradients widened by ringdown + dead time.
    t_exwd = params['t_ex'] + scanner_parameters.rf_ringdown_time + scanner_parameters.rf_dead_time
    t_refwd = params['t_ref'] + scanner_parameters.rf_ringdown_time + scanner_parameters.rf_dead_time

    gz90 = make_trapezoid(channel="z", system=scanner_parameters, amplitude=gz_ex.amplitude,
                          flat_time=t_exwd, rise_time=params['dG'])
    gz180 = make_trapezoid(channel="z", system=scanner_parameters, amplitude=gz_ref.amplitude,
                           flat_time=t_refwd, rise_time=params['dG'])

    # Basic gx readout gradient - G_read.
    k_read = np.double(params['Nf']) / np.double(params['FoV_f'])
    t_gx = np.ceil(readout_time / scanner_parameters.grad_raster_time) * scanner_parameters.grad_raster_time
    gx = make_trapezoid(channel='x', system=scanner_parameters, flat_area=k_read,
                        flat_time=t_gx + 2 * scanner_parameters.adc_dead_time)

    # gx spoiler gradient - G_crr.
    gx_spoil = make_trapezoid(channel='x', system=scanner_parameters, area=gx.area, flat_time=params['dG'],
                              rise_time=params['dG'])

    # Read prephase gradient - G_pre.
    gx_pre = make_trapezoid(channel="x", system=scanner_parameters, area=gx.area * 1.50,
                            rise_time=params['dG'])

    # Slice rephase gradient after the 90 RF pulse - G_reph.
    gz_reph = make_trapezoid(channel="z", system=scanner_parameters, area=gz_ex.area * 0.25,
                             flat_time=calc_duration(gx_pre), rise_time=params['dG'])

    # Spoil gradient around the 180 RF pulse - G_crs.
    gz_spoil = make_trapezoid(channel='z', system=scanner_parameters, area=gz90.area * 0.75, rise_time=params['dG'],
                              flat_time=params['dG'])

    # End-of-train spoiler gradient G_sps.
    gz_cr = make_trapezoid(channel='z', system=scanner_parameters, area=gz90.area * 4, rise_time=params['dG'])

    # Creation of ADC
    adc = make_adc(num_samples=params['Nf'], duration=t_gx, delay=scanner_parameters.adc_dead_time,
                   system=scanner_parameters)

    # --------------------------
    # k-space filling quantification
    # --------------------------
    k_phase = np.double(params['Np']) / np.double(params['FoV_ph'])
    k_steps_PE = pgu.create_k_steps(k_phase, np.int16(params['Np']))  # list of phase encoding gradients

    n_ex = math.floor(params['Np'] / params['ETL'])  # number of excitations
    k_space_order_filing = TSE_k_space_fill(n_ex, np.int32(params['ETL']), np.int32(params['Np']), np.int32(
        params['N_TE']))  # TODO: create function on different k-space filling orders

    # --------------------------
    # DELAYS: accumulate one excitation's duration block by block.
    # --------------------------
    block_duration = max(calc_duration(rf90), calc_duration(gz90)) / 2
    block_duration += max(calc_duration(gx_pre), calc_duration(gz_spoil))
    for _ in range(np.int32(params['ETL']) - 1):
        block_duration += max(calc_duration(rf180), calc_duration(gz180))
        block_duration += max(calc_duration(gx_spoil), calc_duration(gz_spoil))
        block_duration += max(calc_duration(gx_spoil), calc_duration(adc))
        block_duration += calc_duration(gz_spoil)
    block_duration += max(calc_duration(rf180), calc_duration(gz_spoil))
    block_duration += max(calc_duration(gx_spoil), calc_duration(gz_spoil))
    block_duration += max(calc_duration(gx_spoil), calc_duration(adc))
    block_duration += calc_duration(gz_cr)

    # --------------------------
    # CONSTRUCT CONCATENATIONS timings: interleave as many slices per TR as
    # fit, then split the remainder into additional concatenations.
    # --------------------------
    eff_time = block_duration

    max_slices_per_TR = np.floor(params['TR'] / eff_time)
    required_concats = np.int32(np.ceil(params['sl_nb'] / max_slices_per_TR))
    slice_list = list(range(np.int32(params['sl_nb'])))
    slice_list = [slice_list[x::required_concats] for x in range(required_concats)]

    # Calculate the TR fillers (floored to the coarser raster).
    tr_pauses = [(params['TR'] / np.double(len(x))) - eff_time for x in slice_list]
    tr_pauses = [
        max(seq.grad_raster_time, seq.rf_raster_time) * np.floor(x / max(seq.grad_raster_time, seq.rf_raster_time)) for
        x in tr_pauses]

    # Generate the TR fillers
    tr_fillers = [make_delay(x) for x in tr_pauses]

    # --------------------------
    # CONSTRUCT SEQUENCE
    # --------------------------
    for _avg in range(params['Average']):  # Averages
        for curr_concat in range(required_concats):
            for phase_steps in k_space_order_filing:  # one echo train's PE lines
                for curr_slice in range(np.int32(params['sl_nb'])):  # Slices
                    # Apply RF offsets
                    n_echo_temp = 0
                    rf90.freq_offset = pulse_offsets90[curr_slice]
                    rf180.freq_offset = pulse_offsets180[curr_slice]
                    # NOTE(review): frequency-dependent phase-offset
                    # compensation is currently disabled — confirm intended:
                    # rf90.phase_offset = (rf90_phase - 2*np.pi*rf90.freq_offset*calc_rf_center(rf90)[0])
                    # rf180.phase_offset = (rf180_phase - 2*np.pi*rf180.freq_offset*calc_rf_center(rf180)[0])
                    print('curr_concat_' + str(curr_concat))
                    print('curr_slice_' + str(curr_slice))

                    seq.add_block(gz90, rf90)
                    seq.add_block(gz_reph, gx_pre)
                    for phase_step in phase_steps:
                        print('phase step_' + str(phase_step))
                        seq.add_block(gz180, rf180)
                        # Phase-encode before the readout...
                        gy_pre = make_trapezoid(channel='y', system=scanner_parameters,
                                                area=-k_steps_PE[phase_step], duration=calc_duration(gz_spoil),
                                                rise_time=params['dG'])
                        print(k_steps_PE[phase_step])

                        seq.add_block(gz_spoil, gx_spoil, gy_pre)
                        seq.add_block(gx, adc)
                        n_echo_temp += 1
                        # ...and rewind afterwards (opposite area).
                        gy_pre = make_trapezoid(channel='y', system=scanner_parameters,
                                                area=k_steps_PE[phase_step], duration=calc_duration(gz_spoil),
                                                rise_time=params['dG'])
                        if n_echo_temp == np.int32(params['ETL']):
                            # Last echo: replace the crusher with the big spoiler.
                            seq.add_block(gz_cr, gx_spoil, gy_pre)
                        else:
                            seq.add_block(gz_spoil, gx_spoil, gy_pre)
                    seq.add_block(tr_fillers[curr_concat])

    ok, error_report = seq.check_timing()
    if ok:
        print("Timing check passed successfully")
    else:
        print("Timing check failed. Error listing follows:")
        for e in error_report:
            print(e)

    # ======
    # VISUALIZATION
    # ======
    if plot:
        seq.plot()

    # =========
    # WRITE .SEQ
    # =========
    if write_seq:
        if weightning == 'T1':
            seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\t1_TSE\\t1_TSE_matrx16x16.seq')  # Save to disk
            seq2xml(seq, seq_name='t1_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t1_TSE')

        elif weightning == 'T2':
            seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\t2_TSE\\t2_TSE_matrx16x16.seq')  # Save to disk
            seq2xml(seq, seq_name='t2_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t2_TSE')

        elif weightning == 'PD':
            seq.write('C:\\MRI_seq\\new_MRI_pulse_seq\\pd_TSE\\pd_TSE_matrx16x16.seq')  # Save to disk
            # BUGFIX: the original copy-pasted the T2 name/folder here; the
            # repo ships pd_TSE/pd_TSE_matrx16x16_myGrad.xml, so export under
            # the pd_TSE name and folder.
            seq2xml(seq, seq_name='pd_TSE_matrx16x16_myGrad', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\pd_TSE')

        else:
            print('Please choose image weightning')


if __name__ == "__main__":
    main(plot=True, write_seq=True, weightning='T1')

+ 449 - 0
LF_scanner/pypulseq/seq_examples/notebooks/write_t2_se.ipynb

@@ -0,0 +1,449 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "MiKvRj5u076V"
+   },
+   "source": [
+    "## **ABOUT**\n",
+    "This example illustrates the 2D multi-slice, Spin Echo (SE) acquisition using the `pypulseq` library. This sequence is typically used for T<sub>2</sub> weighted imaging. A 2D Fourier transform can be used to reconstruct images from this acquisition. Read more about SE [here](http://mriquestions.com/se-vs-multi-se-vs-fse.html).\n",
+    "\n",
+    "**Contact**: For issues, write to ks3621@columbia.edu\n",
+    "\n",
+    "---"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "Y98YDJr215fa"
+   },
+   "source": [
+    "## **INSTALL** `pypulseq`"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 0,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "ogKNAZH3TmgA"
+   },
+   "outputs": [],
+   "source": [
+    "!pip install git+https://github.com/imr-framework/pypulseq.git@dev"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "UgqzEwle2xCd"
+   },
+   "source": [
+    "## **IMPORT PACKAGES**"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 0,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "3X7UsV832B6j"
+   },
+   "outputs": [],
+   "source": [
+    "from math import pi\n",
+    "\n",
+    "import numpy as np\n",
+    "\n",
+    "from pypulseq.Sequence.sequence import Sequence\n",
+    "from pypulseq.calc_duration import calc_duration\n",
+    "from pypulseq.make_adc import make_adc\n",
+    "from pypulseq.make_delay import make_delay\n",
+    "from pypulseq.make_sinc_pulse import make_sinc_pulse\n",
+    "from pypulseq.make_trapezoid import make_trapezoid\n",
+    "from pypulseq.opts import Opts"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "UQ4AWw9l4et_"
+   },
+   "source": [
+    "## **USER INPUTS**\n",
+    "\n",
+    "These parameters are typically on the user interface of the scanner computer console "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 0,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "ssnNwiQH4q_0"
+   },
+   "outputs": [],
+   "source": [
+    "nsa = 1  # Number of averages\n",
+    "n_slices = 3  # Number of slices\n",
+    "Nx = 128\n",
+    "Ny = 128\n",
+    "fov = 220e-3  # mm\n",
+    "slice_thickness = 5e-3  # s\n",
+    "slice_gap = 15e-3  # s\n",
+    "rf_flip = 90  # degrees\n",
+    "rf_offset = 0\n",
+    "print('User inputs setup')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "PeYeI0V45ZfD"
+   },
+   "source": [
+    "## **SYSTEM LIMITS**\n",
+    "Set the hardware limits and initialize sequence object"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 0,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "XHs1LT965kqg"
+   },
+   "outputs": [],
+   "source": [
+    "system = Opts(max_grad=32, grad_unit='mT/m', max_slew=130, slew_unit='T/m/s', \n",
+    "              grad_raster_time=10e-6, rf_ringdown_time=10e-6, \n",
+    "              rf_dead_time=100e-6)\n",
+    "seq = Sequence(system)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "ee-xBrpa7Zyn"
+   },
+   "source": [
+    "## **TIME CONSTANTS**"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 0,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "u2dW2nRf7obq"
+   },
+   "outputs": [],
+   "source": [
+    "TE = 100e-3  # s\n",
+    "TR = 3  # s\n",
+    "tau = TE / 2  # s\n",
+    "readout_time = 6.4e-3\n",
+    "pre_time = 8e-4  # s"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "OTw7M03g79bH"
+   },
+   "source": [
+    "## **RF**"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 0,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "XDZyQrbL8I3Q"
+   },
+   "outputs": [],
+   "source": [
+    "flip90 = round(rf_flip * pi / 180, 3)\n",
+    "flip180 = 180 * pi / 180\n",
+    "rf90, gz90, _ = make_sinc_pulse(flip_angle=flip90, system=system, duration=4e-3, \n",
+    "                                slice_thickness=slice_thickness, apodization=0.5, \n",
+    "                                time_bw_product=4, return_gz = True)\n",
+    "rf180, gz180, _ = make_sinc_pulse(flip_angle=flip180, system=system, \n",
+    "                                  duration=2.5e-3, \n",
+    "                                  slice_thickness=slice_thickness, \n",
+    "                                  apodization=0.5, \n",
+    "                                time_bw_product=4, phase_offset=90 * pi/180, return_gz = True)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "RFSHuUOG9LHK"
+   },
+   "source": [
+    "## **READOUT**\n",
+    "Readout gradients and related events"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 0,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "Q8p-CttI9dk9"
+   },
+   "outputs": [],
+   "source": [
+    "delta_k = 1 / fov\n",
+    "k_width = Nx * delta_k\n",
+    "gx = make_trapezoid(channel='x', system=system, flat_area=k_width, \n",
+    "                    flat_time=readout_time)\n",
+    "adc = make_adc(num_samples=Nx, duration=gx.flat_time, delay=gx.rise_time)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "o829kzm8kVFB"
+   },
+   "source": [
+    "## **PREPHASE AND REPHASE**"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 0,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "m5zA1bMakTVs"
+   },
+   "outputs": [],
+   "source": [
+    "phase_areas = (np.arange(Ny) - (Ny / 2)) * delta_k\n",
+    "gz_reph = make_trapezoid(channel='z', system=system, area=-gz90.area / 2,\n",
+    "                         duration=2.5e-3)\n",
+    "gx_pre = make_trapezoid(channel='x', system=system, flat_area=k_width / 2, \n",
+    "                        flat_time=readout_time / 2)\n",
+    "gy_pre = make_trapezoid(channel='y', system=system, area=phase_areas[-1], \n",
+    "                        duration=2e-3)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "5Css5esAkYHo"
+   },
+   "source": [
+    "## **SPOILER**"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 0,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "R1DOmoKKkawr"
+   },
+   "outputs": [],
+   "source": [
+    "gz_spoil = make_trapezoid(channel='z', system=system, area=gz90.area * 4,\n",
+    "                          duration=pre_time * 4)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "3F5JUpE9-4lo"
+   },
+   "source": [
+    "## **DELAYS**\n",
+    "Echo time (TE) and repetition time (TR). Here, TE is broken down into `delay1` and `delay2`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 0,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "aOKRJclb_mDQ"
+   },
+   "outputs": [],
+   "source": [
+    "delay1 = tau - calc_duration(rf90) / 2 - calc_duration(gx_pre)\n",
+    "delay1 -= calc_duration(gz_spoil) - calc_duration(rf180) / 2\n",
+    "delay1 = make_delay(delay1)\n",
+    "delay2 = tau - calc_duration(rf180) / 2 - calc_duration(gz_spoil)\n",
+    "delay2 -= calc_duration(gx) / 2\n",
+    "delay2 = make_delay(delay2)\n",
+    "delay_TR = TR - calc_duration(rf90) / 2 - calc_duration(gx) / 2 - TE\n",
+    "delay_TR -= calc_duration(gy_pre)\n",
+    "delay_TR = make_delay(delay_TR)\n",
+    "print(f'delay_1: {delay1}')\n",
+    "print(f'delay_2: {delay2}')\n",
+    "print(f'delay_TR: {delay_TR}')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "6Dq4wT-UAEOR"
+   },
+   "source": [
+    "## **CONSTRUCT SEQUENCE**\n",
+    "Construct sequence for one phase encode and multiple slices"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 0,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "B8ZmVkkrAXnK"
+   },
+   "outputs": [],
+   "source": [
+    "# Prepare RF offsets. This is required for multi-slice acquisition\n",
+    "delta_z = n_slices * slice_gap\n",
+    "z = np.linspace((-delta_z / 2), (delta_z / 2), n_slices) + rf_offset\n",
+    "\n",
+    "for k in range(nsa):  # Averages\n",
+    "  for j in range(n_slices):  # Slices\n",
+    "    # Apply RF offsets\n",
+    "    freq_offset = gz90.amplitude * z[j]\n",
+    "    rf90.freq_offset = freq_offset\n",
+    "\n",
+    "    freq_offset = gz180.amplitude * z[j]\n",
+    "    rf180.freq_offset = freq_offset\n",
+    "\n",
+    "    for i in range(Ny):  # Phase encodes\n",
+    "      seq.add_block(rf90, gz90)\n",
+    "      gy_pre = make_trapezoid(channel='y', system=system, \n",
+    "                              area=phase_areas[-i -1], duration=2e-3)\n",
+    "      seq.add_block(gx_pre, gy_pre, gz_reph)\n",
+    "      seq.add_block(delay1)\n",
+    "      seq.add_block(gz_spoil)\n",
+    "      seq.add_block(rf180, gz180)\n",
+    "      seq.add_block(gz_spoil)\n",
+    "      seq.add_block(delay2)\n",
+    "      seq.add_block(gx, adc)\n",
+    "      gy_pre = make_trapezoid(channel='y', system=system, \n",
+    "                              area=-phase_areas[-i -1], duration=2e-3)\n",
+    "      seq.add_block(gy_pre, gz_spoil)\n",
+    "      seq.add_block(delay_TR)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "l-YP9djBJCpC"
+   },
+   "source": [
+    "## **PLOTTING TIMING DIAGRAM**"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 0,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "d_iCUR4nfoH9"
+   },
+   "outputs": [],
+   "source": [
+    "seq.plot(time_range=(0, 0.1))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "fYNgdWc_KiK7"
+   },
+   "source": [
+    "## **GENERATING `.SEQ` FILE**\n",
+    "Uncomment the code in the cell below to generate a `.seq` file and download locally."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 0,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "6iN0aeuuqKRe"
+   },
+   "outputs": [],
+   "source": [
+    "# seq.write('t2_se_pypulseq_colab.seq')  # Save to disk\n",
+    "# from google.colab import files\n",
+    "# files.download('t2_se_pypulseq_colab.seq')  # Download locally"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 0,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "4Q0b5w-lKtfP"
+   },
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "colab": {
+   "collapsed_sections": [],
+   "name": "write_t2_se.ipynb",
+   "private_outputs": true,
+   "provenance": []
+  },
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.3"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}

+ 43 - 0
LF_scanner/pypulseq/seq_examples/scripts/README.md

@@ -0,0 +1,43 @@
+<p align="center">
+
+![PyPulseq](../../../logo.png)
+
+</p>
+
+# PyPulseq: A Python Package for MRI Pulse Sequence Design
+
+Example reconstructions of the Gradient Recalled Echo (GRE) and Turbo Spin Echo (TSE) sequences executed on a 
+Siemens Prisma 3T scanner:
+
+### 1. Gradient Recalled Echo (GRE)
+
+| Parameter | Value |
+|-----------|-------|
+| Field of view | 256 x 256 mm^2 |
+| Nx | 256 |
+| Ny | 256 |
+| Flip angle | 10 |
+| Number of slices | 1 |
+| Slice thickness | 3 mm |
+| TE | 4.3 ms |
+| TR | 10 ms |
+| Number of echoes | 16 |
+
+![Gradient Recalled Echo](example_recons/gre.png)
+
+---
+
+### 2. Turbo Spin Echo (TSE)
+
+| Parameter | Value |
+|-----------|-------|
+| Field of view | 256 x 256 mm^2 |
+| Nx | 128 |
+| Ny | 128 |
+| Flip angle | 10 |
+| Number of slices | 1 |
+| Slice thickness | 5 mm |
+| TE | 12 ms |
+| TR | 2000 ms |
+
+![Turbo Spin Echo](example_recons/tse.png)

+ 0 - 0
LF_scanner/pypulseq/seq_examples/scripts/__init__.py


+ 100 - 0
LF_scanner/pypulseq/seq_examples/scripts/demo_read.py

@@ -0,0 +1,100 @@
import numpy as np
from matplotlib import pyplot as plt

import pypulseq as pp

"""
Read a sequence in Python. The `Sequence` class provides an implementation of the _open file format_ for MR sequences 
described here: http://pulseq.github.io/specification.pdf. This example demonstrates parsing an MRI sequence stored in 
this format, accessing sequence parameters and visualising the sequence.
"""

# Read a sequence file - a sequence can be loaded from the open MR file format using the `read` method.
seq_name = "epi_rs.seq"

system = pp.Opts(
    B0=2.89
)  # Need system here if we want 'detectRFuse' to detect fat-sat pulses
seq = pp.Sequence(system)
seq.read(seq_name, detect_rf_use=True)

# Sanity check to see if the reading and writing are consistent
seq.write("read_test.seq")
# os_system(f'diff -s -u {seq_name} read_test.seq -echo')  # Linux only

"""
Access sequence parameters and blocks. Parameters defined within the `[DEFINITIONS]` section of the sequence file 
are accessed with the `get_definition()` method. These are user-specified definitions and do not affect the execution 
of the sequence.
"""
# Use a distinct name here: `seq_name` still refers to the input file and is
# reused by the (commented-out) diff commands below.
seq_def_name = seq.get_definition("Name")

# Calculate and display real TE, TR as well as slew rates and gradient amplitudes
test_report = seq.test_report()
print(test_report)

# Sequence blocks are accessed with the `get_block()` method. As shown in the output the first block is a selective
# excitation block and contains an RF pulse and gradient and on the z-channel.
b1 = seq.get_block(1)

# Further information about each event can be obtained by accessing the appropriate fields of the block struct. In
# particular, the complex RF signal is stored in the field `signal`.
rf = b1.rf

# Plot RF magnitude and phase on a common millisecond time axis.
plt.subplot(211)
plt.plot(1e3 * rf.t, np.abs(rf.signal))
plt.ylabel("RF magnitude")

plt.subplot(212)
plt.plot(1e3 * rf.t, np.angle(rf.signal))
plt.xlabel("t (ms)")
plt.ylabel("RF phase")

# The next three blocks contain: three gradient events; a delay; and readout gradient with ADC event, each with
# corresponding fields defining the details of the events.
b2 = seq.get_block(2)
b3 = seq.get_block(3)
b4 = seq.get_block(4)

# Plot the sequence. Visualise the sequence using the `plot()` method of the class. This creates a new figure and shows
# ADC, RF and gradient events. The axes are linked so zooming is consistent. In this example, a simple gradient echo
# sequence for MRI is displayed.
# seq.plot()

"""
The details of individual pulses are not well-represented when the entire sequence is visualised. Interactive zooming 
is helpful here. Alternatively, a time range can be specified. An additional parameter also allows the display units to 
be changed for easy reading. Further, the handle of the created figure can be returned if required.
"""
# seq.plot(time_range=[0, 16e-3], time_disp='ms')

"""
Modifying sequence blocks. In addition to loading a sequence and accessing sequence blocks, blocks can be modified. 
In this example, a raised-cosine window is applied to the first RF pulse of the sequence and the flip angle is changed 
to 45 degrees. The remaining RF pulses are unchanged.
"""
rf2 = rf
duration = rf2.t[-1]
t = rf2.t - duration / 2  # Centre time about 0
alpha = 0.5
BW = 4 / duration  # Time bandwidth product = 4
# Raised-cosine window (with alpha=0.5 this is a Hann window)
window = 1.0 - alpha + alpha * np.cos(2 * np.pi * t / duration)
signal = window * np.sinc(BW * t)

# Normalise area to achieve 2*pi rotation
signal = signal / (seq.rf_raster_time * np.sum(np.real(signal)))

# Scale to 45 degree flip angle
rf2.signal = signal * 45 / 360

b1.rf = rf2
seq.set_block(1, b1)

# Second check to see what has changed
seq.write("read_test2.seq")
# os_system(f'diff -s -u {seq_name} read_test2.seq -echo')  # Linux only

# The amplitude of the first rf pulse is reduced due to the reduced flip-angle. Notice the reduction is not exactly a
# factor of two due to the windowing function.
amp1_in_Hz = max(abs(seq.get_block(1).rf.signal))
amp2_in_Hz = max(abs(seq.get_block(6).rf.signal))

BIN
LF_scanner/pypulseq/seq_examples/scripts/example_recons/gre.png


BIN
LF_scanner/pypulseq/seq_examples/scripts/example_recons/tse.png


+ 129 - 0
LF_scanner/pypulseq/seq_examples/scripts/write_2Dt1_mprage.py

@@ -0,0 +1,129 @@
"""2D T1-weighted MPRAGE-style sequence (see the `Name` definition below):
a hard 90-degree preparation pulse with spoilers, an inversion-time (TI)
delay, then a 12-degree slice-selective excitation with a Cartesian
spin-warp readout, looped over slices and phase encodes.
Writes '2d_mprage_pypulseq.seq'.
"""
from math import pi

import numpy as np

import pypulseq as pp

# Acquisition matrix and number of 2D slices
Nx = 128
Ny = 128
n_slices = 3

# Scanner hardware limits (gradient/slew) and RF dead/ringdown times
system = pp.Opts(
    max_grad=32,
    grad_unit="mT/m",
    max_slew=130,
    slew_unit="T/m/s",
    grad_raster_time=10e-6,
    rf_ringdown_time=10e-6,
    rf_dead_time=100e-6,
)
seq = pp.Sequence(system)

# Geometry: in-plane FOV, slice thickness and inter-slice spacing
fov = 220e-3
slice_thickness = 5e-3
slice_gap = 15e-3

# Slice centre positions, symmetric about rf_offset
delta_z = n_slices * slice_gap
rf_offset = 0
z = np.linspace((-delta_z / 2), (delta_z / 2), n_slices) + rf_offset

# =========
# RF pulses: 12-degree slice-selective excitation (rf/gz) and a hard
# 90-degree preparation pulse (rf90)
# =========
flip = 12 * pi / 180
rf, gz, _ = pp.make_sinc_pulse(
    flip_angle=flip,
    system=system,
    duration=2e-3,
    slice_thickness=slice_thickness,
    apodization=0.5,
    time_bw_product=4,
    return_gz=True,
)

flip90 = 90 * pi / 180
rf90 = pp.make_block_pulse(
    flip_angle=flip90, system=system, duration=500e-6, time_bw_product=4
)

# =========
# Readout
# =========
delta_k = 1 / fov  # k-space step (1/m)
k_width = Nx * delta_k
readout_time = 6.4e-3
gx = pp.make_trapezoid(
    channel="x", system=system, flat_area=k_width, flat_time=readout_time
)
# Sample only during the flat top of the readout gradient
adc = pp.make_adc(num_samples=Nx, duration=gx.flat_time, delay=gx.rise_time)

# =========
# Prephase and Rephase
# =========
# Phase-encode table spanning steps -Ny/2 .. Ny/2-1 of delta_k
phase_areas = (np.arange(Ny) - (Ny / 2)) * delta_k
gy_pre = pp.make_trapezoid(
    channel="y", system=system, area=phase_areas[-1], duration=2e-3
)

gx_pre = pp.make_trapezoid(channel="x", system=system, area=-gx.area / 2, duration=2e-3)

# Slice-select rephaser undoes half the slice-select gradient area
gz_reph = pp.make_trapezoid(
    channel="z", system=system, area=-gz.area / 2, duration=2e-3
)

# =========
# Spoilers
# =========
pre_time = 8e-4
gx_spoil = pp.make_trapezoid(
    channel="x", system=system, area=gz.area * 4, duration=pre_time * 4
)
gy_spoil = pp.make_trapezoid(
    channel="y", system=system, area=gz.area * 4, duration=pre_time * 4
)
gz_spoil = pp.make_trapezoid(
    channel="z", system=system, area=gz.area * 4, duration=pre_time * 4
)

# =========
# Delays
# =========
TE, TI, TR = 13e-3, 140e-3, 65e-3
# TE delay: excitation centre to readout centre, minus intervening events
delay_TE = (
    TE - pp.calc_duration(rf) / 2 - pp.calc_duration(gy_pre) - pp.calc_duration(gx) / 2
)
delay_TE = pp.make_delay(delay_TE)
# TI delay: preparation pulse centre to excitation, minus spoiler duration
delay_TI = TI - pp.calc_duration(rf90) / 2 - pp.calc_duration(gx_spoil)
delay_TI = pp.make_delay(delay_TI)
delay_TR = (
    TR
    - pp.calc_duration(rf) / 2
    - pp.calc_duration(gx) / 2
    - pp.calc_duration(gy_pre)
    - TE
)
delay_TR = pp.make_delay(delay_TR)

for j in range(n_slices):
    # Shift the excitation to slice position z[j] via an RF frequency offset
    freq_offset = gz.amplitude * z[j]
    rf.freq_offset = freq_offset

    for i in range(Ny):  # Phase encodes
        seq.add_block(rf90)  # 90-degree preparation
        seq.add_block(gx_spoil, gy_spoil, gz_spoil)
        seq.add_block(delay_TI)
        seq.add_block(rf, gz)  # Slice-selective excitation
        gy_pre = pp.make_trapezoid(
            channel="y", system=system, area=phase_areas[i], duration=2e-3
        )
        seq.add_block(gx_pre, gy_pre, gz_reph)
        seq.add_block(delay_TE)
        seq.add_block(gx, adc)  # Readout
        # Rewind the phase encode while spoiling
        gy_pre = pp.make_trapezoid(
            channel="y", system=system, area=-phase_areas[i], duration=2e-3
        )
        seq.add_block(gx_spoil, gy_pre)
        seq.add_block(delay_TR)

seq.set_definition(key="Name", value="2D T1 MPRAGE")
seq.write("2d_mprage_pypulseq.seq")

+ 155 - 0
LF_scanner/pypulseq/seq_examples/scripts/write_3Dt1_mprage.py

@@ -0,0 +1,155 @@
"""3D T1-weighted MPRAGE-style sequence: a hard 90-degree preparation pulse
with spoilers and a TI delay, then an inner loop of hard-pulse excitations
with 3D Cartesian (phase x partition) encoding.
Writes '256_3d_t1_mprage_pypulseq.seq'.
"""
from math import pi

import numpy as np

import pypulseq as pp

# 3D acquisition matrix: Nx readout, Ny phase encodes, Nz partitions
Nx = 256
Ny = 256
Nz = 32

# Scanner hardware limits (gradient/slew) and RF dead/ringdown times
system = pp.Opts(
    max_grad=32,
    grad_unit="mT/m",
    max_slew=130,
    slew_unit="T/m/s",
    grad_raster_time=10e-6,
    rf_ringdown_time=10e-6,
    rf_dead_time=100e-6,
)
seq = pp.Sequence(system)

# Geometry: in-plane and through-plane FOV
fov = 256e-3
fov_z = 256e-3
slice_thickness = 1e-3  # NOTE(review): not referenced below — confirm intent
section_thickness = 5e-3  # NOTE(review): not referenced below — confirm intent

# =========
# RF preparatory, excitation
# =========
flip_exc = 12 * pi / 180
rf = pp.make_block_pulse(
    flip_angle=flip_exc, system=system, duration=250e-6, time_bw_product=4
)

flip_prep = 90 * pi / 180
rf_prep = pp.make_block_pulse(
    flip_angle=flip_prep, system=system, duration=500e-6, time_bw_product=4
)

# =========
# Readout
# =========
delta_k = 1 / fov  # in-plane k-space step (1/m)
k_width = Nx * delta_k
readout_time = 3.5e-3
gx = pp.make_trapezoid(
    channel="x", system=system, flat_area=k_width, flat_time=readout_time
)
# Sample only during the flat top of the readout gradient
adc = pp.make_adc(num_samples=Nx, duration=gx.flat_time, delay=gx.rise_time)

# =========
# Prephase and Rephase
# =========
delta_kz = 1 / fov_z  # through-plane k-space step (1/m)
phase_areas = (np.arange(Ny) - (Ny / 2)) * delta_k
slice_areas = (np.arange(Nz) - (Nz / 2)) * delta_kz

gx_pre = pp.make_trapezoid(channel="x", system=system, area=-gx.area / 2, duration=2e-3)
gy_pre = pp.make_trapezoid(
    channel="y", system=system, area=phase_areas[-1], duration=2e-3
)

# =========
# Spoilers
# =========
pre_time = 6.4e-4
# Spoiler area formula: the gamma factors cancel, leaving 4*pi/(delta_k*1e-3)
# — presumably targeting ~4*pi dephasing per mm; TODO confirm derivation
gx_spoil = pp.make_trapezoid(
    channel="x",
    system=system,
    area=(4 * np.pi) / (42.576e6 * delta_k * 1e-3) * 42.576e6,
    duration=pre_time * 6,
)
gy_spoil = pp.make_trapezoid(
    channel="y",
    system=system,
    area=(4 * np.pi) / (42.576e6 * delta_k * 1e-3) * 42.576e6,
    duration=pre_time * 6,
)
gz_spoil = pp.make_trapezoid(
    channel="z",
    system=system,
    area=(4 * np.pi) / (42.576e6 * delta_kz * 1e-3) * 42.576e6,
    duration=pre_time * 6,
)

# =========
# Extended trapezoids: gx, gx_spoil
# =========
# Merge the readout fall directly into the spoiler ramp: gx_extended ends at
# the spoiler amplitude, and gx_spoil_extended continues from it down to 0.
t_gx_extended = np.array(
    [0, gx.rise_time, gx.flat_time, (gx.rise_time * 2) + gx.flat_time + gx.fall_time]
)
amp_gx_extended = np.array([0, gx.amplitude, gx.amplitude, gx_spoil.amplitude])
t_gx_spoil_extended = np.array(
    [
        0,
        gx_spoil.rise_time + gx_spoil.flat_time,
        gx_spoil.rise_time + gx_spoil.flat_time + gx_spoil.fall_time,
    ]
)
amp_gx_spoil_extended = np.array([gx_spoil.amplitude, gx_spoil.amplitude, 0])

gx_extended = pp.make_extended_trapezoid(
    channel="x", times=t_gx_extended, amplitudes=amp_gx_extended
)
gx_spoil_extended = pp.make_extended_trapezoid(
    channel="x", times=t_gx_spoil_extended, amplitudes=amp_gx_spoil_extended
)

# =========
# Delays
# =========
TE, TI, TR, T_recovery = 4e-3, 140e-3, 10e-3, 1e-3
# delay_TE is computed but its block is commented out below (it evaluates to
# ~0 for this timing)
delay_TE = (
    TE - pp.calc_duration(rf) / 2 - pp.calc_duration(gx_pre) - pp.calc_duration(gx) / 2
)
delay_TI = TI - pp.calc_duration(rf_prep) / 2 - pp.calc_duration(gx_spoil)
delay_TR = (
    TR
    - pp.calc_duration(rf)
    - pp.calc_duration(gx_pre)
    - pp.calc_duration(gx)
    - pp.calc_duration(gx_spoil)
)

for i in range(Ny):  # Phase encodes (outer loop)
    gy_pre = pp.make_trapezoid(
        channel="y", system=system, area=phase_areas[i], duration=2e-3
    )

    # Magnetization preparation + spoiling + inversion-time delay
    seq.add_block(rf_prep)
    seq.add_block(gx_spoil, gy_spoil, gz_spoil)
    seq.add_block(pp.make_delay(delay_TI))

    for j in range(Nz):  # Partitions (inner loop)
        gz_pre = pp.make_trapezoid(
            channel="z", system=system, area=slice_areas[j], duration=2e-3
        )
        gz_reph = pp.make_trapezoid(
            channel="z", system=system, area=-slice_areas[j], duration=2e-3
        )

        seq.add_block(rf)
        seq.add_block(gx_pre, gy_pre, gz_pre)
        # Skip TE: readout_time = 3.5e-3 --> TE = -2.168404344971009e-19
        # seq.add_block(pp.make_delay(delay_TE))
        seq.add_block(gx_extended, adc)
        seq.add_block(gx_spoil_extended, gz_reph)
        seq.add_block(pp.make_delay(delay_TR))

    seq.add_block(pp.make_delay(T_recovery))

seq.set_definition(key="Name", value="3D T1 MPRAGE")

seq.write("256_3d_t1_mprage_pypulseq.seq")
# seq.plot(time_range=(0, TI + TR + 2e-3))

+ 196 - 0
LF_scanner/pypulseq/seq_examples/scripts/write_MPRAGE.py

@@ -0,0 +1,196 @@
+from types import SimpleNamespace
+
+import numpy as np
+
+import pypulseq as pp
+
+
def main(plot: bool, write_seq: bool, seq_filename: str = "mprage_pypulseq.seq"):
    """Build a 3D MPRAGE sequence (adiabatic inversion + fast low-flip readout).

    Parameters
    ----------
    plot : bool
        If True, plot the first two outer TRs with the PAR label trace.
    write_seq : bool
        If True, write the sequence to `seq_filename`.
    seq_filename : str
        Output .seq file name.
    """
    # ======
    # SETUP
    # ======
    seq = pp.Sequence()  # Create a new sequence object

    # Set system limits
    system = pp.Opts(
        max_grad=24,
        grad_unit="mT/m",
        max_slew=100,
        slew_unit="T/m/s",
        rf_ringdown_time=20e-6,
        rf_dead_time=100e-6,
        adc_dead_time=10e-6,
    )

    alpha = 7  # Flip angle
    ro_dur = 5017.6e-6  # Readout duration (s)
    ro_os = 1  # Readout oversampling
    ro_spoil = 3  # Additional k-max excursion for RO spoiling
    TI = 1.1
    TR_out = 2.5

    rf_spoiling_inc = 117  # RF spoiling phase increment (degrees)
    rf_len = 100e-6
    ax = SimpleNamespace()  # Encoding axes

    fov = np.array([192, 240, 256]) * 1e-3  # Define FOV and resolution
    N = [192, 240, 256]
    ax.d1 = "z"  # Fastest dimension (readout)
    ax.d2 = "x"  # Second-fastest dimension (inner phase-encoding loop)
    xyz = ["x", "y", "z"]
    ax.d3 = np.setdiff1d(xyz, [ax.d1, ax.d2])[0]  # Remaining (outer) dimension
    ax.n1 = xyz.index(ax.d1)
    ax.n2 = xyz.index(ax.d2)
    ax.n3 = xyz.index(ax.d3)

    # Create alpha-degree hard pulse and gradient
    rf = pp.make_block_pulse(
        flip_angle=alpha * np.pi / 180, system=system, duration=rf_len
    )
    # Adiabatic hyperbolic-secant inversion pulse
    rf180 = pp.make_adiabatic_pulse(
        pulse_type="hypsec", system=system, duration=10.24e-3, dwell=1e-5
    )

    # Define other gradients and ADC events
    deltak = 1 / fov
    gro = pp.make_trapezoid(
        channel=ax.d1,
        amplitude=N[ax.n1] * deltak[ax.n1] / ro_dur,
        flat_time=np.ceil((ro_dur + system.adc_dead_time) / system.grad_raster_time)
        * system.grad_raster_time,
        system=system,
    )
    adc = pp.make_adc(
        num_samples=N[ax.n1] * ro_os,
        duration=ro_dur,
        delay=gro.rise_time,
        system=system,
    )
    #  First 0.5 is necessary to account for the Siemens sampling in the center of the dwell periods
    gro_pre = pp.make_trapezoid(
        channel=ax.d1,
        area=-gro.amplitude
        * (adc.dwell * (adc.num_samples / 2 + 0.5) + 0.5 * gro.rise_time),
        system=system,
    )
    gpe1 = pp.make_trapezoid(
        channel=ax.d2, area=-deltak[ax.n2] * (N[ax.n2] / 2), system=system
    )  # Maximum PE1 gradient
    gpe2 = pp.make_trapezoid(
        channel=ax.d3, area=-deltak[ax.n3] * (N[ax.n3] / 2), system=system
    )  # Maximum PE2 gradient
    # Spoil with 4x cycles per voxel
    gsl_sp = pp.make_trapezoid(
        channel=ax.d3, area=np.max(deltak * N) * 4, duration=10e-3, system=system
    )

    # We cut the RO gradient into two parts for the optimal spoiler timing
    gro1, gro_Sp = pp.split_gradient_at(
        grad=gro, time_point=gro.rise_time + gro.flat_time
    )
    # Gradient spoiling
    if ro_spoil > 0:
        gro_Sp = pp.make_extended_trapezoid_area(
            channel=gro.channel,
            grad_start=gro.amplitude,
            grad_end=0,
            area=deltak[ax.n1] / 2 * N[ax.n1] * ro_spoil,
            system=system,
        )[0]

    # Calculate timing of the fast loop. We will have two blocks in the inner loop:
    # 1: spoilers/rewinders + RF
    # 2: prewinder, phase encoding + readout
    rf.delay = pp.calc_duration(gro_Sp, gpe1, gpe2)
    gro_pre, _, _ = pp.align(right=[gro_pre, gpe1, gpe2])
    gro1.delay = pp.calc_duration(gro_pre)
    adc.delay = gro1.delay + gro.rise_time
    gro1 = pp.add_gradients(grads=[gro1, gro_pre], system=system)
    TR_inner = pp.calc_duration(rf) + pp.calc_duration(gro1)  # For TI delay
    # pe_steps -- control reordering
    pe1_steps = ((np.arange(N[ax.n2])) - N[ax.n2] / 2) / N[ax.n2] * 2
    pe2_steps = ((np.arange(N[ax.n3])) - N[ax.n3] / 2) / N[ax.n3] * 2
    # TI calc: delay from the inversion centre to the centre-of-k-space
    # excitation, rounded to the block-duration raster
    TI_delay = (
        np.round(
            (
                TI
                - (np.where(pe1_steps == 0)[0][0]) * TR_inner
                - (pp.calc_duration(rf180) - pp.calc_rf_center(rf180)[0] - rf180.delay)
                - rf.delay
                - pp.calc_rf_center(rf)[0]
            )
            / system.block_duration_raster
        )
        * system.block_duration_raster
    )
    TR_out_delay = TR_out - TR_inner * N[ax.n2] - TI_delay - pp.calc_duration(rf180)

    # All LABELS / counters and flags are automatically initialized to 0 in the beginning, no need to define initial 0's
    # so we will just increment LIN after the ADC event (e.g. during the spoiler)
    label_inc_lin = pp.make_label(type="INC", label="LIN", value=1)
    label_inc_par = pp.make_label(type="INC", label="PAR", value=1)
    label_reset_par = pp.make_label(type="SET", label="PAR", value=0)

    # Pre-register objects that do not change while looping
    result = seq.register_grad_event(gsl_sp)
    gsl_sp.id = result if isinstance(result, int) else result[0]
    result = seq.register_grad_event(gro_Sp)
    gro_Sp.id = result if isinstance(result, int) else result[0]
    result = seq.register_grad_event(gro1)
    gro1.id = result if isinstance(result, int) else result[0]
    # Phase of the RF object will change, therefore we only pre-register the shapes
    _, rf.shape_IDs = seq.register_rf_event(rf)
    rf180.id, rf180.shape_IDs = seq.register_rf_event(rf180)
    label_inc_par.id = seq.register_label_event(label_inc_par)

    # Sequence
    for j in range(N[ax.n3]):
        seq.add_block(rf180)  # Inversion
        seq.add_block(pp.make_delay(TI_delay), gsl_sp)
        rf_phase = 0
        rf_inc = 0
        # Pre-register PE events that repeat in the inner loop
        gpe2je = pp.scale_grad(grad=gpe2, scale=pe2_steps[j])
        gpe2je.id = seq.register_grad_event(gpe2je)
        gpe2jr = pp.scale_grad(grad=gpe2, scale=-pe2_steps[j])
        gpe2jr.id = seq.register_grad_event(gpe2jr)

        for i in range(N[ax.n2]):
            # RF spoiling: quadratic phase schedule applied to RF and ADC
            rf.phase_offset = rf_phase / 180 * np.pi
            adc.phase_offset = rf_phase / 180 * np.pi
            rf_inc = np.mod(rf_inc + rf_spoiling_inc, 360.0)
            rf_phase = np.mod(rf_phase + rf_inc, 360.0)

            if i == 0:
                seq.add_block(rf)
            else:
                # Spoiler/rewinders of the previous TR share the block with RF
                seq.add_block(
                    rf,
                    gro_Sp,
                    pp.scale_grad(grad=gpe1, scale=-pe1_steps[i - 1]),
                    gpe2jr,
                    label_inc_par,
                )
            seq.add_block(
                adc, gro1, pp.scale_grad(grad=gpe1, scale=pe1_steps[i]), gpe2je
            )
        seq.add_block(
            gro_Sp, pp.make_delay(TR_out_delay), label_reset_par, label_inc_lin
        )

    # ======
    # VISUALIZATION
    # ======
    if plot:
        seq.plot(time_range=[0, TR_out * 2], label="PAR")

    # =========
    # WRITE .SEQ
    # =========
    if write_seq:
        seq.write(seq_filename)


if __name__ == "__main__":
    main(plot=True, write_seq=True)

+ 114 - 0
LF_scanner/pypulseq/seq_examples/scripts/write_epi.py

@@ -0,0 +1,114 @@
+"""
+Demo low-performance EPI sequence without ramp-sampling.
+"""
+
+import numpy as np
+
+import MRI_seq.pypulseq as pp
+
+
def main(plot: bool, write_seq: bool, seq_filename: str = "epi_pypulseq.seq"):
    """Build a simple multi-slice EPI sequence without ramp sampling.

    Parameters
    ----------
    plot : bool
        If True, plot the sequence waveforms.
    write_seq : bool
        If True, write the sequence to `seq_filename`.
    seq_filename : str
        Output .seq file name.
    """
    # ======
    # SETUP
    # ======
    seq = pp.Sequence()  # Create a new sequence object
    # Define FOV and resolution
    fov = 220e-3
    Nx = 64
    Ny = 64
    slice_thickness = 3e-3  # Slice thickness
    n_slices = 3

    # Set system limits
    system = pp.Opts(
        max_grad=32,
        grad_unit="mT/m",
        max_slew=130,
        slew_unit="T/m/s",
        rf_ringdown_time=30e-6,
        rf_dead_time=100e-6,
    )

    # ======
    # CREATE EVENTS
    # ======
    # Create 90 degree slice selection pulse and gradient
    rf, gz, _ = pp.make_sinc_pulse(
        flip_angle=np.pi / 2,
        system=system,
        duration=3e-3,
        slice_thickness=slice_thickness,
        apodization=0.5,
        time_bw_product=4,
        return_gz=True,
    )

    # Define other gradients and ADC events
    delta_k = 1 / fov
    k_width = Nx * delta_k
    dwell_time = 4e-6
    readout_time = Nx * dwell_time
    flat_time = np.ceil(readout_time * 1e5) * 1e-5  # round-up to the gradient raster
    gx = pp.make_trapezoid(
        channel="x",
        system=system,
        amplitude=k_width / readout_time,
        flat_time=flat_time,
    )
    # Centre the ADC window on the gradient flat top
    adc = pp.make_adc(
        num_samples=Nx,
        duration=readout_time,
        delay=gx.rise_time + flat_time / 2 - (readout_time - dwell_time) / 2,
    )

    # Pre-phasing gradients
    pre_time = 8e-4
    gx_pre = pp.make_trapezoid(
        channel="x", system=system, area=-gx.area / 2, duration=pre_time
    )
    gz_reph = pp.make_trapezoid(
        channel="z", system=system, area=-gz.area / 2, duration=pre_time
    )
    gy_pre = pp.make_trapezoid(
        channel="y", system=system, area=-Ny / 2 * delta_k, duration=pre_time
    )

    # Phase blip in the shortest possible time
    dur = np.ceil(2 * np.sqrt(delta_k / system.max_slew) / 10e-6) * 10e-6
    gy = pp.make_trapezoid(channel="y", system=system, area=delta_k, duration=dur)

    # ======
    # CONSTRUCT SEQUENCE
    # ======
    # Define sequence blocks
    for s in range(n_slices):
        # Frequency offset selects slice s; slices are centred about isocentre
        rf.freq_offset = gz.amplitude * slice_thickness * (s - (n_slices - 1) / 2)
        seq.add_block(rf, gz)
        seq.add_block(gx_pre, gy_pre, gz_reph)
        for i in range(Ny):
            seq.add_block(gx, adc)  # Read one line of k-space
            seq.add_block(gy)  # Phase blip
            gx.amplitude = -gx.amplitude  # Reverse polarity of read gradient

    ok, error_report = seq.check_timing()
    if ok:
        print("Timing check passed successfully")
    else:
        print("Timing check failed! Error listing follows:")
        print(error_report)

    # ======
    # VISUALIZATION
    # ======
    if plot:
        seq.plot()  # Plot sequence waveforms

    # =========
    # WRITE .SEQ
    # =========
    if write_seq:
        seq.write(seq_filename)


if __name__ == "__main__":
    main(plot=True, write_seq=True)

+ 174 - 0
LF_scanner/pypulseq/seq_examples/scripts/write_epi_label.py

@@ -0,0 +1,174 @@
+"""
+Demo low-performance EPI sequence without ramp-sampling.
+In addition, it demonstrates how the LABEL extension can be used to set data header values, which can be used either in
+combination with integrated image reconstruction or to guide the off-line reconstruction tools.
+"""
+
+import numpy as np
+
+import MRI_seq.pypulseq as pp
+from MRI_seq.pypulseq import calc_rf_center
+
+
def main(plot: bool, write_seq: bool, seq_filename: str = "epi_lable_pypulseq.seq"):
    """Build a multi-slice, multi-repetition EPI sequence with LABEL extensions.

    The labels (LIN/NAV/AVG/REV/SEG/SLC/REP) set data-header values used by
    integrated or off-line reconstruction tools.

    Parameters
    ----------
    plot : bool
        If True, plot the first 100 ms of the sequence with label traces.
    write_seq : bool
        If True, set FOV/Name definitions and write the .seq file.
    seq_filename : str
        Output .seq file name. NOTE(review): the default spells 'lable' —
        kept as-is because it is a runtime value.
    """
    # ======
    # SETUP
    # ======
    seq = pp.Sequence()  # Create a new sequence object
    fov = 220e-3  # Define FOV and resolution
    Nx = 96
    Ny = 96
    slice_thickness = 3e-3  # Slice thickness
    n_slices = 7
    n_reps = 4
    navigator = 3

    # Set system limits
    system = pp.Opts(
        max_grad=32,
        grad_unit="mT/m",
        max_slew=130,
        slew_unit="T/m/s",
        rf_ringdown_time=30e-6,
        rf_dead_time=100e-6,
    )

    # ======
    # CREATE EVENTS
    # ======
    # Create 90 degree slice selection pulse and gradient
    rf, gz, _ = pp.make_sinc_pulse(
        flip_angle=np.pi / 2,
        system=system,
        duration=3e-3,
        slice_thickness=slice_thickness,
        apodization=0.5,
        time_bw_product=4,
        return_gz=True,
    )

    # Define trigger
    trig = pp.make_trigger(channel="physio1", duration=2000e-6)

    # Define other gradients and ADC events
    delta_k = 1 / fov
    k_width = Nx * delta_k
    dwell_time = 4e-6
    readout_time = Nx * dwell_time
    flat_time = np.ceil(readout_time * 1e5) * 1e-5  # Round-up to the gradient raster
    gx = pp.make_trapezoid(
        channel="x",
        system=system,
        amplitude=k_width / readout_time,
        flat_time=flat_time,
    )
    # Centre the ADC window on the gradient flat top
    adc = pp.make_adc(
        num_samples=Nx,
        duration=readout_time,
        delay=gx.rise_time + flat_time / 2 - (readout_time - dwell_time) / 2,
    )

    # Pre-phasing gradients
    pre_time = 8e-4
    gx_pre = pp.make_trapezoid(
        channel="x", system=system, area=-gx.area / 2, duration=pre_time
    )
    gz_reph = pp.make_trapezoid(
        channel="z", system=system, area=-gz.area / 2, duration=pre_time
    )
    gy_pre = pp.make_trapezoid(
        channel="y", system=system, area=Ny / 2 * delta_k, duration=pre_time
    )

    # Phase blip in the shortest possible time
    dur = np.ceil(2 * np.sqrt(delta_k / system.max_slew) / 10e-6) * 10e-6
    gy = pp.make_trapezoid(channel="y", system=system, area=-delta_k, duration=dur)

    gz_spoil = pp.make_trapezoid(channel="z", system=system, area=delta_k * Nx * 4)

    # ======
    # CONSTRUCT SEQUENCE
    # ======
    # Define sequence blocks
    for r in range(n_reps):
        seq.add_block(trig, pp.make_label(type="SET", label="SLC", value=0))
        for s in range(n_slices):
            rf.freq_offset = gz.amplitude * slice_thickness * (s - (n_slices - 1) / 2)
            # Compensate for the slice-offset induced phase
            rf.phase_offset = -rf.freq_offset * calc_rf_center(rf)[0]
            seq.add_block(rf, gz)
            seq.add_block(
                gx_pre,
                gz_reph,
                pp.make_label(type="SET", label="NAV", value=1),
                pp.make_label(type="SET", label="LIN", value=np.round(Ny / 2)),
            )
            # Navigator readouts (no phase blips) before the imaging train
            for n in range(navigator):
                seq.add_block(
                    gx,
                    adc,
                    pp.make_label(type="SET", label="REV", value=gx.amplitude < 0),
                    pp.make_label(type="SET", label="SEG", value=gx.amplitude < 0),
                    pp.make_label(type="SET", label="AVG", value=n + 1 == 3),
                )
                if n + 1 != navigator:
                    # Dummy blip pulse to maintain identical RO gradient timing and the corresponding eddy currents
                    seq.add_block(pp.make_delay(pp.calc_duration(gy)))

                gx.amplitude = -gx.amplitude  # Reverse polarity of read gradient

            # Reset lin/nav/avg
            seq.add_block(
                gy_pre,
                pp.make_label(type="SET", label="LIN", value=0),
                pp.make_label(type="SET", label="NAV", value=0),
                pp.make_label(type="SET", label="AVG", value=0),
            )

            for i in range(Ny):
                seq.add_block(
                    pp.make_label(type="SET", label="REV", value=gx.amplitude < 0),
                    pp.make_label(type="SET", label="SEG", value=gx.amplitude < 0),
                )
                seq.add_block(gx, adc)  # Read one line of k-space
                # Phase blip
                seq.add_block(gy, pp.make_label(type="INC", label="LIN", value=1))
                gx.amplitude = -gx.amplitude  # Reverse polarity of read gradient

            seq.add_block(
                gz_spoil,
                pp.make_delay(0.1),
                pp.make_label(type="INC", label="SLC", value=1),
            )
            # Keep the starting readout polarity consistent across slices when
            # the echo count (navigator + Ny) is odd
            if np.remainder(navigator + Ny, 2) != 0:
                gx.amplitude = -gx.amplitude

        seq.add_block(pp.make_label(type="INC", label="REP", value=1))

    ok, error_report = seq.check_timing()
    if ok:
        print("Timing check passed successfully")
    else:
        print("Timing check failed! Error listing follows:")
        print(error_report)

    # ======
    # VISUALIZATION
    # ======
    if plot:
        seq.plot(
            time_range=(0, 0.1), time_disp="ms", label="SEG, LIN, SLC"
        )  # Plot sequence waveforms

    # =========
    # WRITE .SEQ
    # =========
    if write_seq:
        # Prepare sequence report
        seq.set_definition(key="FOV", value=[fov, fov, slice_thickness * n_slices])
        seq.set_definition(key="Name", value="epi_lbl")
        seq.write(seq_filename)


if __name__ == "__main__":
    main(plot=True, write_seq=True)

+ 139 - 0
LF_scanner/pypulseq/seq_examples/scripts/write_epi_se.py

@@ -0,0 +1,139 @@
+import math
+
+import numpy as np
+
+import MRI_seq.pypulseq as pp
+
+
def main(plot: bool, write_seq: bool, seq_filename: str = "epi_se_pypulseq.seq"):
    """
    Single-shot spin-echo EPI example sequence.

    Builds a 90-degree slice-selective excitation, a hard 180-degree
    refocusing pulse flanked by z spoilers, and an EPI readout train of
    ``Ny`` gradient-echo lines; then checks timing and optionally plots
    and/or writes the .seq file.

    Parameters
    ----------
    plot : bool
        Plot the sequence waveforms after construction.
    write_seq : bool
        Write the sequence to ``seq_filename``.
    seq_filename : str
        Output .seq file name.
    """
    # ======
    # SETUP
    # ======
    seq = pp.Sequence()  # Create a new sequence object
    fov = 256e-3  # Define FOV and resolution
    Nx = 64
    Ny = 64

    # Set system limits
    system = pp.Opts(
        max_grad=32,
        grad_unit="mT/m",
        max_slew=130,
        slew_unit="T/m/s",
        rf_ringdown_time=20e-6,
        rf_dead_time=100e-6,
        adc_dead_time=20e-6,
    )

    # ======
    # CREATE EVENTS
    # ======
    # Create 90 degree slice selection pulse and gradient
    rf, gz, _ = pp.make_sinc_pulse(
        flip_angle=np.pi / 2,
        system=system,
        duration=3e-3,
        slice_thickness=3e-3,
        apodization=0.5,
        time_bw_product=4,
        return_gz=True,
    )

    # Define other gradients and ADC events
    delta_k = 1 / fov
    k_width = Nx * delta_k
    readout_time = 3.2e-4  # flat-top duration of each readout lobe
    gx = pp.make_trapezoid(
        channel="x", system=system, flat_area=k_width, flat_time=readout_time
    )
    adc = pp.make_adc(
        num_samples=Nx, system=system, duration=gx.flat_time, delay=gx.rise_time
    )

    # Pre-phasing gradients
    pre_time = 8e-4
    gz_reph = pp.make_trapezoid(
        channel="z", system=system, area=-gz.area / 2, duration=pre_time
    )
    # Do not need minus for in-plane prephasers because of the spin-echo (position reflection in k-space)
    gx_pre = pp.make_trapezoid(
        channel="x", system=system, area=gx.area / 2 - delta_k / 2, duration=pre_time
    )
    gy_pre = pp.make_trapezoid(
        channel="y", system=system, area=Ny / 2 * delta_k, duration=pre_time
    )

    # Phase blip in shortest possible time (rounded up to the 10 us raster)
    dur = math.ceil(2 * math.sqrt(delta_k / system.max_slew) / 10e-6) * 10e-6
    gy = pp.make_trapezoid(channel="y", system=system, area=delta_k, duration=dur)

    # Refocusing pulse with spoiling gradients
    rf180 = pp.make_block_pulse(
        flip_angle=np.pi, system=system, duration=500e-6, use="refocusing"
    )
    gz_spoil = pp.make_trapezoid(
        channel="z", system=system, area=gz.area * 2, duration=3 * pre_time
    )

    # Calculate delay time
    # The two delays place the 180-degree pulse at TE/2 after the excitation
    # center and the k-space center echo at TE.  NOTE(review): negative
    # delays (infeasible TE) are not asserted against here -- they would
    # only surface in check_timing() below.
    TE = 60e-3
    duration_to_center = (Nx / 2 + 0.5) * pp.calc_duration(
        gx
    ) + Ny / 2 * pp.calc_duration(gy)
    rf_center_incl_delay = rf.delay + pp.calc_rf_center(rf)[0]
    rf180_center_incl_delay = rf180.delay + pp.calc_rf_center(rf180)[0]
    delay_TE1 = (
        TE / 2
        - pp.calc_duration(gz)
        + rf_center_incl_delay
        - pre_time
        - pp.calc_duration(gz_spoil)
        - rf180_center_incl_delay
    )
    delay_TE2 = (
        TE / 2
        - pp.calc_duration(rf180)
        + rf180_center_incl_delay
        - pp.calc_duration(gz_spoil)
        - duration_to_center
    )

    # ======
    # CONSTRUCT SEQUENCE
    # ======
    # Define sequence blocks
    seq.add_block(rf, gz)
    seq.add_block(gx_pre, gy_pre, gz_reph)
    seq.add_block(pp.make_delay(delay_TE1))
    seq.add_block(gz_spoil)
    seq.add_block(rf180)
    seq.add_block(gz_spoil)
    seq.add_block(pp.make_delay(delay_TE2))
    for i in range(Ny):
        seq.add_block(gx, adc)  # Read one line of k-space
        seq.add_block(gy)  # Phase blip
        gx.amplitude = -gx.amplitude  # Reverse polarity of read gradient
    seq.add_block(pp.make_delay(1e-4))

    ok, error_report = seq.check_timing()
    if ok:
        print("Timing check passed successfully")
    else:
        print("Timing check failed! Error listing follows:")
        print(error_report)

    # ======
    # VISUALIZATION
    # ======
    if plot:
        seq.plot()

    # =========
    # WRITE .SEQ
    # =========
    if write_seq:
        seq.write(seq_filename)


if __name__ == "__main__":
    main(plot=True, write_seq=True)

+ 287 - 0
LF_scanner/pypulseq/seq_examples/scripts/write_epi_se_rs.py

@@ -0,0 +1,287 @@
+"""
+This is an experimental high-performance EPI sequence which uses split gradients to overlap blips with the readout 
+gradients combined with ramp-sampling.
+"""
+import math
+
+import numpy as np
+
+import MRI_seq.pypulseq as pp
+
+
def main(plot: bool, write_seq: bool, seq_filename: str = "epi_se_rs_pypulseq.seq"):
    """
    High-performance spin-echo EPI with split gradient blips and ramp sampling.

    Builds a multi-slice, fat-saturated, partial-Fourier SE-EPI sequence in
    which each phase blip is split into two halves overlapped with the
    readout ramps, and the ADC samples on the gradient ramps as well.
    Checks timing, prints a test report, and optionally plots / writes the
    .seq file.

    Parameters
    ----------
    plot : bool
        Plot the sequence waveforms after construction.
    write_seq : bool
        Write the sequence to ``seq_filename``.
    seq_filename : str
        Output .seq file name.
    """
    # ======
    # SETUP
    # ======
    seq = pp.Sequence()  # Create a new sequence object
    fov = 250e-3  # Define FOV and resolution
    Nx = 64
    Ny = 64
    slice_thickness = 3e-3  # Slice thickness
    n_slices = 3
    TE = 40e-3

    pe_enable = 1  # Flag to quickly disable phase encoding (1/0) as needed for the delay calibration
    ro_os = 1  # Oversampling factor
    readout_time = 4.2e-4  # Duration of the flat part of each readout lobe
    # Partial Fourier factor: 1: full sampling; 0: start with ky=0
    part_fourier_factor = 0.75

    t_RF_ex = 2e-3
    t_RF_ref = 2e-3
    spoil_factor = 1.5  # Spoiling gradient around the pi-pulse (rf180)

    # Set system limits
    system = pp.Opts(
        max_grad=32,
        grad_unit="mT/m",
        max_slew=130,
        slew_unit="T/m/s",
        rf_ringdown_time=30e-6,
        rf_dead_time=100e-6,
    )

    # ======
    # CREATE EVENTS
    # ======
    # Create fat-sat pulse
    B0 = 2.89  # assumed field strength in T for the fat-sat offset -- TODO confirm for this scanner
    sat_ppm = -3.45
    sat_freq = sat_ppm * 1e-6 * B0 * system.gamma
    rf_fs = pp.make_gauss_pulse(
        flip_angle=110 * np.pi / 180,
        system=system,
        duration=8e-3,
        bandwidth=np.abs(sat_freq),
        freq_offset=sat_freq,
    )
    gz_fs = pp.make_trapezoid(
        channel="z", system=system, delay=pp.calc_duration(rf_fs), area=1 / 1e-4
    )

    # Create 90 degree slice selection pulse and gradient
    rf, gz, gz_reph = pp.make_sinc_pulse(
        flip_angle=np.pi / 2,
        system=system,
        duration=t_RF_ex,
        slice_thickness=slice_thickness,
        apodization=0.5,
        time_bw_product=4,
        return_gz=True,
    )

    # Create 90 degree slice refocusing pulse and gradients
    rf180, gz180, _ = pp.make_sinc_pulse(
        flip_angle=np.pi,
        system=system,
        duration=t_RF_ref,
        slice_thickness=slice_thickness,
        apodization=0.5,
        time_bw_product=4,
        phase_offset=np.pi / 2,
        use="refocusing",
        return_gz=True,
    )
    # Build the combined spoiler/slice-select/spoiler z gradient around rf180
    _, gzr1_t, gzr1_a = pp.make_extended_trapezoid_area(
        channel="z",
        grad_start=0,
        grad_end=gz180.amplitude,
        area=spoil_factor * gz.area,
        system=system,
    )
    _, gzr2_t, gzr2_a = pp.make_extended_trapezoid_area(
        channel="z",
        grad_start=gz180.amplitude,
        grad_end=0,
        area=-gz_reph.area + spoil_factor * gz.area,
        system=system,
    )
    # Shift either the gradient or the RF so the rf180 plateau coincides
    # with the flat part of the combined z waveform
    if gz180.delay > (gzr1_t[3] - gz180.rise_time):
        gz180.delay -= gzr1_t[3] - gz180.rise_time
    else:
        rf180.delay += (gzr1_t[3] - gz180.rise_time) - gz180.delay
    gz180n = pp.make_extended_trapezoid(
        channel="z",
        system=system,
        times=np.array([*gzr1_t, *gzr1_t[3] + gz180.flat_time + gzr2_t]) + gz180.delay,
        amplitudes=np.array([*gzr1_a, *gzr2_a]),
    )

    # Define the output trigger to play out with every slice excitation
    trig = pp.make_digital_output_pulse(channel="osc0", duration=100e-6)

    # Define other gradients and ADC events
    delta_k = 1 / fov
    k_width = Nx * delta_k

    # Phase blip in shortest possible time
    # Round up the duration to 2x gradient raster time
    blip_duration = (
        np.ceil(2 * np.sqrt(delta_k / system.max_slew) / 10e-6 / 2) * 10e-6 * 2
    )
    # Use negative blips to save one k-space line on our way to center of k-space
    gy = pp.make_trapezoid(
        channel="y", system=system, area=-delta_k, duration=blip_duration
    )

    # Readout gradient is a truncated trapezoid with dead times at the beginning and at the end each equal to a half of
    # blip duration. The area between the blips should be defined by k_width. We do a two-step calculation: we first
    # increase the area assuming maximum slew rate and then scale down the amplitude to fix the area
    extra_area = blip_duration / 2 * blip_duration / 2 * system.max_slew
    gx = pp.make_trapezoid(
        channel="x",
        system=system,
        area=k_width + extra_area,
        duration=readout_time + blip_duration,
    )
    actual_area = (
        gx.area
        - gx.amplitude / gx.rise_time * blip_duration / 2 * blip_duration / 2 / 2
    )
    actual_area -= (
        gx.amplitude / gx.fall_time * blip_duration / 2 * blip_duration / 2 / 2
    )
    gx.amplitude = gx.amplitude / actual_area * k_width
    gx.area = gx.amplitude * (gx.flat_time + gx.rise_time / 2 + gx.fall_time / 2)
    gx.flat_area = gx.amplitude * gx.flat_time

    # Calculate ADC
    # We use ramp sampling, so we have to calculate the dwell time and the number of samples, which will be quite
    # different from Nx and readout_time/Nx, respectively.
    adc_dwell_nyquist = delta_k / gx.amplitude / ro_os
    # Round-down dwell time to 100 ns
    adc_dwell = math.floor(adc_dwell_nyquist * 1e7) * 1e-7
    # Number of samples on Siemens needs to be divisible by 4
    adc_samples = math.floor(readout_time / adc_dwell / 4) * 4
    adc = pp.make_adc(num_samples=adc_samples, dwell=adc_dwell, delay=blip_duration / 2)
    # Realign the ADC with respect to the gradient
    # Supposedly Siemens samples at center of dwell period
    time_to_center = adc_dwell * ((adc_samples - 1) / 2 + 0.5)
    # Adjust delay to align the trajectory with the gradient. We have to align the delay to 1us
    adc.delay = round((gx.rise_time + gx.flat_time / 2 - time_to_center) * 1e6) * 1e-6
    # This rounding actually makes the sampling points on odd and even readouts to appear misaligned. However, on the
    # real hardware this misalignment is much stronger anyways due to the gradient delays

    # Split the blip into two halves and produce a combined synthetic gradient
    gy_parts = pp.split_gradient_at(
        grad=gy, time_point=blip_duration / 2, system=system
    )
    gy_blipup, gy_blipdown, _ = pp.align(right=gy_parts[0], left=[gy_parts[1], gx])
    gy_blipdownup = pp.add_gradients((gy_blipdown, gy_blipup), system=system)

    # pe_enable support
    gy_blipup.waveform = gy_blipup.waveform * pe_enable
    gy_blipdown.waveform = gy_blipdown.waveform * pe_enable
    gy_blipdownup.waveform = gy_blipdownup.waveform * pe_enable

    # Phase encoding and partial Fourier
    # PE steps prior to ky=0, excluding the central line
    Ny_pre = round(part_fourier_factor * Ny / 2 - 1)
    # PE lines after the k-space center including the central line
    Ny_post = round(Ny / 2 + 1)
    Ny_meas = Ny_pre + Ny_post

    # Pre-phasing gradients
    gx_pre = pp.make_trapezoid(channel="x", system=system, area=-gx.area / 2)
    gy_pre = pp.make_trapezoid(channel="y", system=system, area=Ny_pre * delta_k)

    gx_pre, gy_pre = pp.align(right=gx_pre, left=gy_pre)
    # Relax the PE prephaser to reduce stimulation
    gy_pre = pp.make_trapezoid(
        "y", system=system, area=gy_pre.area, duration=pp.calc_duration(gx_pre, gy_pre)
    )
    gy_pre.amplitude = gy_pre.amplitude * pe_enable

    # Calculate delay times
    duration_to_center = (Ny_pre + 0.5) * pp.calc_duration(gx)
    rf_center_incl_delay = rf.delay + pp.calc_rf_center(rf)[0]
    rf180_center_incl_delay = rf180.delay + pp.calc_rf_center(rf180)[0]
    delay_TE1 = (
        math.ceil(
            (
                TE / 2
                - pp.calc_duration(rf, gz)
                + rf_center_incl_delay
                - rf180_center_incl_delay
            )
            / system.grad_raster_time
        )
        * system.grad_raster_time
    )
    delay_TE2 = (
        math.ceil(
            (
                TE / 2
                - pp.calc_duration(rf180, gz180n)
                + rf180_center_incl_delay
                - duration_to_center
            )
            / system.grad_raster_time
        )
        * system.grad_raster_time
    )
    assert delay_TE1 >= 0  # TE too short for the chosen timing if this fires
    # Now we merge slice refocusing, TE delay and pre-phasers into a single block
    delay_TE2 = delay_TE2 + pp.calc_duration(rf180, gz180n)
    gx_pre.delay = 0
    gx_pre.delay = delay_TE2 - pp.calc_duration(gx_pre)
    assert gx_pre.delay >= pp.calc_duration(rf180)  # gx_pre may not overlap with the RF
    gy_pre.delay = pp.calc_duration(rf180)
    # gy_pre may not shift the timing
    assert pp.calc_duration(gy_pre) <= pp.calc_duration(gx_pre)

    # ======
    # CONSTRUCT SEQUENCE
    # ======
    # Define sequence blocks
    for s in range(n_slices):
        seq.add_block(rf_fs, gz_fs)
        rf.freq_offset = gz.amplitude * slice_thickness * (s - (n_slices - 1) / 2)
        rf180.freq_offset = gz180.amplitude * slice_thickness * (s - (n_slices - 1) / 2)
        seq.add_block(rf, gz, trig)
        seq.add_block(pp.make_delay(delay_TE1))
        seq.add_block(rf180, gz180n, pp.make_delay(delay_TE2), gx_pre, gy_pre)
        for i in range(1, Ny_meas + 1):
            if i == 1:
                # Read the first line of k-space with a single half-blip at the end
                seq.add_block(gx, gy_blipup, adc)
            elif i == Ny_meas:
                # Read the last line of k-space with a single half-blip at the beginning
                seq.add_block(gx, gy_blipdown, adc)
            else:
                # Read an intermediate line of k-space with a half-blip at the beginning and a half-blip at the end
                seq.add_block(gx, gy_blipdownup, adc)
            gx.amplitude = -gx.amplitude  # Reverse polarity of read gradient

    # Check whether the timing of the sequence is correct
    ok, error_report = seq.check_timing()
    if ok:
        print("Timing check passed successfully")
    else:
        print("Timing check failed. Error listing follows:")
        [print(e) for e in error_report]

    # Very optional slow step, but useful for testing during development e.g. for the real TE, TR or for staying within
    # slew-rate limits
    rep = seq.test_report()
    print(rep)

    # ======
    # VISUALIZATION
    # ======
    if plot:
        seq.plot()

    # =========
    # WRITE .SEQ
    # =========
    if write_seq:
        # Prepare the sequence output for the scanner
        seq.set_definition(key="FOV", value=[fov, fov, slice_thickness])
        seq.set_definition(key="Name", value="epi")

        seq.write(seq_filename)


if __name__ == "__main__":
    main(plot=True, write_seq=True)

+ 158 - 0
LF_scanner/pypulseq/seq_examples/scripts/write_gre.py

@@ -0,0 +1,158 @@
+import math
+
+import numpy as np
+
+import pypulseq as pp
+
+
def main(plot: bool, write_seq: bool, seq_filename: str = "gre_pypulseq.seq"):
    """
    2D RF-spoiled gradient-echo (GRE) example sequence.

    Builds a Cartesian GRE acquisition with RF and gradient spoiling
    (Ny phase-encode lines), checks its timing, optionally plots it,
    and — only when ``write_seq`` is set — writes the .seq file and the
    JEMRIS XML export.

    Parameters
    ----------
    plot : bool
        Plot the sequence waveforms after construction.
    write_seq : bool
        Write the sequence to ``seq_filename`` and export the JEMRIS XML.
    seq_filename : str
        Output .seq file name.
    """
    # ======
    # SETUP
    # ======
    # Create a new sequence object
    seq = pp.Sequence()
    fov = 256e-3  # Define FOV and resolution
    Nx = 256
    Ny = 256
    alpha = 10  # flip angle
    slice_thickness = 3e-3  # slice
    TR = 12e-3  # Repetition time
    TE = 5e-3  # Echo time

    rf_spoiling_inc = 117  # RF spoiling increment

    system = pp.Opts(
        max_grad=28,
        grad_unit="mT/m",
        max_slew=150,
        slew_unit="T/m/s",
        rf_ringdown_time=20e-6,
        rf_dead_time=100e-6,
        adc_dead_time=10e-6,
    )

    # ======
    # CREATE EVENTS
    # ======
    rf, gz, _ = pp.make_sinc_pulse(
        flip_angle=alpha * math.pi / 180,
        duration=3e-3,
        slice_thickness=slice_thickness,
        apodization=0.42,
        time_bw_product=4,
        system=system,
        return_gz=True,
    )
    # Define other gradients and ADC events
    delta_k = 1 / fov
    gx = pp.make_trapezoid(
        channel="x", flat_area=Nx * delta_k, flat_time=3.2e-3, system=system
    )
    adc = pp.make_adc(
        num_samples=Nx, duration=gx.flat_time, delay=gx.rise_time, system=system
    )
    gx_pre = pp.make_trapezoid(
        channel="x", area=-gx.area / 2, duration=1e-3, system=system
    )
    gz_reph = pp.make_trapezoid(
        channel="z", area=-gz.area / 2, duration=1e-3, system=system
    )
    phase_areas = (np.arange(Ny) - Ny / 2) * delta_k

    # gradient spoiling
    gx_spoil = pp.make_trapezoid(channel="x", area=2 * Nx * delta_k, system=system)
    gz_spoil = pp.make_trapezoid(channel="z", area=4 / slice_thickness, system=system)

    # Calculate timing: fill delays that realize the requested TE and TR,
    # rounded up to the gradient raster
    delay_TE = (
        np.ceil(
            (
                TE
                - pp.calc_duration(gx_pre)
                - gz.fall_time
                - gz.flat_time / 2
                - pp.calc_duration(gx) / 2
            )
            / seq.grad_raster_time
        )
        * seq.grad_raster_time
    )
    delay_TR = (
        np.ceil(
            (
                TR
                - pp.calc_duration(gz)
                - pp.calc_duration(gx_pre)
                - pp.calc_duration(gx)
                - delay_TE
            )
            / seq.grad_raster_time
        )
        * seq.grad_raster_time
    )

    assert np.all(delay_TE >= 0)
    assert np.all(delay_TR >= pp.calc_duration(gx_spoil, gz_spoil))

    rf_phase = 0
    rf_inc = 0

    # ======
    # CONSTRUCT SEQUENCE
    # ======
    # Loop over phase encodes and define sequence blocks
    for i in range(Ny):
        # Quadratic RF spoiling phase schedule
        rf.phase_offset = rf_phase / 180 * np.pi
        adc.phase_offset = rf_phase / 180 * np.pi
        rf_inc = divmod(rf_inc + rf_spoiling_inc, 360.0)[1]
        rf_phase = divmod(rf_phase + rf_inc, 360.0)[1]

        seq.add_block(rf, gz)
        gy_pre = pp.make_trapezoid(
            channel="y",
            area=phase_areas[i],
            duration=pp.calc_duration(gx_pre),
            system=system,
        )
        seq.add_block(gx_pre, gy_pre, gz_reph)
        seq.add_block(pp.make_delay(delay_TE))
        seq.add_block(gx, adc)
        gy_pre.amplitude = -gy_pre.amplitude  # rewind phase encoding
        seq.add_block(pp.make_delay(delay_TR), gx_spoil, gy_pre, gz_spoil)

    # Check whether the timing of the sequence is correct
    ok, error_report = seq.check_timing()
    if ok:
        print("Timing check passed successfully")
    else:
        print("Timing check failed. Error listing follows:")
        [print(e) for e in error_report]

    # ======
    # VISUALIZATION
    # ======
    if plot:
        seq.plot()

    # NOTE(review): nonstandard method name; upstream pypulseq exposes
    # calculate_kspace -- confirm this fork provides calculate_kspacePP
    seq.calculate_kspacePP()

    # Very optional slow step, but useful for testing during development e.g. for the real TE, TR or for staying within
    # slew-rate limits
    rep = seq.test_report()
    print(rep)

    # =========
    # WRITE .SEQ
    # =========
    if write_seq:
        # Prepare the sequence output for the scanner
        seq.set_definition(key="FOV", value=[fov, fov, slice_thickness])
        seq.set_definition(key="Name", value="gre")

        seq.write(seq_filename)

        # FIX: the JEMRIS export previously ran unconditionally (even with
        # write_seq=False) and unconditionally wrote into the hard-coded
        # folder below; it is now part of the optional output step.
        # NOTE(review): the exported name 't1_TSE_matrx16x16' does not match
        # this GRE sequence -- confirm the intended output name and path.
        from py2jemris.seq2xml import seq2xml

        seq2xml(seq, seq_name='t1_TSE_matrx16x16', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t1_TSE')


if __name__ == "__main__":
    main(plot=True, write_seq=True)

+ 169 - 0
LF_scanner/pypulseq/seq_examples/scripts/write_gre_label.py

@@ -0,0 +1,169 @@
+import math
+
+import numpy as np
+
+import pypulseq as pp
+
+
def main(plot: bool, write_seq: bool, seq_filename: str = "gre_label_pypulseq.seq"):
    """
    2D RF-spoiled GRE example sequence with scanner labels (LIN/SLC/REV).

    Same structure as the plain GRE example, but annotates each block with
    Pulseq labels so that the reconstruction can sort lines and slices;
    checks timing and optionally plots / writes the .seq file.

    Parameters
    ----------
    plot : bool
        Plot the sequence waveforms after construction.
    write_seq : bool
        Write the sequence to ``seq_filename``.
    seq_filename : str
        Output .seq file name.
    """
    # ======
    # SETUP
    # ======
    seq = pp.Sequence()  # Create a new sequence object
    fov = 224e-3  # Define FOV and resolution
    Nx = 256
    Ny = Nx
    alpha = 7  # Flip angle
    slice_thickness = 3e-3  # Slice thickness
    n_slices = 1
    TE = 4.3e-3  # Echo time
    TR = 10e-3  # Repetition time

    rf_spoiling_inc = 117  # RF spoiling increment
    ro_duration = 3.2e-3  # ADC duration

    # Set system limits
    system = pp.Opts(
        max_grad=28,
        grad_unit="mT/m",
        max_slew=150,
        slew_unit="T/m/s",
        rf_ringdown_time=20e-6,
        rf_dead_time=100e-6,
        adc_dead_time=10e-6,
    )

    # ======
    # CREATE EVENTS
    # ======
    # Create alpha-degree slice selection pulse and gradient
    rf, gz, _ = pp.make_sinc_pulse(
        flip_angle=alpha * np.pi / 180,
        duration=3e-3,
        slice_thickness=slice_thickness,
        apodization=0.5,
        time_bw_product=4,
        system=system,
        return_gz=True,
    )

    # Define other gradients and ADC events
    delta_k = 1 / fov
    gx = pp.make_trapezoid(
        channel="x", flat_area=Nx * delta_k, flat_time=ro_duration, system=system
    )
    adc = pp.make_adc(
        num_samples=Nx, duration=gx.flat_time, delay=gx.rise_time, system=system
    )
    gx_pre = pp.make_trapezoid(
        channel="x", area=-gx.area / 2, duration=1e-3, system=system
    )
    gz_reph = pp.make_trapezoid(
        channel="z", area=-gz.area / 2, duration=1e-3, system=system
    )
    phase_areas = -(np.arange(Ny) - Ny / 2) * delta_k

    # Gradient spoiling
    gx_spoil = pp.make_trapezoid(channel="x", area=2 * Nx * delta_k, system=system)
    gz_spoil = pp.make_trapezoid(channel="z", area=4 / slice_thickness, system=system)

    # Calculate timing: fill delays realizing TE and TR, rounded up to the
    # gradient raster
    delay_TE = (
        math.ceil(
            (
                TE
                - pp.calc_duration(gx_pre)
                - gz.fall_time
                - gz.flat_time / 2
                - pp.calc_duration(gx) / 2
            )
            / seq.grad_raster_time
        )
        * seq.grad_raster_time
    )
    delay_TR = (
        math.ceil(
            (
                TR
                - pp.calc_duration(gz)
                - pp.calc_duration(gx_pre)
                - pp.calc_duration(gx)
                - delay_TE
            )
            / seq.grad_raster_time
        )
        * seq.grad_raster_time
    )
    assert np.all(delay_TE >= 0)
    assert np.all(delay_TR >= pp.calc_duration(gx_spoil, gz_spoil))

    rf_phase = 0
    rf_inc = 0

    # Mark readouts as reversed for the reconstruction
    seq.add_block(pp.make_label(label="REV", type="SET", value=1))

    # ======
    # CONSTRUCT SEQUENCE
    # ======
    # Loop over slices
    for s in range(n_slices):
        rf.freq_offset = gz.amplitude * slice_thickness * (s - (n_slices - 1) / 2)
        # Loop over phase encodes and define sequence blocks
        for i in range(Ny):
            # Quadratic RF spoiling phase schedule
            rf.phase_offset = rf_phase / 180 * np.pi
            adc.phase_offset = rf_phase / 180 * np.pi
            rf_inc = divmod(rf_inc + rf_spoiling_inc, 360.0)[1]
            rf_phase = divmod(rf_phase + rf_inc, 360.0)[1]

            seq.add_block(rf, gz)
            gy_pre = pp.make_trapezoid(
                channel="y",
                area=phase_areas[i],
                duration=pp.calc_duration(gx_pre),
                system=system,
            )
            seq.add_block(gx_pre, gy_pre, gz_reph)
            seq.add_block(pp.make_delay(delay_TE))
            seq.add_block(gx, adc)
            gy_pre.amplitude = -gy_pre.amplitude
            # Spoil and advance the LIN counter; on the last line of the
            # slice reset LIN and advance SLC instead
            spoil_block_contents = [pp.make_delay(delay_TR), gx_spoil, gy_pre, gz_spoil]
            if i != Ny - 1:
                spoil_block_contents.append(
                    pp.make_label(type="INC", label="LIN", value=1)
                )
            else:
                spoil_block_contents.extend(
                    [
                        pp.make_label(type="SET", label="LIN", value=0),
                        pp.make_label(type="INC", label="SLC", value=1),
                    ]
                )
            seq.add_block(*spoil_block_contents)

    ok, error_report = seq.check_timing()

    if ok:
        print("Timing check passed successfully")
    else:
        print("Timing check failed. Error listing follows:")
        [print(e) for e in error_report]

    # ======
    # VISUALIZATION
    # ======
    if plot:
        seq.plot(label="lin", time_range=np.array([0, 32]) * TR, time_disp="ms")

    # =========
    # WRITE .SEQ
    # =========
    if write_seq:
        # Prepare the sequence output for the scanner
        seq.set_definition(key="FOV", value=[fov, fov, slice_thickness * n_slices])
        seq.set_definition(key="Name", value="gre_label")

        seq.write(seq_filename)


if __name__ == "__main__":
    main(plot=True, write_seq=True)

+ 326 - 0
LF_scanner/pypulseq/seq_examples/scripts/write_haste.py

@@ -0,0 +1,326 @@
+import math
+import warnings
+
+import numpy as np
+
+from pypulseq.Sequence.sequence import Sequence
+from pypulseq.calc_rf_center import calc_rf_center
+from pypulseq.make_adc import make_adc
+from pypulseq.make_delay import make_delay
+from pypulseq.make_extended_trapezoid import make_extended_trapezoid
+from pypulseq.make_sinc_pulse import make_sinc_pulse
+from pypulseq.make_trapezoid import make_trapezoid
+from pypulseq.opts import Opts
+
+
def main(plot: bool, write_seq: bool, seq_filename: str = "haste_pypulseq.seq"):
    """
    HASTE (half-Fourier single-shot turbo spin-echo) example sequence.

    Builds a single-shot TSE echo train covering half of k-space plus
    ``Ny_pre`` overscan lines, using extended-trapezoid slice/readout
    gradients stitched together across blocks; checks timing and optionally
    plots / writes the .seq file.

    Parameters
    ----------
    plot : bool
        Plot the sequence waveforms after construction.
    write_seq : bool
        Write the sequence to ``seq_filename``.
    seq_filename : str
        Output .seq file name.
    """
    # ======
    # SETUP
    # ======
    dG = 250e-6  # common gradient rise time used to stitch the waveforms

    # Set system limits
    system = Opts(
        max_grad=30,
        grad_unit="mT/m",
        max_slew=170,
        slew_unit="T/m/s",
        rf_ringdown_time=100e-6,
        rf_dead_time=100e-6,
        adc_dead_time=10e-6,
    )

    seq = Sequence(system=system)  # Create a new sequence object
    fov = 256e-3  # Define FOV and resolution
    Ny_pre = 8  # number of overscan lines before the k-space center
    Nx, Ny = 128, 128
    n_echo = int(Ny / 2 + Ny_pre)  # Number of echoes
    n_slices = 1
    rf_flip = 180  # Flip angle
    if isinstance(rf_flip, int):
        rf_flip = np.zeros(n_echo) + rf_flip
    slice_thickness = 5e-3  # Slice thickness
    TE = 12e-3  # Echo time
    TR = 2000e-3  # Repetition time

    sampling_time = 6.4e-3
    readout_time = sampling_time + 2 * system.adc_dead_time
    t_ex = 2.5e-3
    t_ex_wd = t_ex + system.rf_ringdown_time + system.rf_dead_time
    t_ref = 2e-3
    tf_ref_wd = t_ref + system.rf_ringdown_time + system.rf_dead_time
    t_sp = 0.5 * (TE - readout_time - tf_ref_wd)
    t_sp_ex = 0.5 * (TE - t_ex_wd - tf_ref_wd)
    fspR = 1.0  # readout spoiler strength factor
    fspS = 0.5  # slice spoiler strength factor

    rfex_phase = math.pi / 2
    rfref_phase = 0

    # ======
    # CREATE EVENTS
    # ======
    # Create 90 degree slice selection pulse and gradient
    flipex = 90 * math.pi / 180
    rfex, gz, _ = make_sinc_pulse(
        flip_angle=flipex,
        system=system,
        duration=t_ex,
        slice_thickness=slice_thickness,
        apodization=0.5,
        time_bw_product=4,
        phase_offset=rfex_phase,
        return_gz=True,
    )
    GS_ex = make_trapezoid(
        channel="z",
        system=system,
        amplitude=gz.amplitude,
        flat_time=t_ex_wd,
        rise_time=dG,
    )

    flipref = rf_flip[0] * math.pi / 180
    rfref, gz, _ = make_sinc_pulse(
        flip_angle=flipref,
        system=system,
        duration=t_ref,
        slice_thickness=slice_thickness,
        apodization=0.5,
        time_bw_product=4,
        phase_offset=rfref_phase,
        use="refocusing",
        return_gz=True,
    )
    GS_ref = make_trapezoid(
        channel="z",
        system=system,
        amplitude=GS_ex.amplitude,
        flat_time=tf_ref_wd,
        rise_time=dG,
    )

    AGS_ex = GS_ex.area / 2
    GS_spr = make_trapezoid(
        channel="z",
        system=system,
        area=AGS_ex * (1 + fspS),
        duration=t_sp,
        rise_time=dG,
    )
    GS_spex = make_trapezoid(
        channel="z", system=system, area=AGS_ex * fspS, duration=t_sp_ex, rise_time=dG
    )

    delta_k = 1 / fov
    k_width = Nx * delta_k

    GR_acq = make_trapezoid(
        channel="x",
        system=system,
        flat_area=k_width,
        flat_time=readout_time,
        rise_time=dG,
    )
    adc = make_adc(num_samples=Nx, duration=sampling_time, delay=system.adc_dead_time)
    GR_spr = make_trapezoid(
        channel="x", system=system, area=GR_acq.area * fspR, duration=t_sp, rise_time=dG
    )
    GR_spex = make_trapezoid(
        channel="x",
        system=system,
        area=GR_acq.area * (1 + fspR),
        duration=t_sp_ex,
        rise_time=dG,
    )

    AGR_spr = GR_spr.area
    AGR_preph = GR_acq.area / 2 + AGR_spr
    GR_preph = make_trapezoid(
        channel="x", system=system, area=AGR_preph, duration=t_sp_ex, rise_time=dG
    )

    n_ex = 1
    PE_order = np.arange(-Ny_pre, Ny + 1).T
    phase_areas = PE_order * delta_k

    # Split gradients and recombine into blocks
    # GS1..GS7 / GR3..GR7 are the segments of the slice (z) and readout (x)
    # waveforms, cut at block boundaries so adjacent blocks join seamlessly
    GS1_times = np.array([0, GS_ex.rise_time])
    GS1_amp = np.array([0, GS_ex.amplitude])
    GS1 = make_extended_trapezoid(channel="z", times=GS1_times, amplitudes=GS1_amp)

    GS2_times = np.array([0, GS_ex.flat_time])
    GS2_amp = np.array([GS_ex.amplitude, GS_ex.amplitude])
    GS2 = make_extended_trapezoid(channel="z", times=GS2_times, amplitudes=GS2_amp)

    GS3_times = np.array(
        [
            0,
            GS_spex.rise_time,
            GS_spex.rise_time + GS_spex.flat_time,
            GS_spex.rise_time + GS_spex.flat_time + GS_spex.fall_time,
        ]
    )
    GS3_amp = np.array(
        [GS_ex.amplitude, GS_spex.amplitude, GS_spex.amplitude, GS_ref.amplitude]
    )
    GS3 = make_extended_trapezoid(channel="z", times=GS3_times, amplitudes=GS3_amp)

    GS4_times = np.array([0, GS_ref.flat_time])
    GS4_amp = np.array([GS_ref.amplitude, GS_ref.amplitude])
    GS4 = make_extended_trapezoid(channel="z", times=GS4_times, amplitudes=GS4_amp)

    GS5_times = np.array(
        [
            0,
            GS_spr.rise_time,
            GS_spr.rise_time + GS_spr.flat_time,
            GS_spr.rise_time + GS_spr.flat_time + GS_spr.fall_time,
        ]
    )
    GS5_amp = np.array([GS_ref.amplitude, GS_spr.amplitude, GS_spr.amplitude, 0])
    GS5 = make_extended_trapezoid(channel="z", times=GS5_times, amplitudes=GS5_amp)

    GS7_times = np.array(
        [
            0,
            GS_spr.rise_time,
            GS_spr.rise_time + GS_spr.flat_time,
            GS_spr.rise_time + GS_spr.flat_time + GS_spr.fall_time,
        ]
    )
    GS7_amp = np.array([0, GS_spr.amplitude, GS_spr.amplitude, GS_ref.amplitude])
    GS7 = make_extended_trapezoid(channel="z", times=GS7_times, amplitudes=GS7_amp)

    # Readout gradient
    GR3 = GR_preph

    GR5_times = np.array(
        [
            0,
            GR_spr.rise_time,
            GR_spr.rise_time + GR_spr.flat_time,
            GR_spr.rise_time + GR_spr.flat_time + GR_spr.fall_time,
        ]
    )
    GR5_amp = np.array([0, GR_spr.amplitude, GR_spr.amplitude, GR_acq.amplitude])
    GR5 = make_extended_trapezoid(channel="x", times=GR5_times, amplitudes=GR5_amp)

    GR6_times = np.array([0, readout_time])
    GR6_amp = np.array([GR_acq.amplitude, GR_acq.amplitude])
    GR6 = make_extended_trapezoid(channel="x", times=GR6_times, amplitudes=GR6_amp)

    GR7_times = np.array(
        [
            0,
            GR_spr.rise_time,
            GR_spr.rise_time + GR_spr.flat_time,
            GR_spr.rise_time + GR_spr.flat_time + GR_spr.fall_time,
        ]
    )
    GR7_amp = np.array([GR_acq.amplitude, GR_spr.amplitude, GR_spr.amplitude, 0])
    GR7 = make_extended_trapezoid(channel="x", times=GR7_times, amplitudes=GR7_amp)

    # Fill-times
    tex = GS1.shape_dur + GS2.shape_dur + GS3.shape_dur
    tref = GS4.shape_dur + GS5.shape_dur + GS7.shape_dur + readout_time
    tend = GS4.shape_dur + GS5.shape_dur
    TE_train = tex + n_echo * tref + tend
    TR_fill = (TR - n_slices * TE_train) / n_slices  # Round to gradient raster

    TR_fill = system.grad_raster_time * round(TR_fill / system.grad_raster_time)
    if TR_fill < 0:
        TR_fill = 1e-3
        warnings.warn(
            f"TR too short, adapted to include all slices to: {1000 * n_slices * (TE_train + TR_fill)} ms"
        )
    else:
        print(f"TR fill: {1000 * TR_fill} ms")
    delay_TR = make_delay(TR_fill)
    delay_end = make_delay(5)  # 5 s settling delay at the end of the sequence

    # ======
    # CONSTRUCT SEQUENCE
    # ======
    # Define sequence blocks
    for k_ex in range(n_ex):
        for s in range(n_slices):
            rfex.freq_offset = (
                GS_ex.amplitude * slice_thickness * (s - (n_slices - 1) / 2)
            )
            rfref.freq_offset = (
                GS_ref.amplitude * slice_thickness * (s - (n_slices - 1) / 2)
            )
            # Align the phase for off-center slices
            rfex.phase_offset = (
                rfex_phase - 2 * math.pi * rfex.freq_offset * calc_rf_center(rfex)[0]
            )
            rfref.phase_offset = (
                rfref_phase - 2 * math.pi * rfref.freq_offset * calc_rf_center(rfref)[0]
            )

            seq.add_block(GS1)
            seq.add_block(GS2, rfex)
            seq.add_block(GS3, GR3)

            for k_ech in range(n_echo):
                # NOTE(review): `k_ex >= 0` is always true for k_ex in
                # range(n_ex); presumably inherited from a variant with
                # dummy excitations (k_ex < 0 or 0-based checks) -- confirm
                if k_ex >= 0:
                    phase_area = phase_areas[k_ech]
                else:
                    phase_area = 0

                GP_pre = make_trapezoid(
                    channel="y",
                    system=system,
                    area=phase_area,
                    duration=t_sp,
                    rise_time=dG,
                )
                GP_rew = make_trapezoid(
                    channel="y",
                    system=system,
                    area=-phase_area,
                    duration=t_sp,
                    rise_time=dG,
                )

                seq.add_block(GS4, rfref)
                seq.add_block(GS5, GR5, GP_pre)

                if k_ex >= 0:
                    seq.add_block(GR6, adc)
                else:
                    seq.add_block(GR6)

                seq.add_block(GS7, GR7, GP_rew)

            seq.add_block(GS4)
            seq.add_block(GS5)
            seq.add_block(delay_TR)

    seq.add_block(delay_end)

    # Check whether the timing of the sequence is correct
    ok, error_report = seq.check_timing()
    if ok:
        print("Timing check passed successfully")
    else:
        print("Timing check failed. Error listing follows:")
        [print(e) for e in error_report]

    # ======
    # VISUALIZATION
    # ======
    if plot:
        seq.plot()

    # =========
    # WRITE .SEQ
    # =========
    if write_seq:
        seq.write(seq_filename)


# Script entry point
if __name__ == "__main__":
    main(plot=True, write_seq=True)

+ 142 - 0
LF_scanner/pypulseq/seq_examples/scripts/write_radial_gre.py

@@ -0,0 +1,142 @@
+import numpy as np
+
+import pypulseq as pp
+
+
def main(plot: bool, write_seq: bool, seq_filename: str = "gre_radial_pypulseq.seq"):
    """Create a 2D radial gradient-echo (GRE) sequence with RF spoiling.

    Parameters
    ----------
    plot : bool
        If True, display the sequence diagram after construction.
    write_seq : bool
        If True, write the sequence to ``seq_filename`` in Pulseq format.
    seq_filename : str
        Output .seq file name.
    """
    # ======
    # SETUP
    # ======
    seq = pp.Sequence()  # Create a new sequence object
    fov = 260e-3
    Nx = 320  # Define FOV and resolution
    alpha = 10  # Flip angle (degrees)
    slice_thickness = 3e-3  # Slice thickness
    TE = 8e-3  # Echo time
    TR = 20e-3  # Repetition time
    Nr = 256  # Number of radial spokes
    N_dummy = 20  # Number of dummy scans (no ADC) to approach steady state
    delta = np.pi / Nr  # Angular increment between spokes

    rf_spoiling_inc = 117  # RF spoiling increment (degrees)

    # Set system limits
    system = pp.Opts(
        max_grad=28,
        grad_unit="mT/m",
        max_slew=120,
        slew_unit="T/m/s",
        rf_ringdown_time=20e-6,
        rf_dead_time=100e-6,
        adc_dead_time=10e-6,
    )

    # ======
    # CREATE EVENTS
    # ======
    # Create alpha-degree slice selection pulse and gradient
    rf, gz, _ = pp.make_sinc_pulse(
        apodization=0.5,
        duration=4e-3,
        flip_angle=alpha * np.pi / 180,
        slice_thickness=slice_thickness,
        system=system,
        time_bw_product=4,
        return_gz=True,
    )

    # Define other gradients and ADC events
    deltak = 1 / fov
    gx = pp.make_trapezoid(
        channel="x", flat_area=Nx * deltak, flat_time=6.4e-3 / 5, system=system
    )
    adc = pp.make_adc(
        num_samples=Nx, duration=gx.flat_time, delay=gx.rise_time, system=system
    )
    # Prephaser; the extra -deltak/2 centers sampling on the k-space grid
    gx_pre = pp.make_trapezoid(
        channel="x", area=-gx.area / 2 - deltak / 2, duration=2e-3, system=system
    )
    gz_reph = pp.make_trapezoid(
        channel="z", area=-gz.area / 2, duration=2e-3, system=system
    )
    # Gradient spoiling
    gx_spoil = pp.make_trapezoid(channel="x", area=0.5 * Nx * deltak, system=system)
    gz_spoil = pp.make_trapezoid(channel="z", area=4 / slice_thickness, system=system)

    # Calculate timing; delays are rounded up to the gradient raster
    delay_TE = (
        np.ceil(
            (
                TE
                - pp.calc_duration(gx_pre)
                - gz.fall_time
                - gz.flat_time / 2
                - pp.calc_duration(gx) / 2
            )
            / seq.grad_raster_time
        )
        * seq.grad_raster_time
    )
    delay_TR = (
        np.ceil(
            (
                TR
                - pp.calc_duration(gx_pre)
                - pp.calc_duration(gz)
                - pp.calc_duration(gx)
                - delay_TE
            )
            / seq.grad_raster_time
        )
        * seq.grad_raster_time
    )
    # BUGFIX: the comparison must be inside np.all(); the previous
    # `np.all(delay_TR) > ...` reduced delay_TR to a bool before comparing,
    # so the assert could pass even when the TR fill was too short.
    assert np.all(delay_TR >= pp.calc_duration(gx_spoil, gz_spoil))
    rf_phase = 0
    rf_inc = 0

    # ======
    # CONSTRUCT SEQUENCE
    # ======
    # i <= 0 are dummy excitations without data acquisition.
    for i in range(-N_dummy, Nr + 1):
        # RF spoiling: quadratic phase increment applied to both RF and ADC
        rf.phase_offset = rf_phase / 180 * np.pi
        adc.phase_offset = rf_phase / 180 * np.pi

        rf_inc = divmod(rf_inc + rf_spoiling_inc, 360.0)[1]
        rf_phase = divmod(rf_inc + rf_phase, 360.0)[1]

        seq.add_block(rf, gz)
        # Spoke angle; (i - 1) is carried over from the MATLAB 1-based original
        phi = delta * (i - 1)
        seq.add_block(*pp.rotate(gx_pre, gz_reph, angle=phi, axis="z"))
        seq.add_block(pp.make_delay(delay_TE))
        if i > 0:
            seq.add_block(*pp.rotate(gx, adc, angle=phi, axis="z"))
        else:
            # Dummy scan: play the readout gradient without sampling
            seq.add_block(*pp.rotate(gx, angle=phi, axis="z"))
        seq.add_block(
            *pp.rotate(gx_spoil, gz_spoil, pp.make_delay(delay_TR), angle=phi, axis="z")
        )

    # Check whether the timing of the sequence is correct
    ok, error_report = seq.check_timing()
    if ok:
        print("Timing check passed successfully")
    else:
        print("Timing check failed! Error listing follows:")
        print(error_report)

    # ======
    # VISUALIZATION
    # ======
    if plot:
        seq.plot()

    # =========
    # WRITE .SEQ
    # =========
    if write_seq:
        seq.set_definition(key="FOV", value=[fov, fov, slice_thickness])
        seq.set_definition(key="Name", value="gre_rad")
        seq.write(seq_filename)
+
+
# Run as a script: render the plot and write the .seq file.
if __name__ == "__main__":
    main(plot=True, write_seq=True)

+ 332 - 0
LF_scanner/pypulseq/seq_examples/scripts/write_tse.py

@@ -0,0 +1,332 @@
+import math
+import warnings
+
+import numpy as np
+
+import LF_scanner.pypulseq as pp
+
+
def main(plot: bool, write_seq: bool, seq_filename: str = "tse_pypulseq.seq"):
    """Create a multi-slice turbo spin-echo (TSE) sequence.

    A 90-degree excitation is followed by a train of ``n_echo`` refocusing
    pulses; one phase-encode line is acquired per echo.  Slice and readout
    gradients are split into extended trapezoids so neighbouring blocks join
    without gaps.

    Parameters
    ----------
    plot : bool
        If True, display the sequence diagram after construction.
    write_seq : bool
        If True, write the sequence to ``seq_filename`` in Pulseq format.
    seq_filename : str
        Output .seq file name.
    """
    # ======
    # SETUP
    # ======
    dG = 250e-6  # Rise/fall time shared by all trapezoids

    # Set system limits
    system = pp.Opts(
        max_grad=32,
        grad_unit="mT/m",
        max_slew=130,
        slew_unit="T/m/s",
        rf_ringdown_time=100e-6,
        rf_dead_time=100e-6,
        adc_dead_time=10e-6,
    )

    seq = pp.Sequence(system)  # Create a new sequence object
    fov = 256e-3  # Define FOV and resolution
    Nx, Ny = 32, 32
    n_echo = 16  # Number of echoes per excitation (turbo factor)
    n_slices = 3
    rf_flip = 180  # Refocusing flip angle (degrees)
    if isinstance(rf_flip, int):
        rf_flip = np.zeros(n_echo) + rf_flip  # One flip angle per echo
    slice_thickness = 5e-3
    TE = 12e-3  # Echo spacing
    TR = 2000e-3  # Repetition time

    sampling_time = 6.4e-3
    readout_time = sampling_time + 2 * system.adc_dead_time
    t_ex = 2.5e-3  # Excitation pulse duration
    t_exwd = t_ex + system.rf_ringdown_time + system.rf_dead_time
    t_ref = 2e-3  # Refocusing pulse duration
    t_refwd = t_ref + system.rf_ringdown_time + system.rf_dead_time
    t_sp = 0.5 * (TE - readout_time - t_refwd)  # Spoiler time around readout
    t_spex = 0.5 * (TE - t_exwd - t_refwd)  # Spoiler time after excitation
    fsp_r = 1  # Readout spoiler area factor
    fsp_s = 0.5  # Slice spoiler area factor

    rf_ex_phase = np.pi / 2
    rf_ref_phase = 0

    # ======
    # CREATE EVENTS
    # ======
    flip_ex = 90 * np.pi / 180
    rf_ex, gz, _ = pp.make_sinc_pulse(
        flip_angle=flip_ex,
        system=system,
        duration=t_ex,
        slice_thickness=slice_thickness,
        apodization=0.5,
        time_bw_product=4,
        phase_offset=rf_ex_phase,
        return_gz=True,
    )
    gs_ex = pp.make_trapezoid(
        channel="z",
        system=system,
        amplitude=gz.amplitude,
        flat_time=t_exwd,
        rise_time=dG,
    )

    flip_ref = rf_flip[0] * np.pi / 180
    rf_ref, gz, _ = pp.make_sinc_pulse(
        flip_angle=flip_ref,
        system=system,
        duration=t_ref,
        slice_thickness=slice_thickness,
        apodization=0.5,
        time_bw_product=4,
        phase_offset=rf_ref_phase,
        use="refocusing",
        return_gz=True,
    )
    gs_ref = pp.make_trapezoid(
        channel="z",
        system=system,
        amplitude=gs_ex.amplitude,
        flat_time=t_refwd,
        rise_time=dG,
    )

    # Slice spoilers around the refocusing pulses and after excitation
    ags_ex = gs_ex.area / 2
    gs_spr = pp.make_trapezoid(
        channel="z",
        system=system,
        area=ags_ex * (1 + fsp_s),
        duration=t_sp,
        rise_time=dG,
    )
    gs_spex = pp.make_trapezoid(
        channel="z", system=system, area=ags_ex * fsp_s, duration=t_spex, rise_time=dG
    )

    delta_k = 1 / fov
    k_width = Nx * delta_k

    # Readout gradient, ADC, readout spoiler and prephaser
    gr_acq = pp.make_trapezoid(
        channel="x",
        system=system,
        flat_area=k_width,
        flat_time=readout_time,
        rise_time=dG,
    )
    adc = pp.make_adc(
        num_samples=Nx, duration=sampling_time, delay=system.adc_dead_time
    )
    gr_spr = pp.make_trapezoid(
        channel="x",
        system=system,
        area=gr_acq.area * fsp_r,
        duration=t_sp,
        rise_time=dG,
    )

    agr_spr = gr_spr.area
    agr_preph = gr_acq.area / 2 + agr_spr
    gr_preph = pp.make_trapezoid(
        channel="x", system=system, area=agr_preph, duration=t_spex, rise_time=dG
    )

    # Phase-encoding: n_echo lines per excitation, n_ex excitations in total
    n_ex = math.floor(Ny / n_echo)
    pe_steps = np.arange(1, n_echo * n_ex + 1) - 0.5 * n_echo * n_ex - 1
    if divmod(n_echo, 2)[1] == 0:
        # For an even echo-train length, roll so the k-space center is sampled
        pe_steps = np.roll(pe_steps, [0, int(-np.round(n_ex / 2))])
    pe_order = pe_steps.reshape((n_ex, n_echo), order="F").T
    phase_areas = pe_order * delta_k

    # Split gradients and recombine into blocks
    gs1_times = np.array([0, gs_ex.rise_time])
    gs1_amp = np.array([0, gs_ex.amplitude])
    gs1 = pp.make_extended_trapezoid(channel="z", times=gs1_times, amplitudes=gs1_amp)

    gs2_times = np.array([0, gs_ex.flat_time])
    gs2_amp = np.array([gs_ex.amplitude, gs_ex.amplitude])
    gs2 = pp.make_extended_trapezoid(channel="z", times=gs2_times, amplitudes=gs2_amp)

    gs3_times = np.array(
        [
            0,
            gs_spex.rise_time,
            gs_spex.rise_time + gs_spex.flat_time,
            gs_spex.rise_time + gs_spex.flat_time + gs_spex.fall_time,
        ]
    )
    gs3_amp = np.array(
        [gs_ex.amplitude, gs_spex.amplitude, gs_spex.amplitude, gs_ref.amplitude]
    )
    gs3 = pp.make_extended_trapezoid(channel="z", times=gs3_times, amplitudes=gs3_amp)

    gs4_times = np.array([0, gs_ref.flat_time])
    gs4_amp = np.array([gs_ref.amplitude, gs_ref.amplitude])
    gs4 = pp.make_extended_trapezoid(channel="z", times=gs4_times, amplitudes=gs4_amp)

    gs5_times = np.array(
        [
            0,
            gs_spr.rise_time,
            gs_spr.rise_time + gs_spr.flat_time,
            gs_spr.rise_time + gs_spr.flat_time + gs_spr.fall_time,
        ]
    )
    gs5_amp = np.array([gs_ref.amplitude, gs_spr.amplitude, gs_spr.amplitude, 0])
    gs5 = pp.make_extended_trapezoid(channel="z", times=gs5_times, amplitudes=gs5_amp)

    gs7_times = np.array(
        [
            0,
            gs_spr.rise_time,
            gs_spr.rise_time + gs_spr.flat_time,
            gs_spr.rise_time + gs_spr.flat_time + gs_spr.fall_time,
        ]
    )
    gs7_amp = np.array([0, gs_spr.amplitude, gs_spr.amplitude, gs_ref.amplitude])
    gs7 = pp.make_extended_trapezoid(channel="z", times=gs7_times, amplitudes=gs7_amp)

    # Readout gradient
    gr3 = gr_preph

    gr5_times = np.array(
        [
            0,
            gr_spr.rise_time,
            gr_spr.rise_time + gr_spr.flat_time,
            gr_spr.rise_time + gr_spr.flat_time + gr_spr.fall_time,
        ]
    )
    gr5_amp = np.array([0, gr_spr.amplitude, gr_spr.amplitude, gr_acq.amplitude])
    gr5 = pp.make_extended_trapezoid(channel="x", times=gr5_times, amplitudes=gr5_amp)

    gr6_times = np.array([0, readout_time])
    gr6_amp = np.array([gr_acq.amplitude, gr_acq.amplitude])
    gr6 = pp.make_extended_trapezoid(channel="x", times=gr6_times, amplitudes=gr6_amp)

    gr7_times = np.array(
        [
            0,
            gr_spr.rise_time,
            gr_spr.rise_time + gr_spr.flat_time,
            gr_spr.rise_time + gr_spr.flat_time + gr_spr.fall_time,
        ]
    )
    gr7_amp = np.array([gr_acq.amplitude, gr_spr.amplitude, gr_spr.amplitude, 0])
    gr7 = pp.make_extended_trapezoid(channel="x", times=gr7_times, amplitudes=gr7_amp)

    # Fill-times (t_ex/t_ref are rebound here as block durations)
    t_ex = pp.calc_duration(gs1) + pp.calc_duration(gs2) + pp.calc_duration(gs3)
    t_ref = (
        pp.calc_duration(gs4)
        + pp.calc_duration(gs5)
        + pp.calc_duration(gs7)
        + readout_time
    )
    t_end = pp.calc_duration(gs4) + pp.calc_duration(gs5)

    TE_train = t_ex + n_echo * t_ref + t_end
    TR_fill = (TR - n_slices * TE_train) / n_slices
    # Round to gradient raster
    TR_fill = system.grad_raster_time * np.round(TR_fill / system.grad_raster_time)
    if TR_fill < 0:
        TR_fill = 1e-3
        warnings.warn(
            f"TR too short, adapted to include all slices to: {1000 * n_slices * (TE_train + TR_fill)} ms"
        )
    else:
        print(f"TR fill: {1000 * TR_fill} ms")
    delay_TR = pp.make_delay(TR_fill)

    # ======
    # CONSTRUCT SEQUENCE
    # ======
    for k_ex in range(n_ex):
        for s in range(n_slices):
            # Per-slice frequency offset, with the phase offset compensated so
            # the pulse phase is defined at the RF center
            rf_ex.freq_offset = (
                gs_ex.amplitude * slice_thickness * (s - (n_slices - 1) / 2)
            )
            rf_ref.freq_offset = (
                gs_ref.amplitude * slice_thickness * (s - (n_slices - 1) / 2)
            )
            rf_ex.phase_offset = (
                rf_ex_phase
                - 2 * np.pi * rf_ex.freq_offset * pp.calc_rf_center(rf_ex)[0]
            )
            rf_ref.phase_offset = (
                rf_ref_phase
                - 2 * np.pi * rf_ref.freq_offset * pp.calc_rf_center(rf_ref)[0]
            )

            seq.add_block(gs1)
            seq.add_block(gs2, rf_ex)
            seq.add_block(gs3, gr3)

            for k_echo in range(n_echo):
                # NOTE(review): for k_ex == 0 this reads column -1, i.e. the
                # phase encodes of the LAST excitation.  The upstream pypulseq
                # example instead runs k_ex over range(n_ex + 1) and uses the
                # first pass as a dummy train with phase_area = 0 — confirm
                # this ordering is intended.
                phase_area = phase_areas[k_echo, k_ex - 1]

                gp_pre = pp.make_trapezoid(
                    channel="y",
                    system=system,
                    area=phase_area,
                    duration=t_sp,
                    rise_time=dG,
                )
                gp_rew = pp.make_trapezoid(
                    channel="y",
                    system=system,
                    area=-phase_area,
                    duration=t_sp,
                    rise_time=dG,
                )
                seq.add_block(gs4, rf_ref)
                seq.add_block(gs5, gr5, gp_pre)
                # Every echo is acquired (the previous `if k_ex > -1` gate was
                # always true, so the ADC-less branch was dead code)
                seq.add_block(gr6, adc)
                seq.add_block(gs7, gr7, gp_rew)

            # Ramp the slice gradient down and wait out the rest of the TR
            seq.add_block(gs4)
            seq.add_block(gs5)
            seq.add_block(delay_TR)

    # Check whether the timing of the sequence is correct
    ok, error_report = seq.check_timing()
    if ok:
        print("Timing check passed successfully")
    else:
        print("Timing check failed. Error listing follows:")
        for e in error_report:
            print(e)

    # ======
    # VISUALIZATION
    # ======
    if plot:
        seq.plot()

    # =========
    # WRITE .SEQ
    # =========
    if write_seq:
        seq.write(seq_filename)
+
+
# Run as a script: render the plot and write the .seq file.
if __name__ == "__main__":
    main(plot=True, write_seq=True)

+ 347 - 0
LF_scanner/pypulseq/seq_examples/scripts/write_tse_new.py

@@ -0,0 +1,347 @@
+import math
+import warnings
+import json as j
+
+import numpy as np
+
+import MRI_seq.pypulseq as pp
+
+
def main(plot: bool, write_seq: bool, seq_filename: str = "tse_pypulseq.seq",
         params_folder="C:\\MRI_sequences_files\\First_row_P\\TSE\\temp\\",
         params_name="C:\\MRI_sequences_files\\First_row_P\\TSE\\temp\\TSE.json"
         ):
    """Create a single-slice TSE sequence driven by a JSON parameter file.

    Parameters
    ----------
    plot : bool
        If True, display the sequence diagram after construction.
    write_seq : bool
        If True, write the sequence to ``seq_filename`` in Pulseq format.
    seq_filename : str
        Output .seq file name.
    params_folder : str
        NOTE(review): currently unused inside this function — confirm whether
        it is needed or can be removed.
    params_name : str
        Path to the JSON file with timing/geometry parameters
        (dG, TR, ES, BW_pixel, Nf, Np, FoV_f, raster times, ...).
    """

    # Read the JSON file with the parameters for the selected image weighting
    with open(params_name, 'rb') as f:
        params = j.load(f)

    dG = params['dG']  # Rise/fall time shared by all trapezoids

    # Set system limits (raster times are also taken from the JSON file)
    system = pp.Opts(
        max_grad=37.8,
        grad_unit="mT/m",
        max_slew=121,
        slew_unit="T/m/s",
        rf_ringdown_time=params['rf_ringdown_time'],
        rf_dead_time=params['rf_dead_time'],
        adc_dead_time=params['adc_dead_time'],
        rf_raster_time=params['rf_raster_time'],
        grad_raster_time=params['grad_raster_time'],
        block_duration_raster=params['grad_raster_time'],
        adc_raster_time=1 / (params['BW_pixel'] * params['Nf'])
    )

    seq = pp.Sequence(system)  # Create a new sequence object
    fov = params['FoV_f']  # Define FOV and resolution
    Nx, Ny = params['Nf'] , params['Np']
    n_echo = 16  # Number of echoes per excitation (turbo factor)
    n_slices = 1
    rf_flip = 180  # Refocusing flip angle (degrees)
    if isinstance(rf_flip, int):
        rf_flip = np.zeros(n_echo) + rf_flip  # One flip angle per echo
    slice_thickness = 5e-3
    TE = params['ES'] # Echo time
    TR = params['TR']  # Repetition time

    # Readout timing is derived from the per-pixel bandwidth
    readout_time = round(1 / params['BW_pixel'], 8)
    sampling_time = readout_time-2 * system.adc_dead_time
    t_ex = params['t_ex']
    t_exwd = t_ex + 2*system.rf_dead_time
    t_ref = params['t_ref']
    t_refwd = t_ref + 2*system.rf_dead_time
    t_sp = 0.5 * (TE - readout_time - t_refwd)  # Spoiler time around readout
    t_spex = 0.5 * (TE - t_exwd - t_refwd)  # Spoiler time after excitation
    fsp_r = 1  # Readout spoiler area factor
    fsp_s = 0.5  # Slice spoiler area factor

    rf_ex_phase = np.pi / 2
    rf_ref_phase = 0

    # ======
    # CREATE EVENTS
    # ======
    flip_ex = 90 * np.pi / 180
    rf_ex, gz, _ = pp.make_sinc_pulse(
        flip_angle=flip_ex,
        system=system,
        duration=t_ex,
        slice_thickness=slice_thickness,
        apodization=0.5,
        time_bw_product=params['t_BW_product_ex'],
        phase_offset=rf_ex_phase,
        return_gz=True,
    )
    gs_ex = pp.make_trapezoid(
        channel="z",
        system=system,
        amplitude=gz.amplitude,
        flat_time=t_exwd,
        rise_time=dG,
    )

    flip_ref = rf_flip[0] * np.pi / 180
    rf_ref, gz, _ = pp.make_sinc_pulse(
        flip_angle=flip_ref,
        system=system,
        duration=t_ref,
        slice_thickness=slice_thickness,
        apodization=0.5,
        time_bw_product=params['t_BW_product_ref'],
        phase_offset=rf_ref_phase,
        use="refocusing",
        return_gz=True,
    )
    gs_ref = pp.make_trapezoid(
        channel="z",
        system=system,
        amplitude=gs_ex.amplitude,
        flat_time=t_refwd,
        rise_time=dG,
    )

    # Slice spoilers around the refocusing pulses and after excitation
    ags_ex = gs_ex.area / 2
    gs_spr = pp.make_trapezoid(
        channel="z",
        system=system,
        area=ags_ex * (1 + fsp_s),
        duration=t_sp,
        rise_time=dG,
    )
    gs_spex = pp.make_trapezoid(
        channel="z", system=system, area=ags_ex * fsp_s, duration=t_spex, rise_time=dG
    )

    delta_k = 1 / fov
    k_width = Nx * delta_k

    # Readout gradient, ADC, readout spoiler and prephaser
    gr_acq = pp.make_trapezoid(
        channel="x",
        system=system,
        flat_area=k_width,
        flat_time=readout_time,
        rise_time=dG,
    )
    adc = pp.make_adc(
        num_samples=Nx, duration=sampling_time, delay=system.adc_dead_time
    )
    gr_spr = pp.make_trapezoid(
        channel="x",
        system=system,
        area=gr_acq.area * fsp_r,
        duration=t_sp,
        rise_time=dG,
    )

    agr_spr = gr_spr.area
    agr_preph = gr_acq.area / 2 + agr_spr
    gr_preph = pp.make_trapezoid(
        channel="x", system=system, area=agr_preph, duration=t_spex, rise_time=dG
    )

    # Phase-encoding: n_echo lines per excitation, n_ex excitations in total
    n_ex = math.floor(Ny / n_echo)
    pe_steps = np.arange(1, n_echo * n_ex + 1) - 0.5 * n_echo * n_ex - 1
    if divmod(n_echo, 2)[1] == 0:
        # For an even echo-train length, roll so the k-space center is sampled
        pe_steps = np.roll(pe_steps, [0, int(-np.round(n_ex / 2))])
    pe_order = pe_steps.reshape((n_ex, n_echo), order="F").T
    phase_areas = pe_order * delta_k

    # Split gradients and recombine into blocks
    gs1_times = np.array([0, gs_ex.rise_time])
    gs1_amp = np.array([0, gs_ex.amplitude])
    gs1 = pp.make_extended_trapezoid(channel="z", times=gs1_times, amplitudes=gs1_amp)

    gs2_times = np.array([0, gs_ex.flat_time])
    gs2_amp = np.array([gs_ex.amplitude, gs_ex.amplitude])
    gs2 = pp.make_extended_trapezoid(channel="z", times=gs2_times, amplitudes=gs2_amp)

    gs3_times = np.array(
        [
            0,
            gs_spex.rise_time,
            gs_spex.rise_time + gs_spex.flat_time,
            gs_spex.rise_time + gs_spex.flat_time + gs_spex.fall_time,
        ]
    )
    gs3_amp = np.array(
        [gs_ex.amplitude, gs_spex.amplitude, gs_spex.amplitude, gs_ref.amplitude]
    )
    gs3 = pp.make_extended_trapezoid(channel="z", times=gs3_times, amplitudes=gs3_amp)

    gs4_times = np.array([0, gs_ref.flat_time])
    gs4_amp = np.array([gs_ref.amplitude, gs_ref.amplitude])
    gs4 = pp.make_extended_trapezoid(channel="z", times=gs4_times, amplitudes=gs4_amp)

    # Vertex times are snapped to the gradient raster (unlike write_tse.py)
    gs5_times = np.array(
        [
            0,
            system.grad_raster_time * np.round(
                (gs_spr.rise_time) / system.grad_raster_time),
            system.grad_raster_time * np.round(
                (gs_spr.rise_time + gs_spr.flat_time) / system.grad_raster_time),
            system.grad_raster_time * np.round(
                (gs_spr.rise_time + gs_spr.flat_time + gs_spr.fall_time) / system.grad_raster_time)
        ]
    )
    gs5_amp = np.array([gs_ref.amplitude, gs_spr.amplitude, gs_spr.amplitude, 0])
    gs5 = pp.make_extended_trapezoid(channel="z", times=gs5_times, amplitudes=gs5_amp)

    gs7_times = np.array(
        [
            0,
            system.grad_raster_time * np.round(
                (gs_spr.rise_time) / system.grad_raster_time),
            system.grad_raster_time * np.round(
                (gs_spr.rise_time + gs_spr.flat_time) / system.grad_raster_time),
            system.grad_raster_time * np.round(
                (gs_spr.rise_time + gs_spr.flat_time + gs_spr.fall_time) / system.grad_raster_time)
        ]
    )
    gs7_amp = np.array([0, gs_spr.amplitude, gs_spr.amplitude, gs_ref.amplitude])
    gs7 = pp.make_extended_trapezoid(channel="z", times=gs7_times, amplitudes=gs7_amp)

    # Readout gradient
    gr3 = gr_preph

    gr5_times = np.array(
        [
            0,
            system.grad_raster_time * np.round((gr_spr.rise_time) / system.grad_raster_time),
            system.grad_raster_time * np.round((gr_spr.rise_time + gr_spr.flat_time) / system.grad_raster_time),
            system.grad_raster_time * np.round((gr_spr.rise_time + gr_spr.flat_time + gr_spr.fall_time) / system.grad_raster_time)
        ]
    )
    gr5_amp = np.array([0, gr_spr.amplitude, gr_spr.amplitude, gr_acq.amplitude])
    gr5 = pp.make_extended_trapezoid(channel="x", times=gr5_times, amplitudes=gr5_amp)

    gr6_times = np.array([0, readout_time])
    gr6_amp = np.array([gr_acq.amplitude, gr_acq.amplitude])
    gr6 = pp.make_extended_trapezoid(channel="x", times=gr6_times, amplitudes=gr6_amp)

    gr7_times = np.array(
        [
            0,
            system.grad_raster_time * np.round((gr_spr.rise_time) / system.grad_raster_time),
            system.grad_raster_time * np.round((gr_spr.rise_time + gr_spr.flat_time) / system.grad_raster_time),
            system.grad_raster_time * np.round(
                (gr_spr.rise_time + gr_spr.flat_time + gr_spr.fall_time) / system.grad_raster_time)

        ]
    )
    gr7_amp = np.array([gr_acq.amplitude, gr_spr.amplitude, gr_spr.amplitude, 0])
    gr7 = pp.make_extended_trapezoid(channel="x", times=gr7_times, amplitudes=gr7_amp)

    # Fill-times (t_ex/t_ref are rebound here as block durations)
    t_ex = pp.calc_duration(gs1) + pp.calc_duration(gs2) + pp.calc_duration(gs3)
    t_ref = (
        pp.calc_duration(gs4)
        + pp.calc_duration(gs5)
        + pp.calc_duration(gs7)
        + readout_time
    )
    t_end = pp.calc_duration(gs4) + pp.calc_duration(gs5)

    # NOTE(review): a, b and c are computed but never used — candidates for
    # removal or for printing as diagnostic timing checkpoints.
    a = pp.calc_duration(gs2)/2 + pp.calc_duration(gs3) + pp.calc_duration(gs4)/2
    b = pp.calc_duration(gs5) + pp.calc_duration(gs4)/2 + pp.calc_duration(gr6)/2
    c = pp.calc_duration(gr6) + pp.calc_duration(gs5) + pp.calc_duration(gs7) + pp.calc_duration(gs4)


    TE_train = t_ex + n_echo * t_ref + t_end
    TR_fill = (TR - n_slices * TE_train) / n_slices
    # Round to gradient raster
    TR_fill = system.grad_raster_time * np.round(TR_fill / system.grad_raster_time)
    if TR_fill < 0:
        TR_fill = 1e-3
        warnings.warn(
            f"TR too short, adapted to include all slices to: {1000 * n_slices * (TE_train + TR_fill)} ms"
        )
    else:
        print(f"TR fill: {1000 * TR_fill} ms")
    delay_TR = pp.make_delay(TR_fill)

    # ======
    # CONSTRUCT SEQUENCE
    # ======
    # k_ex == 0 is a dummy echo train (phase_area = 0, no ADC) to reach a
    # steady state; data are acquired for k_ex = 1..n_ex.
    for k_ex in range(n_ex + 1):
        for s in range(n_slices):
            # Per-slice frequency offset, with the phase offset compensated so
            # the pulse phase is defined at the RF center
            rf_ex.freq_offset = (
                gs_ex.amplitude * slice_thickness * (s - (n_slices - 1) / 2)
            )
            rf_ref.freq_offset = (
                gs_ref.amplitude * slice_thickness * (s - (n_slices - 1) / 2)
            )
            rf_ex.phase_offset = (
                rf_ex_phase
                - 2 * np.pi * rf_ex.freq_offset * pp.calc_rf_center(rf_ex)[0]
            )
            rf_ref.phase_offset = (
                rf_ref_phase
                - 2 * np.pi * rf_ref.freq_offset * pp.calc_rf_center(rf_ref)[0]
            )

            seq.add_block(gs1)
            seq.add_block(gs2, rf_ex)
            seq.add_block(gs3, gr3)

            for k_echo in range(n_echo):
                if k_ex > 0:
                    phase_area = phase_areas[k_echo, k_ex - 1]
                else:
                    phase_area = 0.0  # 0.0 and not 0 because -phase_area should successfully result in negative zero

                gp_pre = pp.make_trapezoid(
                    channel="y",
                    system=system,
                    area=phase_area,
                    duration=t_sp,
                    rise_time=dG,
                )
                gp_rew = pp.make_trapezoid(
                    channel="y",
                    system=system,
                    area=-phase_area,
                    duration=t_sp,
                    rise_time=dG,
                )
                seq.add_block(gs4, rf_ref)
                seq.add_block(gs5, gr5, gp_pre)
                if k_ex > 0:
                    seq.add_block(gr6, adc)
                else:
                    # Dummy train: play the readout gradient without sampling
                    seq.add_block(gr6)

                seq.add_block(gs7, gr7, gp_rew)

            # Ramp the slice gradient down and wait out the rest of the TR
            seq.add_block(gs4)
            seq.add_block(gs5)
            seq.add_block(delay_TR)

    (
        ok,
        error_report,
    ) = seq.check_timing()  # Check whether the timing of the sequence is correct
    if ok:
        print("Timing check passed successfully")
    else:
        print("Timing check failed. Error listing follows:")
        [print(e) for e in error_report]

    # ======
    # VISUALIZATION
    # ======
    if plot:
        seq.plot()

    # =========
    # WRITE .SEQ
    # =========
    if write_seq:
        seq.write(seq_filename)
    #from py2jemris.seq2xml import seq2xml
    #seq2xml(seq, seq_name='t1_TSE_matrx16x16', out_folder='C:\\MRI_seq\\new_MRI_pulse_seq\\t1_TSE')
+
+
# Run as a script: render the plot and write the .seq file.
if __name__ == "__main__":
    main(plot=True, write_seq=True)

+ 180 - 0
LF_scanner/pypulseq/seq_examples/scripts/write_ute.py

@@ -0,0 +1,180 @@
+"""
+A very basic UTE-like sequence, without ramp-sampling, ramp-RF. Achieves TE in the range of 300-400 us
+"""
+from copy import copy
+
+import numpy as np
+from matplotlib import pyplot as plt
+
+import pypulseq as pp
+
+
def main(plot: bool, write_seq: bool, seq_filename: str = "ute_pypulseq.seq"):
    """Create a basic 2D radial UTE-like sequence (no ramp-sampling, ramp-RF).

    Parameters
    ----------
    plot : bool
        If True, display the sequence diagram and the combined gradient shapes.
    write_seq : bool
        If True, write the sequence to ``seq_filename`` in Pulseq format.
    seq_filename : str
        Output .seq file name.
    """
    # ======
    # SETUP
    # ======
    seq = pp.Sequence()  # Create a new sequence object
    fov = 250e-3  # Define FOV and resolution
    Nx = 256
    alpha = 10  # Flip angle
    slice_thickness = 3e-3  # Slice thickness
    TR = 10e-3  # Repetition time
    Nr = 128  # Number of radial spokes
    delta = 2 * np.pi / Nr  # Angular increment
    ro_duration = 2.56e-3  # Read-out time: controls RO bandwidth and T2-blurring
    ro_os = 2  # Oversampling
    ro_asymmetry = 1  # 0: Fully symmetric; 1: half-echo

    rf_spoiling_inc = 117  # RF spoiling increment

    # Set system limits
    system = pp.Opts(
        max_grad=28,
        grad_unit="mT/m",
        max_slew=100,
        slew_unit="T/m/s",
        rf_ringdown_time=20e-6,
        rf_dead_time=100e-6,
        adc_dead_time=10e-6,
    )

    # ======
    # CREATE EVENTS
    # ======
    # Create alpha-degree slice selection pulse and gradient.
    # center_pos=1 places the RF center at the end of the pulse to minimize TE.
    rf, gz, gz_reph = pp.make_sinc_pulse(
        flip_angle=alpha * np.pi / 180,
        duration=1e-3,
        slice_thickness=slice_thickness,
        apodization=0.5,
        time_bw_product=2,
        center_pos=1,
        system=system,
        return_gz=True,
    )

    # Align RO asymmetry to ADC samples
    Nxo = np.round(ro_os * Nx)
    ro_asymmetry = pp.round_half_up(ro_asymmetry * Nxo / 2) / Nxo * 2

    # Define other gradients and ADC events
    delta_k = 1 / fov / (1 + ro_asymmetry)
    ro_area = Nx * delta_k
    gx = pp.make_trapezoid(
        channel="x", flat_area=ro_area, flat_time=ro_duration, system=system
    )
    adc = pp.make_adc(
        num_samples=Nxo, duration=gx.flat_time, delay=gx.rise_time, system=system
    )
    # Prephaser area accounts for the asymmetric echo position
    gx_pre = pp.make_trapezoid(
        channel="x",
        area=-(gx.area - ro_area) / 2
        - gx.amplitude * adc.dwell / 2
        - ro_area / 2 * (1 - ro_asymmetry),
        system=system,
    )

    # Gradient spoiling
    gx_spoil = pp.make_trapezoid(channel="x", area=0.2 * Nx * delta_k, system=system)

    # Calculate timing: TE runs from the RF center (end of the pulse) to the
    # echo position within the asymmetric readout
    TE = (
        gz.fall_time
        + pp.calc_duration(gx_pre, gz_reph)
        + gx.rise_time
        + adc.dwell * Nxo / 2 * (1 - ro_asymmetry)
    )
    delay_TR = (
        np.ceil(
            (
                TR
                - pp.calc_duration(gx_pre, gz_reph)
                - pp.calc_duration(gz)
                - pp.calc_duration(gx)
            )
            / seq.grad_raster_time
        )
        * seq.grad_raster_time
    )
    # The spoiler must fit inside the TR fill time
    assert np.all(delay_TR >= pp.calc_duration(gx_spoil))

    print(f"TE = {TE * 1e6:.0f} us")

    # Delay the readout prephaser if the slice rephaser takes longer
    if pp.calc_duration(gz_reph) > pp.calc_duration(gx_pre):
        gx_pre.delay = pp.calc_duration(gz_reph) - pp.calc_duration(gx_pre)

    rf_phase = 0
    rf_inc = 0

    # ======
    # CONSTRUCT SEQUENCE
    # ======
    for i in range(Nr):
        # Two excitations per spoke with alternating slice-gradient polarity
        # (presumably combined during reconstruction — TODO confirm)
        for c in range(2):
            # RF spoiling: quadratic phase increment applied to RF and ADC
            rf.phase_offset = rf_phase / 180 * np.pi
            adc.phase_offset = rf_phase / 180 * np.pi
            rf_inc = np.mod(rf_inc + rf_spoiling_inc, 360.0)
            rf_phase = np.mod(rf_phase + rf_inc, 360.0)

            gz.amplitude = -gz.amplitude  # Alternate GZ amplitude
            gz_reph.amplitude = -gz_reph.amplitude

            seq.add_block(rf, gz)
            phi = delta * i

            # Rotate the readout in-plane by scaling X/Y copies of each gradient
            gpc = copy(gx_pre)
            gps = copy(gx_pre)
            gpc.amplitude = gx_pre.amplitude * np.cos(phi)
            gps.amplitude = gx_pre.amplitude * np.sin(phi)
            gps.channel = "y"

            grc = copy(gx)
            grs = copy(gx)
            grc.amplitude = gx.amplitude * np.cos(phi)
            grs.amplitude = gx.amplitude * np.sin(phi)
            grs.channel = "y"

            gsc = copy(gx_spoil)
            gss = copy(gx_spoil)
            gsc.amplitude = gx_spoil.amplitude * np.cos(phi)
            gss.amplitude = gx_spoil.amplitude * np.sin(phi)
            gss.channel = "y"

            seq.add_block(gpc, gps, gz_reph)
            seq.add_block(grc, grs, adc)
            seq.add_block(gsc, gss, pp.make_delay(delay_TR))

    # Check whether the timing of the sequence is correct
    ok, error_report = seq.check_timing()
    if ok:
        print("Timing check passed successfully")
    else:
        print("Timing check failed. Error listing follows:")
        [print(e) for e in error_report]

    # ======
    # VISUALIZATION
    # ======
    if plot:
        seq.plot()

        # Plot gradients to check for gaps and optimality of the timing
        gw = seq.waveforms_and_times()[0]
        # Plot the entire gradient shape
        plt.figure()
        plt.plot(gw[0][0], gw[0][1], gw[1][0], gw[1][1], gw[2][0], gw[2][1])
        plt.show()

    # =========
    # WRITE .SEQ
    # =========
    if write_seq:
        # Prepare the sequence output for the scanner
        seq.set_definition(key="FOV", value=[fov, fov, slice_thickness])
        seq.set_definition(key="Name", value="UTE")

        seq.write(seq_filename)
+
+
# Run as a script: render the plot and write the .seq file.
if __name__ == "__main__":
    main(plot=True, write_seq=True)

+ 41 - 0
LF_scanner/pypulseq/sigpy_pulse_opts.py

@@ -0,0 +1,41 @@
class SigpyPulseOpts:
    """
    Container for SigPy RF pulse-design options.

    Parameters
    ----------
    pulse_type : str, default="slr"
        Pulse design algorithm: "slr" (single band) or "sms" (multiband).
    ptype : str, default="st"
        Pulse type passed to the SigPy designer.
    ftype : str, default="ls"
        Filter design method passed to the SigPy designer.
    d1, d2 : float, default=0.01
        Ripple parameters passed to the SigPy designer.
    cancel_alpha_phs : bool, default=False
        Whether to cancel the alpha phase in the design.
    n_bands : int, default=3
        Number of simultaneous bands (stored only for "sms").
    band_sep : int, default=20
        Band separation (stored only for "sms").
    phs_0_pt : str, default="None"
        Band phase scheme (stored only for "sms").
    """

    def __init__(
        self,
        pulse_type: str = "slr",
        ptype: str = "st",
        ftype: str = "ls",
        d1: float = 0.01,
        d2: float = 0.01,
        cancel_alpha_phs: bool = False,
        n_bands: int = 3,
        band_sep: int = 20,
        phs_0_pt: str = "None",
    ):
        self.pulse_type = pulse_type

        # The original set these only for "slr"/"sms" (in two identical
        # branches), which made __str__ raise AttributeError for any other
        # pulse_type; set the common options unconditionally instead.
        self.ptype = ptype
        self.ftype = ftype
        self.d1 = d1
        self.d2 = d2
        self.cancel_alpha_phs = cancel_alpha_phs

        if pulse_type == "sms":
            # Multiband-only options
            self.n_bands = n_bands
            self.band_sep = band_sep
            self.phs_0_pt = phs_0_pt

    def __str__(self) -> str:
        s = "Pulse options:"
        s += "\nptype: " + str(self.ptype)
        s += "\nftype: " + str(self.ftype)
        s += "\nd1: " + str(self.d1)
        s += "\nd2: " + str(self.d2)
        s += "\ncancel_alpha_phs: " + str(self.cancel_alpha_phs)

        return s

+ 93 - 0
LF_scanner/pypulseq/split_gradient.py

@@ -0,0 +1,93 @@
+from types import SimpleNamespace
+from typing import Tuple
+
+import numpy as np
+
+from LF_scanner.pypulseq.calc_duration import calc_duration
+from LF_scanner.pypulseq.make_extended_trapezoid import make_extended_trapezoid
+from LF_scanner.pypulseq.opts import Opts
+
+
def split_gradient(
    grad: SimpleNamespace, system: Opts = Opts()
) -> Tuple[SimpleNamespace, SimpleNamespace, SimpleNamespace]:
    """
    Splits a trapezoidal gradient into slew up, flat top and slew down. Returns the individual gradient parts (slew up,
    flat top and slew down) as extended trapezoid gradient objects. The delays in the individual gradient events are
    adapted such that addGradients(...) produces a gradient equivalent to 'grad'.

    See also:
    - `pypulseq.split_gradient_at()`
    - `pypulseq.make_extended_trapezoid()`
    - `pypulseq.make_trapezoid()`
    - `pypulseq.Sequence.sequence.Sequence.add_block()`
    - `pypulseq.opts.Opts`

    Parameters
    ----------
    grad : SimpleNamespace
        Trapezoidal gradient event to be split.
    system : Opts, default=Opts()
        System limits.

    Returns
    -------
    ramp_up, flat_top, ramp_down : SimpleNamespace
        The three parts of the input trapezoid (the original docstring said
        "grad1, grad2" but three events are returned).

    Raises
    ------
    ValueError
         If arbitrary gradients are passed.
         If non-gradient event is passed.
    """
    grad_raster_time = system.grad_raster_time
    # Total duration including the initial delay; used to place the ramp-down.
    total_length = calc_duration(grad)

    if grad.type == "trap":
        channel = grad.channel
        # Snap every trapezoid segment to the gradient raster.
        grad.delay = np.round(grad.delay / grad_raster_time) * grad_raster_time
        grad.rise_time = np.round(grad.rise_time / grad_raster_time) * grad_raster_time
        grad.flat_time = np.round(grad.flat_time / grad_raster_time) * grad_raster_time
        grad.fall_time = np.round(grad.fall_time / grad_raster_time) * grad_raster_time

        # Ramp-up: 0 -> amplitude over the rise time, delayed like the input.
        times = np.array([0, grad.rise_time])
        amplitudes = np.array([0, grad.amplitude])
        ramp_up = make_extended_trapezoid(
            channel=channel,
            system=system,
            times=times,
            amplitudes=amplitudes,
            skip_check=True,
        )
        ramp_up.delay = grad.delay

        # Ramp-down: amplitude -> 0 over the fall time, delayed to the tail end.
        times = np.array([0, grad.fall_time])
        amplitudes = np.array([grad.amplitude, 0])
        ramp_down = make_extended_trapezoid(
            channel=channel,
            system=system,
            times=times,
            amplitudes=amplitudes,
            skip_check=True,
        )
        ramp_down.delay = total_length - grad.fall_time
        # NOTE(review): scaling .t by the raster time assumes
        # make_extended_trapezoid returned sample indices here — confirm
        # against make_extended_trapezoid's output units.
        ramp_down.t = ramp_down.t * grad_raster_time

        # Flat top as an arbitrary-gradient event bridging the two ramps.
        flat_top = SimpleNamespace()
        flat_top.type = "grad"
        flat_top.channel = channel
        flat_top.delay = grad.delay + grad.rise_time
        # Implicit start=0; np.arange is called with keyword stop/step only.
        flat_top.t = np.arange(
            step=grad_raster_time,
            stop=ramp_down.delay - grad_raster_time - grad.delay - grad.rise_time,
        )
        # Constant amplitude over the whole flat top.
        flat_top.waveform = grad.amplitude * np.ones(len(flat_top.t))
        flat_top.first = grad.amplitude
        flat_top.last = grad.amplitude

        return ramp_up, flat_top, ramp_down
    elif grad.type == "grad":
        raise ValueError("Splitting of arbitrary gradients is not implemented yet.")
    else:
        raise ValueError("Splitting of unsupported event.")

+ 147 - 0
LF_scanner/pypulseq/split_gradient_at.py

@@ -0,0 +1,147 @@
+from copy import deepcopy
+from types import SimpleNamespace
+from typing import Tuple, Union
+
+import numpy as np
+
+from LF_scanner.pypulseq import eps
+from LF_scanner.pypulseq.make_extended_trapezoid import make_extended_trapezoid
+from LF_scanner.pypulseq.opts import Opts
+
+
def split_gradient_at(
    grad: SimpleNamespace, time_point: float, system: Opts = Opts()
) -> Union[SimpleNamespace, Tuple[SimpleNamespace, SimpleNamespace]]:
    """
    Split a gradient event into two parts at `time_point`.

    Trapezoids and extended trapezoids are returned as extended-trapezoid
    events; regularly sampled arbitrary gradients are returned as arbitrary
    gradient events. The delays of the parts are adjusted such that
    add_gradients(...) on them reproduces `grad`.

    Parameters
    ----------
    grad : SimpleNamespace
        Gradient event to be split.
    time_point : float
        Time (s) at which `grad` is cut; snapped to the gradient raster.
    system : Opts, default=Opts()
        System limits.

    Returns
    -------
    SimpleNamespace or (SimpleNamespace, SimpleNamespace)
        The two parts, or the unmodified gradient when the cut falls outside
        an arbitrary gradient's samples.

    Raises
    ------
    ValueError
        If a non-gradient event is passed, or the cut lies at/after the end
        of the gradient.
    """
    # deepcopy() to emulate pass-by-value; otherwise the caller's event is mutated
    grad = deepcopy(grad)

    grad_raster_time = system.grad_raster_time

    # Snap the cut to the raster. BUG FIX: np.round returns a float, which is
    # not a valid numpy index — cast to int before using it as one.
    time_index = int(np.round(time_point / grad_raster_time))
    # Work around floating-point arithmetic limitation
    time_point = np.round(time_index * grad_raster_time, 6)
    channel = grad.channel

    if grad.type == "grad":
        # Distinguish an arbitrary gradient (uniformly raster-sampled) from an
        # extended trapezoid.
        # NOTE(review): the first condition inspects tt[-1]; checking the
        # *first* sample against half a raster period looks intended — confirm.
        if np.abs(grad.tt[-1] - 0.5 * grad_raster_time) < 1e-10 and np.all(
            np.abs(grad.tt[1:] - grad.tt[:-1] - grad_raster_time) < 1e-10
        ):
            # Arbitrary gradient -- trivial conversion.
            # If the cut is outside the waveform there is nothing to split.
            if time_index == 0 or time_index >= len(grad.tt):
                return grad

            # BUG FIX: the original aliased grad1 and grad2 to the same object,
            # so the later slice assignments clobbered each other; grad2 must
            # be an independent copy, and the second half must be sliced
            # before the first half truncates `grad` (== grad1).
            grad1 = grad
            grad2 = deepcopy(grad)
            grad1.last = 0.5 * (
                grad.waveform[time_index - 1] + grad.waveform[time_index]
            )
            grad2.first = grad1.last
            # BUG FIX: use .tt consistently — the attribute this function
            # itself inspects above; the original mixed .t and .tt.
            grad2.delay = grad.delay + grad.tt[time_index]
            grad2.tt = grad.tt[time_index:] - time_point
            grad2.waveform = grad.waveform[time_index:]
            grad1.tt = grad.tt[:time_index]
            grad1.waveform = grad.waveform[:time_index]
            return grad1, grad2
        else:
            # Extended trapezoid: corner times/amplitudes are already explicit.
            times = grad.tt
            amplitudes = grad.waveform
    elif grad.type == "trap":
        # Snap every trapezoid segment to the gradient raster.
        grad.delay = np.round(grad.delay / grad_raster_time) * grad_raster_time
        grad.rise_time = np.round(grad.rise_time / grad_raster_time) * grad_raster_time
        grad.flat_time = np.round(grad.flat_time / grad_raster_time) * grad_raster_time
        grad.fall_time = np.round(grad.fall_time / grad_raster_time) * grad_raster_time

        # Convert the trapezoid to extended-trapezoid corner points.
        if grad.flat_time == 0:
            # Triangular pulse
            times = [0, grad.rise_time, grad.rise_time + grad.fall_time]
            amplitudes = [0, grad.amplitude, 0]
        else:
            times = [
                0,
                grad.rise_time,
                grad.rise_time + grad.flat_time,
                grad.rise_time + grad.flat_time + grad.fall_time,
            ]
            amplitudes = [0, grad.amplitude, grad.amplitude, 0]
    else:
        raise ValueError("Splitting of unsupported event.")

    # If the split line is at/behind the gradient end, there is nothing to split.
    if time_point >= grad.delay + times[-1]:
        raise ValueError(
            "Splitting of gradient at time point after the end of gradient."
        )

    # Normalize to arrays up front. BUG FIX: the trap branch produced Python
    # lists, for which `grad.delay + times` below would raise TypeError.
    times = np.asarray(times, dtype=float)
    amplitudes = np.asarray(amplitudes, dtype=float)

    # If the split line goes through the initial delay, fold the delay into
    # the waveform as an explicit zero-amplitude leading corner.
    if time_point < grad.delay:
        times = np.insert(times + grad.delay, 0, 0)
        # BUG FIX: the original wrote `amplitudes = [0, amplitudes]`, nesting
        # the sequence instead of prepending a zero sample.
        amplitudes = np.insert(amplitudes, 0, 0)
        grad.delay = 0
    else:
        time_point -= grad.delay

    times = times.round(6)  # Work around floating-point arithmetic limitation

    # Sample the amplitude at the cut and assemble the two corner lists.
    amp_tp = np.interp(x=time_point, xp=times, fp=amplitudes)
    t_eps = 1e-10
    before = times < time_point - t_eps
    after = times > time_point + t_eps
    times1 = np.append(times[before], time_point)
    amplitudes1 = np.append(amplitudes[before], amp_tp)
    times2 = np.insert(times[after], 0, time_point) - time_point
    amplitudes2 = np.insert(amplitudes[after], 0, amp_tp)

    # Recreate the two parts as extended trapezoids.
    grad1 = make_extended_trapezoid(
        channel=channel,
        system=system,
        times=times1,
        amplitudes=amplitudes1,
        skip_check=True,
    )
    grad1.delay = grad.delay
    grad2 = make_extended_trapezoid(
        channel=channel,
        system=system,
        times=times2,
        amplitudes=amplitudes2,
        skip_check=True,
    )
    # NOTE(review): when the cut falls after the delay, this omits grad.delay
    # from the second part's delay — confirm with an add_gradients round-trip.
    grad2.delay = time_point

    return grad1, grad2

+ 37 - 0
LF_scanner/pypulseq/supported_labels_rf_use.py

@@ -0,0 +1,37 @@
+from typing import Tuple
+
+
def get_supported_labels() -> Tuple[
    str, str, str, str, str, str, str, str, str, str, str, str, str
]:
    """
    Returns
    -------
    tuple
        The supported sequence label identifiers.
    """
    # One identifier per counter/flag label, in the canonical order.
    return tuple(
        "SLC SEG REP AVG SET ECO PHS LIN PAR NAV REV SMS PMC".split()
    )
+
+
def get_supported_rf_uses() -> Tuple[str, str, str, str, str]:
    """
    Returns
    -------
    tuple
        The supported RF pulse `use` labels.
    """
    return tuple(
        "excitation refocusing inversion saturation preparation".split()
    )

+ 0 - 0
LF_scanner/pypulseq/tests/__init__.py


+ 28 - 0
LF_scanner/pypulseq/tests/base.py

@@ -0,0 +1,28 @@
+from pathlib import Path
+
+import numpy as np
+
+
def main(script: callable, matlab_seq_filename: str, pypulseq_seq_filename: str):
    """
    Shared regression-test driver: run a PyPulseq example script, then diff
    its generated .seq file against the stored MATLAB reference and assert
    that fewer than 0.1% of the lines mismatch.

    Parameters
    ----------
    script : callable
        Example module exposing ``main(plot, write_seq, seq_filename)``
        (one of the ``write_*`` scripts).
    matlab_seq_filename : str
        Name of the reference .seq file inside ``tests/matlab_seqs``.
    pypulseq_seq_filename : str
        Name of the .seq file to generate; created next to this file and
        deleted after the comparison.
    """
    path_here = Path(__file__)  # Path of this file
    pypulseq_seq_filename = (
        path_here.parent / pypulseq_seq_filename
    )  # Path to PyPulseq seq
    matlab_seq_filename = (
        path_here.parent / "matlab_seqs" / matlab_seq_filename
    )  # Path to MATLAB seq

    # Run PyPulseq script and write seq file
    script.main(plot=False, write_seq=True, seq_filename=str(pypulseq_seq_filename))

    # Read MATLAB and PyPulseq seq files, discard header and signature
    # ([4:-7] assumes a fixed-size version header and trailing signature block)
    seq_matlab = matlab_seq_filename.read_text().splitlines()[4:-7]
    seq_pypulseq = pypulseq_seq_filename.read_text().splitlines()[4:-7]

    pypulseq_seq_filename.unlink()  # Delete PyPulseq seq

    diff_lines = np.setdiff1d(seq_matlab, seq_pypulseq)  # Mismatching lines
    percentage_diff = len(diff_lines) / len(
        seq_matlab
    )  # Fraction of lines that are mismatching; we tolerate up to 0.1%
    assert percentage_diff < 1e-3  # Unit test

+ 19 - 0
LF_scanner/pypulseq/tests/test_MPRAGE.py

@@ -0,0 +1,19 @@
+import unittest
+
+from pypulseq.seq_examples.scripts import write_MPRAGE
+from pypulseq.tests import base
+
+
class TestMPRAGE(unittest.TestCase):
    """Regression test: the PyPulseq MPRAGE example must match the MATLAB reference .seq output."""

    # Renamed from the copy-pasted `test_write_epi` of the EPI test template.
    def test_write_mprage(self):
        matlab_seq_filename = "mprage_matlab.seq"
        pypulseq_seq_filename = "mprage_pypulseq.seq"
        base.main(
            script=write_MPRAGE,
            matlab_seq_filename=matlab_seq_filename,
            pypulseq_seq_filename=pypulseq_seq_filename,
        )


if __name__ == "__main__":
    unittest.main()

+ 19 - 0
LF_scanner/pypulseq/tests/test_epi.py

@@ -0,0 +1,19 @@
+import unittest
+
+from pypulseq.seq_examples.scripts import write_epi
+from pypulseq.tests import base
+
+
class TestEPI(unittest.TestCase):
    """Regression test: the PyPulseq EPI example must match the MATLAB reference .seq output."""

    def test_write_epi(self):
        # base.main runs the example script, diffs the generated .seq against
        # the stored MATLAB output and asserts <0.1% mismatching lines.
        matlab_seq_filename = "epi_matlab.seq"
        pypulseq_seq_filename = "epi_pypulseq.seq"
        base.main(
            script=write_epi,
            matlab_seq_filename=matlab_seq_filename,
            pypulseq_seq_filename=pypulseq_seq_filename,
        )


if __name__ == "__main__":
    unittest.main()

+ 19 - 0
LF_scanner/pypulseq/tests/test_epi_label.py

@@ -0,0 +1,19 @@
+import unittest
+
+from pypulseq.seq_examples.scripts import write_epi_label
+from pypulseq.tests import base
+
+
class TestEPILabel(unittest.TestCase):
    """Regression test: the PyPulseq labeled-EPI example must match the MATLAB reference .seq output."""

    # Renamed from the copy-pasted `test_write_epi` of the EPI test template.
    def test_write_epi_label(self):
        matlab_seq_filename = "epi_label_matlab.seq"
        pypulseq_seq_filename = "epi_label_pypulseq.seq"
        base.main(
            script=write_epi_label,
            matlab_seq_filename=matlab_seq_filename,
            pypulseq_seq_filename=pypulseq_seq_filename,
        )


if __name__ == "__main__":
    unittest.main()

+ 19 - 0
LF_scanner/pypulseq/tests/test_epi_se.py

@@ -0,0 +1,19 @@
+import unittest
+
+from pypulseq.seq_examples.scripts import write_epi_se
+from pypulseq.tests import base
+
+
class TestEPISpinEcho(unittest.TestCase):
    """Regression test: the PyPulseq spin-echo EPI example must match the MATLAB reference .seq output."""

    # Renamed from the copy-pasted `test_write_epi` of the EPI test template.
    def test_write_epi_se(self):
        matlab_seq_filename = "epi_se_matlab.seq"
        pypulseq_seq_filename = "epi_se_pypulseq.seq"
        base.main(
            script=write_epi_se,
            matlab_seq_filename=matlab_seq_filename,
            pypulseq_seq_filename=pypulseq_seq_filename,
        )


if __name__ == "__main__":
    unittest.main()

+ 19 - 0
LF_scanner/pypulseq/tests/test_epi_se_rs.py

@@ -0,0 +1,19 @@
+import unittest
+
+from pypulseq.seq_examples.scripts import write_epi_se_rs
+from pypulseq.tests import base
+
+
class TestEPISpinEchoRS(unittest.TestCase):
    """Regression test: the PyPulseq ramp-sampled spin-echo EPI example must match the MATLAB reference .seq output."""

    # Renamed from the copy-pasted `test_write_epi` of the EPI test template.
    def test_write_epi_se_rs(self):
        matlab_seq_filename = "epi_se_rs_matlab.seq"
        pypulseq_seq_filename = "epi_se_rs_pypulseq.seq"
        base.main(
            script=write_epi_se_rs,
            matlab_seq_filename=matlab_seq_filename,
            pypulseq_seq_filename=pypulseq_seq_filename,
        )


if __name__ == "__main__":
    unittest.main()

+ 19 - 0
LF_scanner/pypulseq/tests/test_gre.py

@@ -0,0 +1,19 @@
+import unittest
+
+from pypulseq.seq_examples.scripts import write_gre
+from pypulseq.tests import base
+
+
class TestGRE(unittest.TestCase):
    """Regression test: the PyPulseq GRE example must match the MATLAB reference .seq output."""

    # Renamed from the copy-pasted `test_write_epi` of the EPI test template.
    def test_write_gre(self):
        matlab_seq_filename = "gre_matlab.seq"
        pypulseq_seq_filename = "gre_pypulseq.seq"
        base.main(
            script=write_gre,
            matlab_seq_filename=matlab_seq_filename,
            pypulseq_seq_filename=pypulseq_seq_filename,
        )


if __name__ == "__main__":
    unittest.main()

+ 19 - 0
LF_scanner/pypulseq/tests/test_gre_label.py

@@ -0,0 +1,19 @@
+import unittest
+
+from pypulseq.seq_examples.scripts import write_gre_label
+from pypulseq.tests import base
+
+
class TestGRELabel(unittest.TestCase):
    """Regression test: the PyPulseq labeled-GRE example must match the MATLAB reference .seq output."""

    # Renamed from the copy-pasted `test_write_epi` of the EPI test template.
    def test_write_gre_label(self):
        matlab_seq_filename = "gre_label_matlab.seq"
        pypulseq_seq_filename = "gre_label_pypulseq.seq"
        base.main(
            script=write_gre_label,
            matlab_seq_filename=matlab_seq_filename,
            pypulseq_seq_filename=pypulseq_seq_filename,
        )


if __name__ == "__main__":
    unittest.main()

+ 19 - 0
LF_scanner/pypulseq/tests/test_gre_radial.py

@@ -0,0 +1,19 @@
+import unittest
+
+from pypulseq.seq_examples.scripts import write_radial_gre
+from pypulseq.tests import base
+
+
class TestGRERadial(unittest.TestCase):
    """Regression test: the PyPulseq radial GRE example must match the MATLAB reference .seq output."""

    # Renamed from the copy-pasted `TestEPISpinEchoRS`/`test_write_epi` of the
    # template this file was derived from (the old class name duplicated
    # the one in test_epi_se_rs.py).
    def test_write_gre_radial(self):
        matlab_seq_filename = "gre_radial_matlab.seq"
        pypulseq_seq_filename = "gre_radial_pypulseq.seq"
        base.main(
            script=write_radial_gre,
            matlab_seq_filename=matlab_seq_filename,
            pypulseq_seq_filename=pypulseq_seq_filename,
        )


if __name__ == "__main__":
    unittest.main()

+ 19 - 0
LF_scanner/pypulseq/tests/test_haste.py

@@ -0,0 +1,19 @@
+import unittest
+
+from pypulseq.seq_examples.scripts import write_haste
+from pypulseq.tests import base
+
+
class TestHASTE(unittest.TestCase):
    """Regression test: the PyPulseq HASTE example must match the MATLAB reference .seq output."""

    # Renamed from the copy-pasted `test_write_epi` of the EPI test template.
    def test_write_haste(self):
        matlab_seq_filename = "haste_matlab.seq"
        pypulseq_seq_filename = "haste_pypulseq.seq"
        base.main(
            script=write_haste,
            matlab_seq_filename=matlab_seq_filename,
            pypulseq_seq_filename=pypulseq_seq_filename,
        )


if __name__ == "__main__":
    unittest.main()

+ 120 - 0
LF_scanner/pypulseq/tests/test_sigpy.py

@@ -0,0 +1,120 @@
+# sms - check MB
+# slr - check slice profile
+
+import unittest
+
+import numpy as np
+import sigpy.mri.rf as rf
+
+from pypulseq.make_sigpy_pulse import sigpy_n_seq
+from pypulseq.opts import Opts
+from pypulseq.sigpy_pulse_opts import SigpyPulseOpts
+
+
class TestSigpyPulseMethods(unittest.TestCase):
    """
    Sanity checks for the SigPy-based RF pulse designs.

    Each test designs a pulse, Bloch-simulates it with sigpy.mri.rf.sim.abrm,
    and counts the samples of the profile with |Mxy| > 0.8:
    - SLR single-band: expects a plateau of 29 samples on this grid.
    - SMS multiband: expects n_bands times the single-band plateau.
    """

    def test_slr(self):
        print("Testing SLR design")

        time_bw_product = 4
        slice_thickness = 3e-3  # Slice thickness
        flip_angle = np.pi / 2
        # Set system limits
        system = Opts(
            max_grad=32,
            grad_unit="mT/m",
            max_slew=130,
            slew_unit="T/m/s",
            rf_ringdown_time=30e-6,
            rf_dead_time=100e-6,
        )
        pulse_cfg = SigpyPulseOpts(
            pulse_type="slr",
            ptype="st",
            ftype="ls",
            d1=0.01,
            d2=0.01,
            cancel_alpha_phs=False,
            n_bands=3,
            band_sep=20,
            phs_0_pt="None",
        )
        rfp, gz, _, pulse = sigpy_n_seq(
            flip_angle=flip_angle,
            system=system,
            duration=3e-3,
            slice_thickness=slice_thickness,
            time_bw_product=4,
            return_gz=True,
            pulse_cfg=pulse_cfg,
        )

        # Bloch-simulate the designed pulse
        [a, b] = rf.sim.abrm(
            pulse,
            np.arange(
                -20 * time_bw_product, 20 * time_bw_product, 40 * time_bw_product / 2000
            ),
            True,
        )
        Mxy = 2 * np.multiply(np.conj(a), b)
        # pl.LinePlot(Mxy)
        # print(np.sum(np.abs(Mxy)))
        # peaks, dict = sis.find_peaks(np.abs(Mxy),threshold=0.5, plateau_size=40)
        plateau_widths = np.sum(np.abs(Mxy) > 0.8)
        # BUG FIX: the original used assertTrue(29, plateau_widths), which
        # always passes (29 is truthy; the second argument is only the failure
        # message). Compare the values, as test_sms already does.
        self.assertEqual(29, plateau_widths)

    def test_sms(self):
        print("Testing SMS design")

        time_bw_product = 4
        slice_thickness = 3e-3  # Slice thickness
        flip_angle = np.pi / 2
        n_bands = 3
        # Set system limits
        system = Opts(
            max_grad=32,
            grad_unit="mT/m",
            max_slew=130,
            slew_unit="T/m/s",
            rf_ringdown_time=30e-6,
            rf_dead_time=100e-6,
        )
        pulse_cfg = SigpyPulseOpts(
            pulse_type="sms",
            ptype="st",
            ftype="ls",
            d1=0.01,
            d2=0.01,
            cancel_alpha_phs=False,
            n_bands=n_bands,
            band_sep=20,
            phs_0_pt="None",
        )
        rfp, gz, _, pulse = sigpy_n_seq(
            flip_angle=flip_angle,
            system=system,
            duration=3e-3,
            slice_thickness=slice_thickness,
            time_bw_product=4,
            return_gz=True,
            pulse_cfg=pulse_cfg,
        )

        # Bloch-simulate the designed pulse
        [a, b] = rf.sim.abrm(
            pulse,
            np.arange(
                -20 * time_bw_product, 20 * time_bw_product, 40 * time_bw_product / 2000
            ),
            True,
        )
        Mxy = 2 * np.multiply(np.conj(a), b)
        # pl.LinePlot(Mxy)
        # print(np.sum(np.abs(Mxy)))
        # peaks, dict = sis.find_peaks(np.abs(Mxy),threshold=0.5, plateau_size=40)
        plateau_widths = np.sum(np.abs(Mxy) > 0.8)
        self.assertEqual(
            29 * n_bands, plateau_widths
        )  # if slr has 29 > 0.8, then sms with MB = n_bands


if __name__ == "__main__":
    unittest.main()

+ 19 - 0
LF_scanner/pypulseq/tests/test_tse.py

@@ -0,0 +1,19 @@
+import unittest
+
+from pypulseq.seq_examples.scripts import write_tse
+from pypulseq.tests import base
+
+
class TestTSE(unittest.TestCase):
    """Regression test: the PyPulseq TSE example must match the MATLAB reference .seq output."""

    # Renamed from the copy-pasted `test_write_epi` of the EPI test template.
    def test_write_tse(self):
        matlab_seq_filename = "tse_matlab.seq"
        pypulseq_seq_filename = "tse_pypulseq.seq"
        base.main(
            script=write_tse,
            matlab_seq_filename=matlab_seq_filename,
            pypulseq_seq_filename=pypulseq_seq_filename,
        )


if __name__ == "__main__":
    unittest.main()

+ 19 - 0
LF_scanner/pypulseq/tests/test_ute.py

@@ -0,0 +1,19 @@
+import unittest
+
+from pypulseq.seq_examples.scripts import write_ute
+from pypulseq.tests import base
+
+
class TestUTE(unittest.TestCase):
    """Regression test: the PyPulseq UTE example must match the MATLAB reference .seq output."""

    # Renamed from the copy-pasted `test_write_epi` of the EPI test template.
    def test_write_ute(self):
        matlab_seq_filename = "ute_matlab.seq"
        pypulseq_seq_filename = "ute_pypulseq.seq"
        base.main(
            script=write_ute,
            matlab_seq_filename=matlab_seq_filename,
            pypulseq_seq_filename=pypulseq_seq_filename,
        )


if __name__ == "__main__":
    unittest.main()

+ 39 - 0
LF_scanner/pypulseq/traj_to_grad.py

@@ -0,0 +1,39 @@
+from typing import Tuple
+
+import numpy as np
+
+from LF_scanner.pypulseq.opts import Opts
+
+
def traj_to_grad(
    k: np.ndarray, raster_time: float = None
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Convert k-space trajectory `k` into a gradient waveform in compliance with
    `raster_time` gradient raster time.

    Parameters
    ----------
    k : numpy.ndarray
        K-space trajectory to be converted into gradient waveform.
        Needs at least 3 samples for the slew-rate resampling below.
    raster_time : float, optional
        Gradient raster time. Defaults to ``Opts().grad_raster_time``,
        resolved lazily at call time so importing this module does not
        require constructing an Opts instance.

    Returns
    -------
    g : numpy.ndarray
        Gradient waveform (Hz/m), sampled between k-space points.
    sr : numpy.ndarray
        Slew rate, resampled onto the gradient sample positions.
    """
    if raster_time is None:
        raster_time = Opts().grad_raster_time

    # Compute finite difference for gradients in Hz/m
    g = (k[1:] - k[:-1]) / raster_time
    # Compute the slew rate
    sr0 = (g[1:] - g[:-1]) / raster_time

    # Gradient is sampled between k-space points whilst the slew rate is
    # between gradient points: resample by averaging neighboring values and
    # copying the endpoints.
    sr = np.zeros(len(sr0) + 1)
    sr[0] = sr0[0]
    # BUG FIX: the original averaged the scalar sr0[-1] with sr0[1:] instead
    # of averaging each pair of neighbors sr0[:-1] and sr0[1:].
    sr[1:-1] = 0.5 * (sr0[:-1] + sr0[1:])
    sr[-1] = sr0[-1]

    return g, sr

+ 40 - 0
LF_scanner/pypulseq/utilities/TSE_k_space_fill.py

@@ -0,0 +1,40 @@
def TSE_k_space_fill(n_ex, ETL, k_steps, TE_eff_number, order):
    """
    Define the phase-encoding step order for TSE k-space filling in linear
    order, rotated so that the central k-space line is acquired at echo
    number `TE_eff_number` (the effective TE).

    Parameters
    ----------
    n_ex : int
        Number of excitations (shots).
    ETL : int
        Echo train length (lines per shot).
    k_steps : int
        Total number of phase-encoding steps; the central line is k_steps/2.
    TE_eff_number : int
        1-based echo index at which the central line should be acquired.
    order : str
        'non_linear' enables center-out reshuffling; any other value keeps
        the (rotated) linear order.

    Returns
    -------
    list[list[int]]
        One list of phase-encode indices per excitation.
    """
    # Base echo train: descending indices in steps of n_ex, ending at 0.
    k_space_list_with_zero = [int((ETL - 1) * n_ex - i * n_ex) for i in range(ETL)]

    central_num = int(k_steps / 2)
    index_central_line = k_space_list_with_zero.index(central_num)
    shift = index_central_line - TE_eff_number + 1

    # Rotate the train so the central line lands on echo TE_eff_number.
    # (The original had identical code in separate `shift > 0` and
    # `shift < 0` branches; a single rotation covers both, and shift == 0
    # is the identity.)
    if shift != 0:
        k_space_list_with_zero = (
            k_space_list_with_zero[shift:] + k_space_list_with_zero[:shift]
        )

    if order == 'non_linear':
        a = k_space_list_with_zero[:((shift - index_central_line) * 2 + 1)]
        b = k_space_list_with_zero[((shift - index_central_line) * 2 + 1):]
        # Interleave the remainder from both ends (center-out ordering).
        for i in range(1, int(len(b) / 2) + 1):
            a.append(b[i - 1])
            a.append(b[-i])
        # NOTE(review): relies on the loop variable after the loop and fails
        # for len(b) < 2 — confirm the intended handling of short trains.
        a.append(b[i])
        k_space_list_with_zero = a

    # Each subsequent excitation acquires the same pattern shifted by +1.
    k_space_order_filing = [k_space_list_with_zero]
    for i in range(n_ex - 1):
        k_space_order_filing.append([k + i + 1 for k in k_space_list_with_zero])

    return k_space_order_filing

+ 0 - 0
LF_scanner/pypulseq/utilities/__init__.py


+ 39 - 0
LF_scanner/pypulseq/utilities/magn_prep/FS_CHESS_block.py

@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+"""
+A subroutine to add FS block.
+Requires the params structure as input.
+
+Need to think of the required output (pulse sequence variable itself?)
+
+@author: petrm
+"""
+
+#imports
+from LF_scanner.pypulseq.make_gauss_pulse import make_gauss_pulse
+from LF_scanner.pypulseq.make_trapezoid import make_trapezoid
+from LF_scanner.pypulseq.calc_rf_center import calc_rf_center
+from LF_scanner.pypulseq.calc_duration import calc_duration
+import numpy as np
+
def FS_CHESS_block(params, scanner_parameters, gz90):
    # Build a CHESS fat-saturation block: a frequency-selective gauss pulse at
    # the fat resonance plus x/y spoiler gradients (delayed to start after the RF).
    #
    # params : dict — reads 'gamma', 'BW_sat', 'dG'; NOTE: also mutates the
    #     caller's dict ('B0', 'FS_sat_ppm', 'FS_pulse_duration' are written
    #     here until they are exposed in the GUI).
    # scanner_parameters : system limits passed to the make_* helpers.
    # gz90 : slice-select gradient; its area scales the spoiler area (4x).
    # Returns: (rf_fs, gx_fs, gy_fs).
    flip_fs = round(110 * np.pi / 180, 3) #TODO ad parameter to GUI
    params['B0'] = 1.5  # TODO add to GUI
    params['FS_sat_ppm'] = -3.30  # TODO add to GUI
    params['FS_pulse_duration'] = 8e-3  # TODO add to GUI
    # Saturation offset in Hz derived from the ppm shift at B0
    FS_sat_frequency = params['B0'] * 1e-6 * params['FS_sat_ppm'] * params['gamma']


    rf_fs = make_gauss_pulse(flip_angle=flip_fs, system=scanner_parameters, duration=params['FS_pulse_duration'],
                             bandwidth=abs(params['BW_sat']), freq_offset=FS_sat_frequency)
    #TODO
    #rf_fs.phaseOffset=-2*pi*rf_fs.freqOffset*mr.calcRfCenter(rf_fs)
    # Spoilers start after the RF (delay = RF duration)
    gx_fs = make_trapezoid(channel="x", system=scanner_parameters, delay=calc_duration(rf_fs),
                           area= 4*gz90.area, rise_time=params['dG'])
    gy_fs = make_trapezoid(channel="y", system=scanner_parameters, delay=calc_duration(rf_fs),
                           area= 4*gz90.area, rise_time=params['dG'])

    return rf_fs, gx_fs, gy_fs
+
+
+

+ 32 - 0
LF_scanner/pypulseq/utilities/magn_prep/IR_block.py

@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+"""
+A subroutine to add IR block.
+Requires the params structure as input.
+
+Need to think of the required output (pulse sequence variable itself?)
+
+@author: petrm
+"""
+
+#imports
+from math import pi
+import numpy as np
+
+from LF_scanner.pypulseq.make_sinc_pulse import make_sinc_pulse
+from LF_scanner.pypulseq.make_delay import make_delay
+
+
def IR_block(params, scanner_parameters):
    """
    Build an inversion-recovery preparation block with its TI delay.

    Parameters
    ----------
    params : dict
        Sequence parameters; reads 't_ref', 'sl_thkn', 't_BW_product_ref', 'TI'.
    scanner_parameters : Opts
        System limits (provides grad_raster_time).

    Returns
    -------
    rf_ir, gz_ir, delay_IR
        The 180-degree sinc inversion pulse, its slice-select gradient and
        the inversion-time delay event.
    """
    #params['IR_time'] = 0.140  # STIR # TODO add to GUI
    #params['IR_time'] = 2.250  # FLAIR # TODO add to GUI
    # BUG FIX: round(180 * pi / 180) rounded pi down to the integer 3
    # (~172 degrees instead of 180); keep 3 decimals, matching the style of
    # FS_CHESS_block's flip-angle computation.
    flip_ir = round(180 * pi / 180, 3) # TODO add to GUI
    rf_ir, gz_ir, _ = make_sinc_pulse(flip_angle=flip_ir, system=scanner_parameters, duration=params['t_ref'],
                                      slice_thickness=params['sl_thkn'], apodization=0.5,
                                      time_bw_product=round(params['t_BW_product_ref'], 8), phase_offset=90 * pi / 180,
                                      return_gz=True)
    # Round TI up to the gradient raster before building the delay event.
    delay_IR = np.ceil(params['TI'] / scanner_parameters.grad_raster_time) * scanner_parameters.grad_raster_time
    delay_IR = make_delay(delay_IR)

    return rf_ir, gz_ir, delay_IR
+

+ 43 - 0
LF_scanner/pypulseq/utilities/magn_prep/SPAIR_block.py

@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+"""
+A subroutine to add SPAIR fat suppression block.
+Requires the params structure as input.
+
+Need to think of the required output (pulse sequence variable itself?)
+
+@author: petrm
+"""
+
+#imports
+from math import pi
+import numpy as np
+
+from LF_scanner.pypulseq.make_delay import make_delay
+from LF_scanner.pypulseq.make_gauss_pulse import make_gauss_pulse
+from LF_scanner.pypulseq.make_trapezoid import make_trapezoid
+from LF_scanner.pypulseq.calc_duration import calc_duration
+
def SPAIR_block(params, scanner_parameters, gz90):
    """
    Build a SPAIR fat-suppression block: a frequency-selective 180-degree
    gauss pulse with x/y spoiler gradients, followed by an inversion delay.

    Parameters
    ----------
    params : dict
        Sequence parameters; reads 'gamma', 'dG', 'TI'. NOTE: also mutates
        the caller's dict ('B0', 'FS_sat_ppm', 'FS_pulse_duration', 'BW_sat'
        are written here until they are exposed in the GUI).
    scanner_parameters : Opts
        System limits (provides grad_raster_time).
    gz90 : SimpleNamespace
        Slice-select gradient; its area (x10) scales the spoilers.

    Returns
    -------
    rf_SPAIR, gx_SPAIR, gy_SPAIR, delay_IR
    """
    params['B0'] = 1.5  # TODO add to GUI
    params['FS_sat_ppm'] = -3.30  # TODO add to GUI
    params['FS_pulse_duration'] = 0.01  # TODO add to GUI
    #params['IR_time'] = 0.140  # SPAIR # TODO add to GUI
    params['BW_sat'] = -176.26464
    g_rf_area = gz90.area * 10

    # Saturation offset in Hz derived from the ppm shift at B0
    FS_sat_frequency = params['B0'] * 1e-6 * params['FS_sat_ppm'] * params['gamma']
    # BUG FIX: round(180 * pi / 180) rounded pi down to the integer 3
    # (~172 degrees instead of 180); keep 3 decimals, matching the style of
    # FS_CHESS_block's flip-angle computation.
    flip_SPAIR = round(180 * pi / 180, 3)

    rf_SPAIR = make_gauss_pulse(flip_angle=flip_SPAIR, system=scanner_parameters, duration=params['FS_pulse_duration'],
                             bandwidth=abs(params['BW_sat']), freq_offset=FS_sat_frequency)
    # Spoilers start after the RF (delay = RF duration)
    gx_SPAIR = make_trapezoid(channel="x", system=scanner_parameters, delay=calc_duration(rf_SPAIR),
                           area= g_rf_area, rise_time=params['dG'])
    gy_SPAIR = make_trapezoid(channel="y", system=scanner_parameters, delay=calc_duration(rf_SPAIR),
                           area= g_rf_area, rise_time=params['dG'])

    # Round TI up to the gradient raster before building the delay event.
    delay_IR = np.ceil(params['TI'] / scanner_parameters.grad_raster_time) * scanner_parameters.grad_raster_time
    delay_IR = make_delay(delay_IR)

    return rf_SPAIR, gx_SPAIR, gy_SPAIR, delay_IR
+

+ 0 - 0
LF_scanner/pypulseq/utilities/magn_prep/__init__.py


+ 68 - 0
LF_scanner/pypulseq/utilities/magn_prep/magn_prep.py

@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+"""
+A subroutine to calculate duration of any magnetisation preparation block.
+Requires the params structure as input.
+
+@author: petrm
+"""
+#imports
# NOTE: in this commit the package is vendored under LF_scanner/ — the old
# MRI_seq root does not exist here, so these imports must use LF_scanner
# like every other module in the tree.
from LF_scanner.pypulseq.utilities.magn_prep.FS_CHESS_block import FS_CHESS_block
from LF_scanner.pypulseq.utilities.magn_prep.SPAIR_block import SPAIR_block
from LF_scanner.pypulseq.utilities.magn_prep.IR_block import IR_block
from LF_scanner.pypulseq.calc_duration import calc_duration
+
+
def magn_prep_duration(params, scanner_parameters, gz90):
    """Return the total duration (s) of the configured magnetisation-preparation block."""
    # TODO: expose these switches in the GUI instead of hard-coding them here.
    params['FS'] = True
    params['SPAIR'] = False
    params['IR'] = False

    if params['FS']:
        # CHESS fat saturation: the spoiler gradient spans the whole block
        # (its delay already covers the RF).
        _, gx_fs, _ = FS_CHESS_block(params, scanner_parameters, gz90)
        return calc_duration(gx_fs)

    if params['SPAIR']:
        # SPAIR: saturation block plus the inversion delay.
        _, gx_spair, _, delay_ir = SPAIR_block(params, scanner_parameters, gz90)
        return calc_duration(gx_spair) + calc_duration(delay_ir)

    if params['IR']:
        # Inversion recovery: the longer of RF/gradient, plus the TI delay.
        rf_ir, gz_ir, delay_ir = IR_block(params, scanner_parameters)
        return max(calc_duration(rf_ir), calc_duration(gz_ir)) + calc_duration(delay_ir)

    # No preparation configured.
    return 0
+
def magn_prep_add_block(params, scanner_parameters, gz90, seq):
    """Append the configured magnetisation-preparation block to `seq` and return it."""
    # TODO: expose these switches in the GUI instead of hard-coding them here.
    params['FS'] = True
    params['SPAIR'] = False
    params['IR'] = False

    if params['FS']:
        rf_fs, gx_fs, gy_fs = FS_CHESS_block(params, scanner_parameters, gz90)
        seq.add_block(gx_fs, gy_fs, rf_fs)
    elif params['SPAIR']:
        rf_spair, gx_spair, gy_spair, delay_ir = SPAIR_block(params, scanner_parameters, gz90)
        seq.add_block(gx_spair, gy_spair, rf_spair)
        seq.add_block(delay_ir)
    elif params['IR']:
        # TODO add correct offset from correct slice
        rf_ir, gz_ir, delay_ir = IR_block(params, scanner_parameters)
        seq.add_block(gz_ir, rf_ir)
        seq.add_block(delay_ir)

    return seq

+ 17 - 0
LF_scanner/pypulseq/utilities/phase_grad_utils.py

@@ -0,0 +1,17 @@
+import numpy as np
+
+
def create_k_steps(k_span, steps):
    """Return `steps` phase-encode offsets spanning +/- k_span/2, high to low.

    The samples are centred differently for even and odd step counts (for an
    even count the zero line is included; for an odd count the grid is shifted
    by half a step) and are returned in descending order.
    """
    half = steps / 2
    # Centre index: steps/2 for an even count, (steps+1)/2 for an odd one.
    center = half if steps % 2 == 0 else (steps + 1) / 2
    # Descending integer grid steps, steps-1, ..., 1 (equivalent to the
    # flipped arange with the last element dropped).
    grid = np.arange(steps, 0, -1)
    return (grid - center) / half * (k_span * 0.5)

+ 188 - 0
LF_scanner/pypulseq/utilities/standart_RF.py

@@ -0,0 +1,188 @@
+# -*- coding: utf-8 -*-
+"""
+A subroutine functions to create different excitation and refocusing
+pulses accompanied by combined SS and spoil gradients.
+Requires the params structure as input.
+
+@author: petrm
+"""
+
+
+# import
+from MRI_seq.pypulseq.make_sinc_pulse import make_sinc_pulse
+from MRI_seq.pypulseq.make_trapezoid import make_trapezoid
+from MRI_seq.pypulseq.make_extended_trapezoid import make_extended_trapezoid
+import numpy as np
+
+
+# def tse_excitation_grad(params, scanner_parameters, area_gz_spoil, flip180):
+#
+#    return
+
def refocusing_grad(params, scanner_parameters, area_gz_spoil, flip180, rf180_phase, spoil_duration, united: bool):
    """Create the 180-degree slice-selective refocusing pulse and its z gradients.

    Builds the sinc refocusing pulse, its slice-select (SS) gradient and a
    crusher (spoiler) gradient on each side of the pulse.

    Parameters
    ----------
    params : dict
        Sequence parameters; reads 't_ref', 'sl_thkn', 't_BW_product_ref',
        'rf_dead_time' and 'dG'.
    scanner_parameters :
        System/hardware description (provides grad_raster_time and gradient
        limits for the make_* helpers).
    area_gz_spoil : float
        Area of each crusher gradient (G_crs) around the 180 pulse.
    flip180 : float
        Refocusing flip angle.
    rf180_phase : float
        Phase offset of the refocusing pulse.
    spoil_duration : 'min' or number
        'min' sizes the crushers from params['dG'] rise/flat times; any other
        value is taken as a duration in seconds, rounded up to the gradient
        raster.
    united : bool
        If True, return the crushers merged with the SS gradient as three
        extended trapezoids (ramp-up+crusher, SS flat-top, crusher+ramp-down);
        if False, return the separate trapezoid events.

    Returns
    -------
    (rf180, gz_sp1, gz_sp2, gz_sp3) when united is True, otherwise
    (rf180, gz_spoil1, gz180, gz_spoil2).
    """
    # Create 180 degree SS refocusing pulse with SS and spoiled gradients
    rf180, gz_ref, _ = make_sinc_pulse(
        flip_angle=flip180,
        system=scanner_parameters,
        duration=params['t_ref'],
        slice_thickness=params['sl_thkn'],
        apodization=0.5,
        time_bw_product=round(params['t_BW_product_ref'], 8),
        phase_offset=rf180_phase,
        use="refocusing",
        return_gz=True,
    )
    # SS gradient flat-top is widened by the RF dead time on both sides so the
    # pulse (including dead time) fits on the plateau.
    gz180 = make_trapezoid(channel="z", system=scanner_parameters, amplitude=gz_ref.amplitude,
                           flat_time=gz_ref.flat_time + 2*params['rf_dead_time'])

    # spoil gradient around 180 RF pulse - G_crs
    # t_gz_spoil = (np.ceil(params['t_ref'] / 2 / scanner_parameters.grad_raster_time)
    #               * scanner_parameters.grad_raster_time)

    if spoil_duration == 'min':
        # Shortest possible crushers: timing fixed by params['dG'].
        gz_spoil1 = make_trapezoid(channel='z', system=scanner_parameters, area=area_gz_spoil,
                                   rise_time=params['dG'], flat_time=params['dG'])
        gz_spoil2 = make_trapezoid(channel='z', system=scanner_parameters, area=area_gz_spoil,
                                   rise_time=params['dG'], flat_time=params['dG'])
    else:
        # Fixed-duration crushers, rounded up to the gradient raster.
        spoil_duration = float(spoil_duration)
        spoil_duration = np.ceil(spoil_duration / scanner_parameters.grad_raster_time) * scanner_parameters.grad_raster_time
        gz_spoil1 = make_trapezoid(channel='z', system=scanner_parameters, area=area_gz_spoil,
                                   duration=spoil_duration)
        gz_spoil2 = make_trapezoid(channel='z', system=scanner_parameters, area=area_gz_spoil,
                                   duration=spoil_duration)

    # SS refocusing gradient with spoilers

    # Part 1: ramp 0 -> crusher plateau -> down to the SS amplitude, so the
    # crusher flows directly into the slice-select gradient.
    gz_sp1_times = np.array(
        [
            0,
            gz_spoil1.rise_time,
            gz_spoil1.rise_time + gz_spoil1.flat_time,
            gz_spoil1.rise_time + gz_spoil1.flat_time + gz_spoil1.fall_time
        ]
    )
    gz_sp1_amp = np.array(
        [
            0,
            gz_spoil1.amplitude,
            gz_spoil1.amplitude,
            gz180.amplitude
        ]
    )
    gz_sp1 = make_extended_trapezoid(channel='z', system=scanner_parameters, times=gz_sp1_times, amplitudes=gz_sp1_amp)

    # Part 2: constant SS plateau under the RF pulse.
    gz_sp2_times = np.array(
        [
            0,
            gz180.flat_time
        ]
    )
    gz_sp2_amp = np.array(
        [
            gz180.amplitude,
            gz180.amplitude
        ]
    )
    gz_sp2 = make_extended_trapezoid(channel='z', system=scanner_parameters, times=gz_sp2_times, amplitudes=gz_sp2_amp)

    # Part 3: mirror of part 1 — SS amplitude -> crusher plateau -> back to 0.
    gz_sp3_times = np.array(
        [
            0,
            gz_spoil2.rise_time,
            gz_spoil2.rise_time + gz_spoil2.flat_time,
            gz_spoil2.rise_time + gz_spoil2.flat_time + gz_spoil2.fall_time
        ]
    )

    gz_sp3_amp = np.array(
        [
            gz180.amplitude,
            gz_spoil2.amplitude,
            gz_spoil2.amplitude,
            0
        ]
    )
    gz_sp3 = make_extended_trapezoid(channel='z', system=scanner_parameters, times=gz_sp3_times, amplitudes=gz_sp3_amp)

    if united:
        return rf180, gz_sp1, gz_sp2, gz_sp3
    else:
        return rf180, gz_spoil1, gz180, gz_spoil2
+
def readout_grad(params, scanner_parameters, spoil_duration, united: bool):
    """Create the readout (x) gradient and its surrounding spoiler gradients.

    Parameters
    ----------
    params : dict
        Reads 'BW_pixel', 'Nf', 'FoV_f' and 'dG'.
    scanner_parameters :
        System/hardware description (provides grad_raster_time).
    spoil_duration : 'min' or number
        'min' sizes the spoiler (G_crr) from params['dG']; otherwise the value
        is a duration in seconds rounded up to the gradient raster.
    united : bool
        If True, return the spoilers merged with the readout gradient as three
        extended trapezoids; if False, return the separate trapezoid events.

    Returns
    -------
    (gx_sp1, gx_sp2, gx_sp3) when united is True, otherwise
    (gx_spoil, gx, gx_spoil).
    """
    raster = scanner_parameters.grad_raster_time

    # Readout gradient: flat area set by the k-space extent Nf / FoV_f,
    # flat time by the per-pixel bandwidth, rounded up to the raster.
    readout_time = round(1 / params['BW_pixel'], 8)
    k_read = np.double(params['Nf']) / np.double(params['FoV_f'])
    flat_time = np.ceil(readout_time / raster) * raster
    gx = make_trapezoid(channel='x', system=scanner_parameters, flat_area=k_read,
                        flat_time=flat_time)

    # Spoiler on the readout axis (G_crr) with the same area as the readout.
    if spoil_duration == 'min':
        gx_spoil = make_trapezoid(channel='x', system=scanner_parameters, area=gx.area,
                                  flat_time=params['dG'], rise_time=params['dG'])
    else:
        rounded = np.ceil(float(spoil_duration) / raster) * raster
        gx_spoil = make_trapezoid(channel='x', system=scanner_parameters, area=gx.area,
                                  duration=rounded)

    # Merge spoiler + readout + spoiler into three extended trapezoids.
    # Both spoilers share the same timing, so one time vector serves parts 1 & 3.
    ramp_times = np.cumsum(
        [0, gx_spoil.rise_time, gx_spoil.flat_time, gx_spoil.fall_time]
    )

    # Part 1: 0 -> spoiler plateau -> readout amplitude.
    gx_sp1 = make_extended_trapezoid(
        channel='x', system=scanner_parameters, times=ramp_times,
        amplitudes=np.array([0, gx_spoil.amplitude, gx_spoil.amplitude, gx.amplitude])
    )

    # Part 2: constant readout plateau.
    gx_sp2 = make_extended_trapezoid(
        channel='x', system=scanner_parameters,
        times=np.array([0, gx.flat_time]),
        amplitudes=np.array([gx.amplitude, gx.amplitude])
    )

    # Part 3: readout amplitude -> spoiler plateau -> 0.
    gx_sp3 = make_extended_trapezoid(
        channel='x', system=scanner_parameters, times=ramp_times,
        amplitudes=np.array([gx.amplitude, gx_spoil.amplitude, gx_spoil.amplitude, 0])
    )

    if united:
        return gx_sp1, gx_sp2, gx_sp3
    else:
        return gx_spoil, gx, gx_spoil

+ 0 - 0
LF_scanner/pypulseq/utils/SAR/__init__.py


+ 0 - 0
LF_scanner/pypulseq/utils/__init__.py


+ 15 - 0
LF_scanner/pypulseq/utils/cumsum.py

@@ -0,0 +1,15 @@
def cumsum(a, b, c=None, d=None, e=None):
    """Return the running partial sums of the given addends as a tuple.

    Accepts two to five values; trailing ``None`` arguments (the defaults)
    are ignored. The first element of the result is ``a`` itself and each
    subsequent element adds the next argument, e.g.::

        cumsum(1, 2, 3) -> (1, 3, 6)

    Fixes over the previous version:
    - uses ``is None`` instead of ``!= None`` (identity test; ``!= None`` is
      elementwise for numpy arrays and un-idiomatic in general);
    - no longer crashes when a later argument is given while an earlier one
      is omitted (e.g. ``e`` set but ``d`` is None): summation simply stops
      at the first missing addend.
    """
    totals = [a]
    for term in (b, c, d, e):
        if term is None:
            break  # stop at the first omitted addend
        totals.append(totals[-1] + term)
    return tuple(totals)

+ 411 - 0
LF_scanner/pypulseq/utils/safe_pns_prediction.py

@@ -0,0 +1,411 @@
+# This code is a direct Python translation of the relevant functions in
+# https://github.com/filip-szczepankiewicz/safe_pns_prediction/ to perform
+# PNS calculations with pypulseq
+#
+# A small modification was made to safe_plot to plot long sequences better
+
+
+# BSD 3-Clause License
+
+# Copyright (c) 2018, Filip Szczepankiewicz and Thomas Witzel
+# All rights reserved.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+
+# 1. Redistributions of source code must retain the above copyright notice, this
+#    list of conditions and the following disclaimer.
+
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions and the following disclaimer in the documentation
+#    and/or other materials provided with the distribution.
+
+# 3. Neither the name of the copyright holder nor the names of its
+#    contributors may be used to endorse or promote products derived from
+#    this software without specific prior written permission.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from types import SimpleNamespace
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+
def safe_example_hw():
    """Return SAFE model parameters for an EXAMPLE scanner (not real hardware).

    Units: tau1..tau3 in ms; stim_limit/stim_thresh in T/m/s; a1..a3 and
    g_scale are dimensionless.
    """
    def _axis(tau1, tau2, tau3, a1, a2, a3, stim_limit, stim_thresh, g_scale):
        # One gradient axis of the SAFE response model.
        return SimpleNamespace(tau1=tau1, tau2=tau2, tau3=tau3,
                               a1=a1, a2=a2, a3=a3,
                               stim_limit=stim_limit, stim_thresh=stim_thresh,
                               g_scale=g_scale)

    hw = SimpleNamespace()
    hw.name = 'MP_GPA_EXAMPLE'
    hw.checksum = '1234567890'
    hw.dependency = ''

    hw.x = _axis(0.20, 0.03, 3.00, 0.40, 0.10, 0.50, 30.0, 24.0, 0.35)
    hw.y = _axis(1.50, 2.50, 0.15, 0.55, 0.15, 0.30, 15.0, 12.0, 0.31)
    hw.z = _axis(2.00, 0.12, 1.00, 0.42, 0.40, 0.18, 25.0, 20.0, 0.25)

    return hw
+
+
def safe_example_gwf():
    """Return an example gradient waveform for PNS prediction.

    Returns
    -------
    gwf : ndarray (n x 3)
        Gradient waveform in T/m for the x, y, z axes.
    rf : ndarray (n,)
        Sign of the effective spin phase (+1 before, -1 after the refocusing
        point at sample 40).
    dt : float
        Sample raster; NOTE(review): the comment says ms but the value 1e-3
        is consistent with seconds as used by safe_gwf_to_pns — confirm.

    Waveform with some frequency matching by Filip Szczepankiewicz.
    Optimized in the NOW framework by Jens Sjölund et al.
    https://github.com/jsjol/NOW
    The optimization was Maxwell-compensated to remove effects of concomitant
    gradients: https://arxiv.org/ftp/arxiv/papers/1903/1903.03357.pdf
    """

    ## STE
    dt  = 1e-3 # ms

    # T/m
    gwf = 0.08 * np.array([
        [0,         0,         0],
        [-0.2005,    0.9334,    0.3029],
        [-0.2050,    0.9324,    0.3031],
        [-0.2146,    0.9302,    0.3032],
        [-0.2313,    0.9263,    0.3030],
        [-0.2589,    0.9193,    0.3019],
        [-0.3059,    0.9060,    0.2980],
        [-0.3892,    0.8767,    0.2883],
        [-0.3850,    0.7147,    0.3234],
        [-0.3687,    0.5255,    0.3653],
        [-0.3509,    0.3241,    0.4070],
        [-0.3323,    0.1166,    0.4457],
        [-0.3136,   -0.0906,    0.4783],
        [-0.2956,   -0.2913,    0.5019],
        [-0.2790,   -0.4793,    0.5139],
        [-0.2642,   -0.6491,    0.5118],
        [-0.2518,   -0.7957,    0.4939],
        [-0.2350,   -0.8722,    0.4329],
        [-0.2187,   -0.9111,    0.3541],
        [-0.2063,   -0.9409,    0.2747],
        [-0.1977,   -0.9627,    0.1933],
        [-0.1938,   -0.9768,    0.1080],
        [-0.1967,   -0.9820,    0.0159],
        [-0.2114,   -0.9751,   -0.0883],
        [-0.2292,   -0.9219,   -0.2150],
        [-0.2299,   -0.8091,   -0.3561],
        [-0.2290,   -0.6748,   -0.5011],
        [-0.2253,   -0.5239,   -0.6460],
        [-0.2178,   -0.3620,   -0.7868],
        [-0.2056,   -0.1948,   -0.9194],
        [-0.1391,   -0.0473,   -0.9908],
        [-0.0476,    0.0607,   -0.9987],
        [ 0.0215,    0.1452,   -0.9909],
        [ 0.0725,    0.2136,   -0.9759],
        [ 0.1114,    0.2709,   -0.9579],
        [ 0.1426,    0.3204,   -0.9383],
        [ 0.1690,    0.3641,   -0.9177],
        [ 0,         0,         0],
        [ 0,         0,         0],
        [ 0,         0,         0],
        [ 0,         0,         0],
        [ 0,         0,         0],
        [ 0,         0,         0],
        [ 0,         0,         0],
        [-0.3734,   -0.1768,    0.9125],
        [-0.3825,   -0.2310,    0.8965],
        [-0.3919,   -0.2895,    0.8752],
        [-0.4015,   -0.3543,    0.8465],
        [-0.4108,   -0.4290,    0.8065],
        [-0.4182,   -0.5202,    0.7469],
        [-0.4178,   -0.6423,    0.6451],
        [-0.3855,   -0.8173,    0.4321],
        [-0.3110,   -0.9418,    0.1401],
        [-0.2526,   -0.9669,   -0.0674],
        [-0.2100,   -0.9541,   -0.2213],
        [-0.1766,   -0.9227,   -0.3474],
        [-0.1491,   -0.8788,   -0.4570],
        [-0.1258,   -0.8239,   -0.5555],
        [-0.1056,   -0.7583,   -0.6459],
        [-0.0882,   -0.6809,   -0.7293],
        [-0.0734,   -0.5900,   -0.8061],
        [-0.0615,   -0.4830,   -0.8753],
        [-0.0533,   -0.3556,   -0.9349],
        [-0.0506,   -0.2005,   -0.9801],
        [-0.0575,   -0.0019,   -1.0000],
        [-0.0909,    0.2976,   -0.9521],
        [-0.3027,    0.9509,   -0.0860],
        [-0.2737,    0.9610,   -0.0692],
        [-0.2524,    0.9675,   -0.0596],
        [-0.2364,    0.9719,   -0.0533],
        [-0.2245,    0.9749,   -0.0490],
        [-0.2158,    0.9770,   -0.0459],
        [-0.2097,    0.9785,   -0.0439],
        [-0.2058,    0.9794,   -0.0426],
        [-0.2039,    0.9798,   -0.0420],
        [ 0,         0,         0]
        ])

    # Effective spin phase sign flips after the refocusing gap (sample 40).
    rf = np.ones(gwf.shape[0])
    rf[40:] = -1

    return gwf, rf, dt
+
+
def safe_hw_check(hw):
    """Validate a SAFE hardware description.

    Checks that the branch weights a1+a2+a3 sum to 1 on every axis and that
    all required fields are present; raises ValueError otherwise.
    """
    # Weight consistency, axis by axis (x first, matching original order).
    for axis_name in ('x', 'y', 'z'):
        axis = getattr(hw, axis_name)
        if abs(axis.a1 + axis.a2 + axis.a3 - 1) > 0.001:
            raise ValueError('Hardware specification a1+a2+a3 must be equal to 1!')

    required_fields = ('stim_limit', 'stim_thresh', 'tau1', 'tau2', 'tau3',
                       'a1', 'a2', 'a3', 'g_scale')

    # Field presence for every axis.
    for axis_name in ('x', 'y', 'z'):
        if not hasattr(hw, axis_name):
            raise ValueError(f"'{axis_name}' missing in hardware specification")

        axis = getattr(hw, axis_name)
        for field in required_fields:
            if not hasattr(axis, field):
                raise ValueError(f"'{axis_name}.{field}' missing in hardware specification")
+
+
def safe_longest_time_const(hw):
    """Return the longest filter time constant (ms) over all axes.

    Useful for sizing the zero padding applied before PNS prediction so the
    low-pass responses have time to settle.
    """
    return max(getattr(getattr(hw, axis), f'tau{i}')
               for axis in ('x', 'y', 'z')
               for i in (1, 2, 3))
+
+
def safe_pns_model(dgdt, dt, hw):
    """Single-axis SAFE PNS response, in percent of the stimulation limit.

    Parameters
    ----------
    dgdt : ndarray (n,)
        Gradient slew rate in T/m/s.
    dt : float
        Sample raster in s (the filter time constants tau1..tau3 are in ms,
        hence the dt*1000 conversion below).
    hw :
        One axis of the hardware description (a1..a3, tau1..tau3,
        stim_limit, g_scale).

    Based on the SAFE abstract:
    "SAFE-Model - A New Method for Predicting Peripheral Nerve Stimulations
    in MRI" by Franz X. Herbank and Matthias Gebhardt, abstract #2007,
    Proc. Intl. Soc. Mag. Res. Med. 8, 2000, Denver, Colorado, USA.
    https://cds.ismrm.org/ismrm-2000/PDF7/2007.PDF
    Originally coded by Thomas Witzel (Martinos Center, MGH, HMS) and
    adapted/expanded/corrected by Filip Szczepankiewicz (LMI BWH / Lund).

    UPDATE 210720: an earlier empirical 1/pi correction factor was replaced
    by the gradient scale factor hw.g_scale (defined in the .asc file);
    validated with the help of Maxim Zaitsev. - FSz
    """
    dt_ms = dt * 1000

    # Three parallel RC low-pass branches; branches 1 and 3 rectify after
    # filtering while branch 2 rectifies before filtering.
    branch1 = hw.a1 * abs(safe_tau_lowpass(dgdt, hw.tau1, dt_ms))
    branch2 = hw.a2 * safe_tau_lowpass(abs(dgdt), hw.tau2, dt_ms)
    branch3 = hw.a3 * abs(safe_tau_lowpass(dgdt, hw.tau3, dt_ms))

    # Normalize by the stimulation limit and express as percent.
    return (branch1 + branch2 + branch3) / hw.stim_limit * hw.g_scale * 100
+
+
def safe_tau_lowpass(dgdt, tau, dt, eps=1e-16):
    """Apply a first-order RC low-pass filter with time constant tau.

    tau and dt must be in the same unit (s or ms). The SAFE abstract by
    Hebrank et al. only specifies "lowpass with time-constant tau"; a simple
    RC filter is used here, implemented as a truncated-exponential
    convolution instead of a sample-by-sample loop.

    The kernel is truncated once the exponential decay drops below `eps`,
    so short time constants stay cheap.

    UPDATE 230206: a factor alpha was missing on the first sample and has
    been corrected (found by Oliver Schad). - FSz
    """
    alpha = dt / (tau + dt)
    decay = 1 - alpha

    # Kernel length needed to reach the accuracy `eps`, capped at the
    # signal length.
    n_taps = min(round(np.log(eps) / np.log(decay)), dgdt.shape[0])
    kernel = decay ** np.arange(n_taps)

    # alpha * sum_j (1-alpha)^j * x[k-j]  ==  the recursive RC filter.
    return alpha * np.convolve(dgdt, kernel)[:dgdt.shape[0]]
+
+
def safe_gwf_to_pns(gwf, rf, dt, hw, do_padding=True):
    """Predict per-axis PNS levels for a gradient waveform.

    Parameters
    ----------
    gwf : ndarray (n x 3)
        Gradient waveform in T/m.
    rf : ndarray (n,)
        Spin-phase sign track (padded alongside gwf, returned in `res`).
    dt : float
        Sample raster in s.
    hw :
        Hardware configuration / PNS response description, e.g. from
        safe_example_hw().
    do_padding : bool
        If True, zero-pad based on the longest filter decay time so the
        low-pass responses settle.

    Returns
    -------
    pns : ndarray ((n-1) x 3)
        Relative stimulation per axis in percent.
    res : SimpleNamespace
        Bundle of inputs and intermediates (pns, gwf, rf, dgdt, dt, hw).

    Based on the SAFE abstract by Herbank & Gebhardt (ISMRM 2000, #2007),
    https://cds.ismrm.org/ismrm-2000/PDF7/2007.PDF; main model coded by
    Thomas Witzel, adapted/expanded by Filip Szczepankiewicz.
    """
    if do_padding:
        # Pad with zeros: a short lead-in and a settle time of four times
        # the longest filter constant after the waveform.
        settle = safe_longest_time_const(hw) * 4 / 1000  # s
        n_pre = round(settle / 4 / dt)
        n_post = round(settle / dt)

        gwf = np.pad(gwf, ((n_pre, n_post), (0, 0)))
        rf = np.pad(rf, (n_pre, n_post))

    safe_hw_check(hw)

    # Slew rate, then the SAFE response per axis.
    dgdt = np.diff(gwf, axis=0) / dt
    pns = np.zeros(dgdt.shape)
    pns[:, 0] = safe_pns_model(dgdt[:, 0], dt, hw.x)
    pns[:, 1] = safe_pns_model(dgdt[:, 1], dt, hw.y)
    pns[:, 2] = safe_pns_model(dgdt[:, 2], dt, hw.z)

    # Export relevant parameters for downstream inspection/plotting.
    res = SimpleNamespace(pns=pns, gwf=gwf, rf=rf, dgdt=dgdt, dt=dt, hw=hw)

    return pns, res
+
def safe_plot(pns, dt=None, envelope=True, envelope_points=500):
    """Plot the relative PNS waveform per axis plus its vector norm.

    Parameters
    ----------
    pns : ndarray (n x 3)
        Relative PNS waveform in percent (one column per gradient axis).
    dt : float, optional
        Time step in seconds; if omitted the x axis is in arbitrary units.
    envelope : bool
        Downsample long traces to their blockwise maximum so plots of long
        sequences stay intelligible (modification over the original
        safe_plot).
    envelope_points : int
        Approximate number of points kept when enveloping.

    BUGFIX: comparisons with None now use `is`/`is not` instead of
    `==`/`!=` — identity is the correct test and `!=` misbehaves for
    array-like operands.
    """
    pnsnorm = np.sqrt((pns**2).sum(axis=1))

    # FZ: Added option to plot the moving maximum of pns and pnsnorm to keep
    #     plots for long sequences intelligible
    if envelope and pns.shape[0] > envelope_points:
        N = int(np.ceil(pns.shape[0] / envelope_points))
        if dt is not None:
            dt *= N  # block maximum stretches the effective sample time

        # Zero-pad so the length divides evenly before the block-max reshape.
        if pns.shape[0] % N != 0:
            pns = np.concatenate((pns, np.zeros((N - pns.shape[0] % N, pns.shape[1]))))
            pnsnorm = np.concatenate((pnsnorm, np.zeros((N - pnsnorm.shape[0] % N))))

        pns = pns.reshape(pns.shape[0]//N, N, pns.shape[1])
        pns = pns.max(axis=1)
        pnsnorm = pnsnorm.reshape(pnsnorm.shape[0]//N, N)
        pnsnorm = pnsnorm.max(axis=1)

    if dt is None:
        ttot    = 1 # au
        xlabstr = 'Time [a.u.]'
    else:
        ttot = pns.shape[0] * dt * 1000 # ms
        xlabstr = 'Time [ms]'

    t = np.linspace(0, ttot, pns.shape[0])

    plt.plot(t, pns[:,0], 'r-',
             t, pns[:,1], 'g-',
             t, pns[:,2], 'b-',
             t, pnsnorm , 'k-')

    plt.ylim([0, 120])
    plt.xlim([min(t), max(t)])

    plt.title(f'Predicted PNS ({max(pnsnorm):0.0f}%)')

    plt.xlabel(xlabstr)
    plt.ylabel('Relative stimulation [%]')

    # Dotted guide line at the overall maximum of the norm trace.
    plt.plot([0, max(t)], [max(pnsnorm), max(pnsnorm)], 'k:')

    plt.legend([f'X ({max(pns[:,0]):0.0f}%)',
                f'Y ({max(pns[:,1]):0.0f}%)',
                f'Z ({max(pns[:,2]):0.0f}%)',
                f'nrm ({max(pnsnorm):0.0f}%)'], loc='best')
+
+
def safe_example():
    """End-to-end demo: predict and plot PNS for the example waveform."""
    # Example gradient waveform and the matching example hardware response
    gwf, rf, dt = safe_example_gwf()
    hw = safe_example_hw()

    # Make sure the hardware parameters are consistent before prediction
    safe_hw_check(hw)
    # (library validation of hw would go here: safe_hw_verify(hw))

    # Predict PNS levels and plot the result
    pns, res = safe_gwf_to_pns(gwf, rf, dt, hw, 1)
    safe_plot(pns, dt)
+
+
+if __name__ == '__main__':
+    safe_example()

+ 1 - 0
LF_scanner/pypulseq/utils/siemens/__init__.py

@@ -0,0 +1 @@
+

+ 105 - 0
LF_scanner/pypulseq/utils/siemens/asc_to_hw.py

@@ -0,0 +1,105 @@
+from types import SimpleNamespace
+from typing import List
+import numpy as np
+
+
def asc_to_acoustic_resonances(asc : dict) -> List[dict]:
    """
    Convert ASC dictionary from readasc to a list of acoustic resonances.

    Parameters
    ----------
    asc : dict
        ASC dictionary, see readasc

    Returns
    -------
    List[dict]
        One dict per resonance with 'frequency' and 'bandwidth' keys;
        entries with zero frequency are dropped.
    """
    # Newer files carry the values at the top level; older ones nest them
    # under the first GPA parameter set.
    if 'aflGCAcousticResonanceFrequency' in asc:
        freqs = asc['aflGCAcousticResonanceFrequency']
        bw = asc['aflGCAcousticResonanceBandwidth']
    else:
        gc_params = asc['asGPAParameters'][0]['sGCParameters']
        freqs = gc_params['aflAcousticResonanceFrequency']
        bw = gc_params['aflAcousticResonanceBandwidth']

    resonances = []
    for frequency, bandwidth in zip(freqs.values(), bw.values()):
        if frequency != 0:
            resonances.append(dict(frequency=frequency, bandwidth=bandwidth))
    return resonances
+
def asc_to_hw(asc : dict, cardiac_model : bool = False) -> SimpleNamespace:
    """
    Convert ASC dictionary from readasc to SAFE hardware description.

    Parameters
    ----------
    asc : dict
        ASC dictionary, see readasc
    cardiac_model : bool
        Whether or not to read the cardiac stimulation model instead of the
        default PNS model (returns None if not available)

    Returns
    -------
    SimpleNamespace
        SAFE hardware description (axes x/y/z with tau1..3 in ms, a1..3
        weights and stimulation limit/threshold in T/m/s)
    """
    hw = SimpleNamespace()

    if 'asCOMP' in asc and 'tName' in asc['asCOMP']:
        hw.name = asc['asCOMP']['tName']
    else:
        hw.name = 'unknown'

    # Locate the PNS (or cardiac) parameter subtree.
    if 'GradPatSup' in asc:
        asc_pns = asc['GradPatSup']['Phys']['PNS']
    else:
        asc_pns = asc

    if cardiac_model:
        if 'GradPatSup' in asc and 'CarNS' in asc['GradPatSup']['Phys']:
            asc_pns = asc['GradPatSup']['Phys']['CarNS']
        else:
            return None

    # The per-axis fields only differ by the X/Y/Z suffix, so fill all three
    # axes with one loop instead of three copied blocks.
    for axis_name in ('x', 'y', 'z'):
        suffix = axis_name.upper()
        taus = asc_pns[f'flGSWDTau{suffix}']
        weights = asc_pns[f'flGSWDA{suffix}']

        axis = SimpleNamespace()
        axis.tau1 = taus[0]  # ms
        axis.tau2 = taus[1]  # ms
        axis.tau3 = taus[2]  # ms
        axis.a1 = weights[0]
        axis.a2 = weights[1]
        axis.a3 = weights[2]
        axis.stim_limit = asc_pns[f'flGSWDStimulationLimit{suffix}']  # T/m/s
        axis.stim_thresh = asc_pns[f'flGSWDStimulationThreshold{suffix}']  # T/m/s
        setattr(hw, axis_name, axis)

    if 'asGPAParameters' in asc:
        gc_params = asc['asGPAParameters'][0]['sGCParameters']
        hw.x.g_scale = gc_params['flGScaleFactorX']
        hw.y.g_scale = gc_params['flGScaleFactorY']
        hw.z.g_scale = gc_params['flGScaleFactorZ']
    else:
        print('Warning: Gradient scale factors not in ASC file: assuming 1/pi')
        hw.x.g_scale = 1/np.pi
        hw.y.g_scale = 1/np.pi
        hw.z.g_scale = 1/np.pi

    return hw

+ 97 - 0
LF_scanner/pypulseq/utils/siemens/readasc.py

@@ -0,0 +1,97 @@
+import re
+from typing import Tuple
+
def readasc(filename : str) -> Tuple[dict, dict]:
    """
    Reads Siemens ASC ascii-formatted textfile and returns a dictionary
    structure.
    E.g. a[0].b[2][3].c = "string"
    parses into:
      asc['a'][0]['b'][2][3]['c'] = "string"

    Parameters
    ----------
    filename : str
        Filename of the ASC file.

    Returns
    -------
    asc : dict
        Dictionary of ASC part of file.
    extra : dict
        Dictionary of other fields after "ASCCONV END"

    BUGFIX: the single-quoted value alternative in the parsing regex was
    `'[^']'` (exactly one character), so lines like `s = 'abc'` failed to
    match and — because they contain `=` — raised RuntimeError. It is now
    `'[^']*'`, symmetric with the double-quoted case. None comparisons were
    also switched to identity tests (`is not None`).
    """

    asc, extra = {}, {}

    # Read asc file and convert it into a dictionary structure
    with open(filename, 'r') as fp:
        end_of_asc = False

        for next_line in fp:
            next_line = next_line.strip()

            # Everything after the mrProt end marker goes into `extra`.
            if next_line == '### ASCCONV END ###':
                end_of_asc = True

            if next_line == '' or next_line[0] == '#':
                continue

            # Matches lines like 'a[0].b[2][3].c = "string" # comment'.
            # Note this assumes correct formatting, e.g. does not check whether
            # brackets match.
            match = re.match(r'^\s*([a-zA-Z0-9\[\]\._]+)\s*\=\s*(("[^"]*"|\'[^\']*\')|(\d+)|([0-9\.e\-]+))\s*((#|\/\/)(.*))?$', next_line)

            if match:
                field_name = match[1]

                # Keep track of where to put the value: base[assign_to] = value
                if end_of_asc:
                    base = extra
                else:
                    base = asc

                assign_to = None

                # Iterate over every segment of the field name
                parts = field_name.split('.')
                for p in parts:
                    # Update base so final assignment is like: base[assign_to][p] = value
                    if assign_to is not None and assign_to not in base:
                        base[assign_to] = {}
                    if assign_to is not None:
                        base = base[assign_to]

                    # Iterate over brackets
                    start = p.find('[')
                    if start != -1:
                        name = p[:start]
                        assign_to = name

                        while start != -1:
                            stop = p.find(']', start)
                            index = int(p[start+1:stop])

                            # Update base so final assignment is like: base[assign_to][p][index] = value
                            if assign_to not in base:
                                base[assign_to] = {}
                            base = base[assign_to]
                            assign_to = index

                            start = p.find('[', stop)
                    else:
                        assign_to = p

                # Depending on which regex section matched we can infer the value type
                if match[3]:
                    base[assign_to] = match[3][1:-1]   # quoted string: strip quotes
                elif match[4]:
                    base[assign_to] = int(match[4])
                elif match[5]:
                    base[assign_to] = float(match[5])
                else:
                    raise RuntimeError('This should not be reached')
            elif next_line.find('=') != -1:
                raise RuntimeError(f'Bug: ASC line with an assignment was not parsed correctly: {next_line}')

    return asc, extra

BIN
LF_scanner/rf_1.h5


BIN
LF_scanner/rf_2.h5


BIN
LF_scanner/rf_3.h5


BIN
LF_scanner/rf_4.h5


BIN
LF_scanner/rf_5.h5


BIN
LF_scanner/rf_6.h5


BIN
LF_scanner/rf_7.h5


BIN
LF_scanner/rf_8.h5


+ 0 - 0
LF_scanner/services/Protocol/__init__.py


+ 14 - 0
LF_scanner/services/Protocol/protocol.py

@@ -0,0 +1,14 @@
class Protocol:
    """Container for an ordered set of pulse sequences in a scan protocol.

    NOTE(review): all methods are unimplemented stubs that return 0 —
    presumably placeholders for future protocol-management logic; confirm
    intended semantics before relying on them.
    """

    def __init__(self, seq_number, name):
        # seq_number: identifier/position of the sequence within the protocol
        self.seq_number = seq_number
        # name: human-readable protocol name
        self.name = name

    def add_sequence(self):
        # TODO: not implemented yet; stub returns 0
        return 0

    def delete_sequence(self):
        # TODO: not implemented yet; stub returns 0
        return 0

    def interp_sequence(self):
        # TODO: not implemented yet; stub returns 0
        return 0

+ 4 - 0
LF_scanner/services/__init__.py

@@ -0,0 +1,4 @@
+# =========
+# PACKAGE-LEVEL IMPORTS
+# =========
+from LF_scanner.services import srv_stack

+ 348 - 0
LF_scanner/services/srv_interp.py

@@ -0,0 +1,348 @@
+# -*- coding: utf-8 -*-
+"""
+Created on 05/09/2024
+
+@author: spacexer
+"""
+from LF_scanner import pypulseq as pp
+import numpy as np
+from types import SimpleNamespace
+import json
+from yattag import Doc, indent
+
+
def seq_file_input(seq_file_name="empty.seq"):
    """Load a Pulseq ``.seq`` file and export its waveforms.

    :param seq_file_name: path to the ``.seq`` file to read
    :return: tuple of (the loaded ``pp.Sequence`` object,
             the waveform dictionary from ``waveforms_export()``)
    """
    sequence = pp.Sequence()
    sequence.read(file_path=seq_file_name)
    waveforms = sequence.waveforms_export()
    return sequence, waveforms
+
+
def output_seq(dict, param, path='test1/'):
    """
    Interpret a pypulseq waveform dictionary into the files required by the
    analog part of the MRI scanner.

    :param dict: waveform dictionary produced by ``Sequence.waveforms_export()``.
                 NOTE(review): the name shadows the built-in ``dict``; kept
                 unchanged for backward compatibility with existing callers.
    :param param: sequence parameter dictionary (raster times, gradient limits)
    :param path: output directory (must exist, with a trailing '/')

    :return: None; writes ``gx.txt``/``gy.txt``/``gz.txt`` and a binary RF
             file into *path*
    """
    # --- Gradients: convert times to raster units, amplitudes to DAC units ---
    loc_t_gx = gradient_time_convertation(param, dict['t_gx'])
    loc_t_gy = gradient_time_convertation(param, dict['t_gy'])
    loc_t_gz = gradient_time_convertation(param, dict['t_gz'])
    loc_gx = gradient_ampl_convertation(param, dict['gx'])
    loc_gy = gradient_ampl_convertation(param, dict['gy'])
    loc_gz = gradient_ampl_convertation(param, dict['gz'])
    # Keep only the first sample per time stamp before writing.
    gx_out = duplicates_delete(np.transpose([loc_t_gx, loc_gx]))
    gy_out = duplicates_delete(np.transpose([loc_t_gy, loc_gy]))
    gz_out = duplicates_delete(np.transpose([loc_t_gz, loc_gz]))
    np.savetxt(path + 'gx.txt', gx_out, fmt='%10.0f')
    np.savetxt(path + 'gy.txt', gy_out, fmt='%10.0f')
    np.savetxt(path + 'gz.txt', gz_out, fmt='%10.0f')
    # --- RF: quantize to signed 8-bit interleaved I/Q samples ---
    rf_raster_local = param['rf_raster_time']
    rf_out = radio_ampl_convertation(dict["rf"], rf_raster=rf_raster_local)
    # FIX: use a context manager so the file is closed even if a sample is
    # outside the signed-byte range and to_bytes() raises.
    with open(path + 'rf_' + str(rf_raster_local) + '_raster.bin', "wb") as file_rf:
        for byte in rf_out:
            file_rf.write(byte.to_bytes(1, byteorder='big', signed=1))

    # Kept for RF debugging:
    # np.savetxt(path + 'rf_time.txt', np.transpose(dict["t_rf"]))
    # np.savetxt(path + 'rf_ampl.txt', np.transpose(dict["rf"]))
    # plt.plot(dict["t_rf"][0:2000], np.real(dict["rf"][0:2000]), label="real")
    # plt.plot(dict["t_rf"][0:2000], np.imag(dict["rf"][0:2000]), label="image")
+
+
def radio_ampl_convertation(rf_ampl, rf_raster=1e-6):
    """Quantize a complex RF waveform into signed 8-bit I/Q integers.

    The waveform is normalized so the largest sample magnitude maps to 127;
    each sample is emitted as two integers: real part, then imaginary part.

    :param rf_ampl: sequence of complex RF amplitudes
    :param rf_raster: RF raster time in seconds (currently unused)
    :return: interleaved list ``[re0, im0, re1, im1, ...]`` in [-127, 127]
    """
    # TODO: sampling resize to raster different with seqgen
    rf_ampl_raster = 127
    # BUG FIX: the original used np.abs(max(rf_ampl)), which raises
    # TypeError for complex samples (complex numbers are unordered) and
    # picks the wrong peak when real samples are negative. Normalize by the
    # maximum magnitude instead.
    rf_ampl_maximum = np.max(np.abs(np.asarray(rf_ampl)))
    proportional_cf_rf = rf_ampl_raster / rf_ampl_maximum
    out_rf_list = []
    for sample in rf_ampl:
        out_rf_list.append(round(sample.real * proportional_cf_rf))
        out_rf_list.append(round(sample.imag * proportional_cf_rf))
    return out_rf_list
+
+
def duplicates_delete(loc_list):
    """Drop rows whose time stamp (column 0) has already been seen.

    A sentinel ``[0, 0]`` row is prepended, so any later row with time 0 is
    treated as a duplicate — this matches the original behaviour.

    :param loc_list: sequence of ``[time, amplitude]`` pairs
    :return: ``[[0, 0]]`` followed by the first row for each distinct time
    """
    new_list = [[0] * 2]
    # PERF FIX: track seen time stamps in a set (O(1) membership) instead of
    # rebuilding np.transpose(new_list) on every iteration (was O(n^2)).
    seen_times = {0}
    for row in loc_list:
        if row[0] not in seen_times:
            new_list.append(row)
            seen_times.add(row[0])
    return new_list
+
+
def gradient_time_convertation(param_loc, time_sample):
    """Convert gradient time points from seconds to raster units.

    :param param_loc: parameter dict with key ``'grad_raster_time'`` (seconds)
    :param time_sample: scalar or numpy array of time points in seconds
    :return: time points expressed in gradient-raster units
    """
    g_raster_time = param_loc['grad_raster_time']
    # BUG FIX: the original used `time_sample /= g_raster_time`, which for a
    # numpy array mutates the caller's data in place (it altered the
    # waveform dictionary passed to output_seq). Return a new value instead.
    return time_sample / g_raster_time
+
+
def gradient_ampl_convertation(param, gradient_herz):
    """
    Convert gradient amplitudes from Hz/m into the scanner's dimensionless
    DAC format (1 sign bit + 15 magnitude bits).

    :param param: parameter dict; ``param['G_amp_max']`` is the maximum
        gradient amplitude used as full scale
    :param gradient_herz: array of amplitudes in Hz/m

    :return: gradient_dimless: array of dimensionless DAC values

    """
    # Full scale of the 15-bit magnitude: 2**15 - 1 codes.
    amplitude_raster = 32767
    amplitude_max = param['G_amp_max']
    # Size of one DAC step in Hz/m.
    step_Hz_m = amplitude_max / amplitude_raster
    # NOTE(review): the factor 1000 presumably converts a mT-based scale —
    # TODO confirm against the firmware convention. Range check below is
    # disabled in the original and kept disabled here:
    # assert abs(any(gradient_dimless)) > 32768, 'Amplitude is higher than expected, check the rate number'
    return gradient_herz / step_Hz_m * 1000
+
+
def adc_correction(blocks_number_loc, seq_input_loc):
    """
    Helper that extracts the readout-gradient rise/fall times needed to
    correct ADC events.

    Every block is scanned; for each block containing an ADC event the
    rise/fall times of its gx gradient are recorded, so the values from the
    *last* ADC block win (same as the original behaviour).

    :param blocks_number_loc: total number of blocks in the sequence
                              (pulseq block indices are 1-based)
    :param seq_input_loc: the ``pp.Sequence`` to inspect
    :return:    rise_time: float, stored in pulseq, tied to the gradient
                    event type of the ADC block
                fall_time: float, same as rise_time
    :raises Exception: if no block contains an ADC event
    """
    rise_time, fall_time = None, None
    is_adc_inside = False
    # BUG FIX: the original iterated range(blocks_number_loc - 1) and thus
    # never inspected the final block (sibling code in synchronization()
    # iterates all blocks); block_index is 1-based, hence j + 1.
    for j in range(blocks_number_loc):
        iterable_block = seq_input_loc.get_block(block_index=j + 1)
        if iterable_block.adc is not None:
            is_adc_inside = True
            rise_time = iterable_block.gx.rise_time
            fall_time = iterable_block.gx.fall_time
    if not is_adc_inside:
        raise Exception("No ADC event found inside sequence")
    return rise_time, fall_time
+
+
def adc_event_edges(local_gate_adc):
    """
    Helper that locates the block indices delimiting the first ADC gate
    window for ADC-event correction.

    All entries except the last are scanned: the start is the first
    non-zero sample, the end is the first non-zero sample followed by a
    zero (falling edge).

    :return:    num_begin_l:    int, index of the block where the ADC
                                event starts (0 if none is found)
                num_finish_l:   int, index of the block where it ends
                                (1 if no falling edge is found)
    """
    last = len(local_gate_adc) - 1
    num_begin_l = next(
        (k for k in range(last) if local_gate_adc[k] != 0),
        0,
    )
    num_finish_l = next(
        (k for k in range(last)
         if local_gate_adc[k] != 0 and local_gate_adc[k + 1] == 0),
        1,
    )
    return num_begin_l, num_finish_l
+
+
def synchronization(sync_sequence, synchro_block_timer=20e-9, path='test1/', TR_DELAY_L=800e-9, RF_DELAY_L=800e-9,
                    START_DELAY_L=800e-9):
    """Derive per-block gate vectors (RF, T/R switch, ADC, gradient, clock)
    from a pypulseq sequence and write them to ``sync_v2.xml``; also emits
    picoscope acquisition parameters via :func:`picoscope_set`.

    :param sync_sequence: ``pp.Sequence`` whose block events are converted
        to gate signals
    :param synchro_block_timer: duration of one synchronization clock tick,
        seconds
    :param path: output directory for ``sync_v2.xml``
    :param TR_DELAY_L: T/R switch settling delay, seconds
    :param RF_DELAY_L: RF gate warm-up delay, seconds
    :param START_DELAY_L: delay before the sequence starts, seconds
    """
    ### MAIN LOOP ###
    MIN_BLOCK_TIME = 400e-9
    # Sanity checks: the start delay must absorb the RF delay, and each
    # hardware delay must cover at least one clock tick.
    assert START_DELAY_L >= RF_DELAY_L
    assert TR_DELAY_L >= synchro_block_timer
    assert RF_DELAY_L >= synchro_block_timer
    number_of_blocks = len(sync_sequence.block_events)
    gate_adc = [0]
    # NOTE(review): CONST_HACK_RF_DELAY is defined only under the
    # ``if __name__ == "__main__"`` guard of this module, so calling
    # synchronization() from an import raises NameError — TODO move the
    # constant to module level.
    gate_rf = [0] * CONST_HACK_RF_DELAY
    gate_tr_switch = [1]
    blocks_duration = [START_DELAY_L]
    adc_times_values = []
    adc_times_starts = []
    # Column layout of each sync_sequence.block_events entry:
    '''
    ID RF  GX  GY  GZ  ADC  EXT
    0    1   2   3   4   5    6
    '''
    added_blocks = 0
    for block_counter in range(number_of_blocks):
        is_not_adc_block = True

        if sync_sequence.block_events[block_counter + 1][5]:
            is_not_adc_block = False

            # ADC block: insert an extra settling block before the ADC
            # window by carving TR_DELAY_L out of the previous block, with
            # the T/R switch open (0) and the RF gate held at its value.
            gate_adc.append(0)
            gate_rf.append(gate_rf[-1])
            blocks_duration[-1] -= TR_DELAY_L
            blocks_duration.append(TR_DELAY_L)
            gate_tr_switch.append(0)
            added_blocks += 1

            gate_adc.append(1)
            gate_tr_switch.append(0)
        else:
            gate_tr_switch.append(1)
            gate_adc.append(0)

        if sync_sequence.block_events[block_counter + 1][1] and is_not_adc_block:
            # RF block: same trick — prepend an RF_DELAY_L warm-up block
            # carved out of the previous block, with the RF gate already on.
            gate_rf.append(1)
            gate_adc.append(gate_adc[-1])
            blocks_duration[-1] -= RF_DELAY_L
            blocks_duration.append(RF_DELAY_L)
            gate_tr_switch.append(gate_tr_switch[-1])
            added_blocks += 1

            gate_rf.append(1)

        else:
            gate_rf.append(0)

        current_block_dur = sync_sequence.block_durations[block_counter + 1]
        blocks_duration.append(current_block_dur)

    number_of_blocks += added_blocks
    # gate_gx = [1] * number_of_blocks
    # gate_gy = [1] * number_of_blocks
    # gate_gz = [1] * number_of_blocks
    '''
    test1 swap
    '''
    # assert any(block_times) < MIN_BLOCK_TIME, "ERROR: events in the current sequence are less than 400 ns"

    # Emit one XML tag per block for each gate vector; ADC start times and
    # durations are collected on the way for the picoscope configuration.
    doc, tag, text = Doc().tagtext()
    with tag('root'):
        with tag('ParamCount'):
            text(number_of_blocks)
        with tag('RF'):
            for RF_iter in range(number_of_blocks):
                with tag('RF' + str(RF_iter + 1)):
                    text(gate_rf[RF_iter])
        with tag('SW'):
            for SW_iter in range(number_of_blocks):
                with tag('SW' + str(SW_iter + 1)):
                    text(gate_tr_switch[SW_iter])
        with tag('ADC'):
            for ADC_iter in range(number_of_blocks):
                if gate_adc[ADC_iter] == 1:
                    adc_times_values.append(blocks_duration[ADC_iter])
                    adc_times_starts.append(sum(blocks_duration[0:ADC_iter]))
                with tag('ADC' + str(ADC_iter + 1)):
                    text(gate_adc[ADC_iter])
        with tag('GR'):
            # Only the very first block carries the gradient gate.
            with tag('GR1'):
                text(1)
            for GX_iter in range(1, number_of_blocks):
                with tag('GR'+ str(GX_iter + 1)):
                    text(0)
        with tag('CL'):
            # Block durations expressed in synchronization clock ticks; the
            # first block is pinned to the minimum block time.
            with tag('CL' + str(1)):
                text(int(MIN_BLOCK_TIME / synchro_block_timer))
            for CL_iter in range(1, number_of_blocks):
                with tag('CL' + str(CL_iter + 1)):
                    text(int(blocks_duration[CL_iter] / synchro_block_timer))

    result = indent(
        doc.getvalue(),
        indentation=' ' * 4,
        newline='\r'
    )
    sync_file = open(path + "sync_v2.xml", "w")
    sync_file.write(result)
    sync_file.close()

    picoscope_set(adc_times_values, adc_times_starts)
+
+
def picoscope_set(adc_val, adc_start, number_of_channels_l=8, sampling_freq_l=4e7, path='test1/'):
    """Write picoscope acquisition parameters to ``picoscope_params.xml``.

    :param adc_val: durations of the ADC windows, seconds
    :param adc_start: start times of the ADC windows, seconds
    :param number_of_channels_l: number of acquisition channels
    :param sampling_freq_l: sampling rate in Hz (40 MHz = 4e7 by default)
    :param path: output directory
    """
    # Convert each ADC window duration into a number of samples.
    adc_out_timings = [int(duration * sampling_freq_l) for duration in adc_val]

    doc, tag, text, line = Doc().ttl()
    with tag('root'):
        with tag('points'):
            with tag('title'):
                text("Points")
            with tag('value'):
                text(str(adc_out_timings))
        with tag('num_of_channels'):
            with tag('title'):
                text("Number of Channels")
            with tag('value'):
                text(number_of_channels_l)
        with tag('times'):
            with tag('title'):
                text("Times")
            with tag('value'):
                # BUG FIX: the original called str(adc_start).format('%.e'),
                # a no-op because the string contains no '{}' placeholders;
                # the identical plain str() output is kept. TODO: apply real
                # scientific-notation formatting if the consumer expects it.
                text(str(adc_start))
        with tag('sample_freq'):
            with tag('title'):
                text("Sample Frequency")
            with tag('value'):
                text(sampling_freq_l)

    result = indent(
        doc.getvalue(),
        indentation=' ' * 4,
        newline='\r'
    )
    # FIX: context manager guarantees the file is closed on write failure.
    with open(path + "picoscope_params.xml", "w") as sync_file:
        sync_file.write(result)
+
+
if __name__ == "__main__":
    # Extra RF gate padding used by synchronization() (hardware hack).
    CONST_HACK_RF_DELAY = 17 * 2 * 2
    SEQ_INPUT, SEQ_DICT = seq_file_input(seq_file_name='sequences/turbo_FLASH_060924_0444.seq')
    # SEQ_INPUT, SEQ_DICT = seq_file_input(seq_file_name='sequences/test1_full.seq')

    params_path = 'sequences/'
    params_filename = "turbo_FLASH_060924_0444"
    # params_filename = "test1_full"

    # FIX: load the sequence parameters with a context manager so the file
    # is closed even when json.load() raises.
    with open(params_path + params_filename + ".json", 'r') as file:
        SEQ_PARAM = json.load(file)

    # Integration of srv_seq_gen (simulated JSON/SEQ input):
    # SEQ_PARAM = set_limits()
    # SEQ_INPUT = save_param()
    # SEQ_DICT = SEQ_INPUT.waveforms_export()

    # Artificial delays imposed by the MRI hardware design:
    # RF_dtime = 10 * 1e-6
    # TR_dtime = 10 * 1e-6

    time_info = SEQ_INPUT.duration()
    blocks_number = time_info[1]
    time_dur = time_info[0]

    # Output interpretation; every file format is defined inside the method.
    output_seq(SEQ_DICT, SEQ_PARAM)

    # Sequence-wide raster constants taken from the .seq definitions.
    local_definitions = SEQ_INPUT.definitions
    ADC_raster = local_definitions['AdcRasterTime']
    RF_raster = local_definitions['RadiofrequencyRasterTime']

    synchronization(SEQ_INPUT)

+ 0 - 0
LF_scanner/services/srv_stack.py


+ 53 - 0
LF_scanner/setup.py

@@ -0,0 +1,53 @@
+import setuptools
+
+from version import major, minor, revision
+
+
+def _get_long_description() -> str:
+    """
+    Returns long description from `README.md` if possible, else 'Pulseq in Python'.
+
+    Returns
+    -------
+    str
+        Long description of PyPulseq project.
+    """
+    try:  # Unicode decode error on Windows
+        with open("README.md", "r") as fh:
+            long_description = fh.read()
+    except:
+        long_description = "Pulseq in Python"
+    return long_description
+
+
# Build/packaging configuration for the pypulseq distribution; version
# components (major.minor.revision) are imported from version.py above.
setuptools.setup(
    author="Keerthi Sravan Ravi",
    author_email="ks3621@columbia.edu",
    classifiers=[
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "License :: OSI Approved :: GNU Affero General Public License v3",
        "Operating System :: OS Independent",
    ],
    description="Pulseq in Python",
    # Ship the non-Python files declared in MANIFEST.in / package_data.
    include_package_data=True,
    install_requires=[
        "coverage>=6.2",
        "matplotlib>=3.5.2",
        "numpy>=1.19.5",
        "scipy>=1.8.1",
        "sigpy==0.1.23",
    ],
    license="License :: OSI Approved :: GNU Affero General Public License v3",
    long_description=_get_long_description(),
    long_description_content_type="text/markdown",
    name="pypulseq",
    packages=setuptools.find_packages(),
    py_modules=["version"],
    # package_data for wheel distributions; MANIFEST.in for source distributions
    package_data={"pypulseq.SAR": ["QGlobal.mat"]},
    project_urls={"Documentation": "https://pypulseq.readthedocs.io/en/latest/"},
    python_requires=">=3.6.3",
    url="https://github.com/imr-framework/pypulseq",
    version=".".join((str(major), str(minor), str(revision))),
)

File diff suppressed because it is too large
+ 509 - 0
LF_scanner/t1_SE.ipynb


File diff suppressed because it is too large
+ 626 - 0
LF_scanner/t1_SE_experimental.ipynb


File diff suppressed because it is too large
+ 346 - 0
LF_scanner/t1_SE_final.ipynb


File diff suppressed because it is too large
+ 477 - 0
LF_scanner/t1_SE_final_final.ipynb


File diff suppressed because it is too large
+ 424 - 0
LF_scanner/t1_SE_final_max_grad.ipynb


File diff suppressed because it is too large
+ 499 - 0
LF_scanner/t2_SE_backup.ipynb


File diff suppressed because it is too large
+ 66 - 0
LF_scanner/t2_SE_original.ipynb


File diff suppressed because it is too large
+ 0 - 0
LF_scanner/t2_se_pypulseq_colab.xml


+ 0 - 0
LF_scanner/utilities/__init__.py


+ 16 - 0
LF_scanner/utilities/phase_grad_utils.py

@@ -0,0 +1,16 @@
+import numpy as np
+
def create_k_steps(k_span, steps):
    """
    Return the phase-encode gradient amplitudes spanning ``k_span``, handling
    both odd and even step counts.

    The normalized grid is symmetric for an even ``steps`` and shifted by
    half a step for an odd one; values are returned in descending order.

    :param k_span: full extent of k-space to cover
    :param steps: number of gradient steps
    :return: numpy array of ``steps`` amplitudes scaled by ``k_span * 0.5``
    """
    half = steps / 2
    grid = np.arange(steps + 1)

    if np.mod(steps, 2) == 0:
        normalized = (grid - half) / half
    else:
        normalized = (grid - (steps + 1) / 2) / half

    # Reverse, then drop the trailing element to keep exactly `steps` values.
    descending = np.flip(normalized, 0)[:-1]
    return descending * k_span * 0.5

+ 5 - 0
LF_scanner/version.py

@@ -0,0 +1,5 @@
from typing import Union

# Semantic version of the package, assembled as "major.minor.revision"
# by setup.py.
major: int = 1
minor: int = 4
# Union[int, str] allows pre-release tags (e.g. "0rc1") as the revision.
revision: Union[int, str] = 0

File diff suppressed because it is too large
+ 448 - 0
LF_scanner/write_se_new.ipynb


File diff suppressed because it is too large
+ 463 - 0
LF_scanner/write_t2_se.ipynb


Some files were not shown because too many files changed in this diff