__main__.py

  1. """
  2. Read hippocampal subfield volumes computed by Freesurfer and/or ASHS
  3. and export collected data as CSV.
  4. """
  5. import argparse
  6. import os
  7. import re
  8. import typing
  9. import pandas
  10. from freesurfer_volume_reader import ashs, freesurfer, parse_version_string, \
  11. remove_group_names_from_regex
  12. def concat_dataframes(dataframes: typing.Iterable[pandas.DataFrame]
  13. ) -> pandas.DataFrame: # pragma: no cover
  14. # pylint: disable=unexpected-keyword-arg
  15. if parse_version_string(pandas.__version__) < (0, 23):
  16. return pandas.concat(dataframes, ignore_index=True)
  17. return pandas.concat(dataframes, ignore_index=True, sort=False)
  18. VOLUME_FILE_FINDERS = {
  19. 'ashs': ashs.HippocampalSubfieldsVolumeFile,
  20. # https://github.com/freesurfer/freesurfer/tree/release_6_0_0/HippoSF
  21. 'freesurfer-hipposf': freesurfer.HippocampalSubfieldsVolumeFile,
  22. }
  23. def main():
  24. argparser = argparse.ArgumentParser(description=__doc__,
  25. formatter_class=argparse.RawDescriptionHelpFormatter)
  26. argparser.add_argument('--source-types', nargs='+', default=['freesurfer-hipposf'],
  27. choices=VOLUME_FILE_FINDERS.keys(),
  28. help='default: [freesurfer-hipposf]')
  29. for source_type, file_class in VOLUME_FILE_FINDERS.items():
  30. argparser.add_argument('--{}-filename-regex'.format(source_type),
  31. dest='filename_regex.{}'.format(source_type),
  32. metavar='REGULAR_EXPRESSION', type=re.compile,
  33. default=remove_group_names_from_regex(file_class.FILENAME_PATTERN),
  34. help='default: %(default)s')
  35. argparser.add_argument('--output-format', choices=['csv'], default='csv',
  36. help='default: %(default)s')
  37. subjects_dir_path = os.environ.get('SUBJECTS_DIR', None)
  38. argparser.add_argument('root_dir_paths',
  39. metavar='ROOT_DIR',
  40. nargs='*' if subjects_dir_path else '+',
  41. default=[subjects_dir_path],
  42. help='default: $SUBJECTS_DIR ({})'.format(subjects_dir_path))
  43. args = argparser.parse_args()
  44. filename_regexs = {k[len('filename_regex.'):]: v for k, v in vars(args).items()
  45. if k.startswith('filename_regex.')}
  46. volume_frames = []
  47. for source_type in args.source_types:
  48. find_volume_files = lambda dir_path: VOLUME_FILE_FINDERS[source_type].find(
  49. root_dir_path=dir_path, filename_regex=filename_regexs[source_type])
  50. for root_dir_path in args.root_dir_paths:
  51. for volume_file in find_volume_files(root_dir_path):
  52. volume_frame = volume_file.read_volumes_dataframe()
  53. volume_frame['source_type'] = source_type
  54. volume_frame['source_path'] = volume_file.absolute_path
  55. volume_frames.append(volume_frame)
  56. united_volume_frame = concat_dataframes(volume_frames)
  57. print(united_volume_frame.to_csv(index=False))
  58. if __name__ == '__main__':
  59. main()
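
The same collection logic can also be driven programmatically instead of through the command line. Below is a minimal sketch that reuses only the finder API already exercised by main() above; it assumes the freesurfer_volume_reader package is installed, and the directory path '/data/freesurfer-subjects' is a placeholder for wherever your Freesurfer output actually lives.

# Minimal programmatic usage sketch (not part of __main__.py).
# Assumptions: freesurfer_volume_reader is installed, and the placeholder
# directory '/data/freesurfer-subjects' contains Freesurfer hipposf output.
import re

from freesurfer_volume_reader import freesurfer, remove_group_names_from_regex

filename_regex = re.compile(remove_group_names_from_regex(
    freesurfer.HippocampalSubfieldsVolumeFile.FILENAME_PATTERN))
for volume_file in freesurfer.HippocampalSubfieldsVolumeFile.find(
        root_dir_path='/data/freesurfer-subjects', filename_regex=filename_regex):
    volume_frame = volume_file.read_volumes_dataframe()
    print(volume_file.absolute_path, len(volume_frame))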