"""
Python Library to Read FreeSurfer's Cortical Parcellation Anatomical Statistics
([lr]h.aparc(.*)?.stats)

FreeSurfer
https://surfer.nmr.mgh.harvard.edu/

>>> from freesurfer_stats import CorticalParcellationStats
>>> stats = CorticalParcellationStats.read('tests/subjects/fabian/stats/lh.aparc.DKTatlas.stats')
>>> stats.headers['CreationTime'].isoformat()
'2019-05-09T21:05:54+00:00'
>>> stats.headers['cvs_version']
'Id: mris_anatomical_stats.c,v 1.79 2016/03/14 15:15:34 greve Exp'
>>> stats.headers['cmdline'][:64]
'mris_anatomical_stats -th3 -mgz -cortex ../label/lh.cortex.label'
>>> stats.hemisphere
'left'
>>> stats.whole_brain_measurements['estimated_total_intracranial_volume_mm^3']
0    1.670487e+06
Name: estimated_total_intracranial_volume_mm^3, dtype: float64
>>> stats.whole_brain_measurements['white_surface_total_area_mm^2']
0    98553
Name: white_surface_total_area_mm^2, dtype: int64
>>> stats.structural_measurements[['structure_name', 'surface_area_mm^2',
...                                'gray_matter_volume_mm^3']].head()
            structure_name  surface_area_mm^2  gray_matter_volume_mm^3
0  caudalanteriorcingulate               1472                      4258
1      caudalmiddlefrontal               3039                      8239
2                   cuneus               2597                      6722
3               entorhinal                499                      2379
4                 fusiform               3079                      9064

Copyright (C) 2019 Fabian Peter Hammerle <fabian@hammerle.me>

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""

import datetime
import io
import pathlib
import re
import typing

import numpy
import pandas

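# Importing the version module here makes the package version available as
# freesurfer_stats.__version__ (the name is not otherwise used in this module).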
from freesurfer_stats.version import __version__


class CorticalParcellationStats:
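    """
    Parsed representation of a FreeSurfer cortical parcellation statistics
    file, exposing its headers, whole-brain measurements and per-structure
    (structural) measurements.
    """
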
    _HEMISPHERE_PREFIX_TO_SIDE = {"lh": "left", "rh": "right"}
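    # Whole-brain measurement header lines look like (illustrative example):
    #   Measure Cortex, WhiteSurfArea, White Surface Total Area, 98553, mm^2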
    _GENERAL_MEASUREMENTS_REGEX = re.compile(
        r"^Measure \S+, ([^,\s]+),? ([^,]+), ([\d\.]+), (\S+)$"
    )
    _COLUMN_NAMES_NON_SAFE_REGEX = re.compile(r"\s+")

    def __init__(self):
        self.headers = (
            {}
        )  # type: typing.Dict[str, typing.Union[str, datetime.datetime]]
        self.whole_brain_measurements = (
            {}
        )  # type: typing.Dict[str, typing.Tuple[float, int]]
        self.structural_measurements = {}  # type: typing.Union[pandas.DataFrame, None]

    @property
    def hemisphere(self) -> str:
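        """Hemisphere side ('left' or 'right') derived from the 'hemi' header."""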
        return self._HEMISPHERE_PREFIX_TO_SIDE[self.headers["hemi"]]

    @staticmethod
    def _read_header_line(stream: typing.TextIO) -> str:
        line = stream.readline()
        assert line.startswith("# ")
        return line[2:].rstrip()

    @classmethod
    def _read_column_header_line(
        cls, stream: typing.TextIO,
    ) -> typing.Tuple[int, str, str]:
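        """Parse a '# TableCol <index> <key> <value>' header line."""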
        line = cls._read_header_line(stream)
        assert line.startswith("TableCol"), line
        line = line[len("TableCol ") :].lstrip()
        index, key, value = line.split(maxsplit=2)
        return int(index), key, value

    def _read_headers(self, stream: typing.TextIO) -> None:
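        """
        Read '# <name> <value>' header lines into self.headers until the
        first 'Measure' line, parsing CreationTime and
        AnnotationFileTimeStamp into datetime objects.
        """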
        self.headers = {}
        while True:
            line = self._read_header_line(stream)
            if line.startswith("Measure"):
                break
            if line:
                attr_name, attr_value = line.split(" ", maxsplit=1)
                attr_value = attr_value.lstrip()
                if attr_name in ["cvs_version", "mrisurf.c-cvs_version"]:
                    attr_value = attr_value.strip("$").rstrip()
                if attr_name == "CreationTime":
                    attr_dt = datetime.datetime.strptime(
                        attr_value, "%Y/%m/%d-%H:%M:%S-%Z",
                    )
                    if attr_dt.tzinfo is None:
                        assert attr_value.endswith("-GMT")
                        attr_dt = attr_dt.replace(tzinfo=datetime.timezone.utc)
                    attr_value = attr_dt
                if attr_name == "AnnotationFileTimeStamp":
                    attr_value = datetime.datetime.strptime(
                        attr_value, "%Y/%m/%d %H:%M:%S",
                    )
                self.headers[attr_name] = attr_value

    @classmethod
    def _format_column_name(cls, name: str, unit: typing.Optional[str]) -> str:
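        """
        Build a snake_case column name, appending the unit unless it is
        dimensionless, e.g.:

        >>> CorticalParcellationStats._format_column_name(
        ...     'White Surface Total Area', 'mm^2')
        'white_surface_total_area_mm^2'
        """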
        column_name = name.lower()
        if unit not in ["unitless", "NA"]:
            column_name += "_" + unit
        return cls._COLUMN_NAMES_NON_SAFE_REGEX.sub("_", column_name)

    @classmethod
    def _parse_whole_brain_measurements_line(
        cls, line: str,
    ) -> typing.Tuple[str, numpy.ndarray]:
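        """
        Split a whole-brain 'Measure ...' line into a formatted column name
        and a one-element numeric array holding the measured value.
        """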
        match = cls._GENERAL_MEASUREMENTS_REGEX.match(line)
        if not match:
            raise ValueError("unexpected line: {!r}".format(line))
        key, name, value, unit = match.groups()
        if (
            key == "SupraTentorialVolNotVent"
            and name.lower() == "supratentorial volume"
        ):
            name += " Without Ventricles"
        column_name = cls._format_column_name(name, unit)
        return column_name, pandas.to_numeric([value], errors="raise")

    @classmethod
    def _read_column_attributes(
        cls, num: int, stream: typing.TextIO,
    ) -> typing.List[typing.Dict[str, str]]:
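        """
        Read the three 'TableCol' attribute lines (ColHeader, FieldName,
        Units) for each of the `num` table columns.
        """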
        columns = []
        for column_index in range(1, int(num) + 1):
            column_attrs = {}
            for _ in range(3):
                column_index_line, key, value = cls._read_column_header_line(stream)
                assert column_index_line == column_index
                assert key not in column_attrs
                column_attrs[key] = value
            columns.append(column_attrs)
        return columns

    def _read(self, stream: typing.TextIO) -> None:
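        """
        Parse a complete stats file: the two-line preamble, the header
        section, the whole-brain 'Measure' lines, and the per-structure table
        (loaded into a pandas.DataFrame).
        """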
        assert (
            stream.readline().rstrip()
            == "# Table of FreeSurfer cortical parcellation anatomical statistics"
        )
        assert stream.readline().rstrip() == "#"
        self._read_headers(stream)
        self.whole_brain_measurements = pandas.DataFrame()
        line = self._read_header_line(stream)
        while not line.startswith("NTableCols"):
            if line.startswith("BrainVolStatsFixed"):
                # https://surfer.nmr.mgh.harvard.edu/fswiki/BrainVolStatsFixed
                assert (
                    line.startswith("BrainVolStatsFixed see ")
                    or line == "BrainVolStatsFixed-NotNeeded because voxelvolume=1mm3"
                )
                self.headers["BrainVolStatsFixed"] = line[len("BrainVolStatsFixed-") :]
            else:
                column_name, value = self._parse_whole_brain_measurements_line(line)
                assert column_name not in self.whole_brain_measurements, column_name
                self.whole_brain_measurements[column_name] = value
            line = self._read_header_line(stream)
        columns = self._read_column_attributes(int(line[len("NTableCols ") :]), stream)
        assert self._read_header_line(stream) == "ColHeaders " + " ".join(
            c["ColHeader"] for c in columns
        )
        self.structural_measurements = pandas.DataFrame(
            (line.rstrip().split() for line in stream),
            columns=[
                self._format_column_name(c["FieldName"], c["Units"]) for c in columns
            ],
        ).apply(pandas.to_numeric, errors="ignore")

    @classmethod
    def read(cls, path: typing.Union[str, pathlib.Path]) -> "CorticalParcellationStats":
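        """
        Read and parse a cortical parcellation stats file.

        `path` may be a local path; URLs and cloud-storage paths understood by
        pandas' IO helpers should also work via get_filepath_or_buffer.
        """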
        # path_or_buffer: typing.Union[str, pathlib.Path, typing.IO[typing.AnyStr],
        #                              s3fs.S3File, gcsfs.GCSFile]
        # https://github.com/pandas-dev/pandas/blob/v0.25.3/pandas/io/parsers.py#L436
        # https://github.com/pandas-dev/pandas/blob/v0.25.3/pandas/_typing.py#L30
        (
            path_or_buffer,
            _,
            _,
            *instructions,
        ) = pandas.io.common.get_filepath_or_buffer(path)
        # https://github.com/pandas-dev/pandas/blob/v0.25.3/pandas/io/common.py#L171
        # https://github.com/pandas-dev/pandas/blob/v0.21.0/pandas/io/common.py#L171
        if instructions:  # pragma: no cover
            assert len(instructions) == 1, instructions
            should_close = instructions[0]
        else:  # pragma: no cover
            should_close = hasattr(path_or_buffer, "close")
        stats = cls()
        if hasattr(path_or_buffer, "readline"):
            # pylint: disable=protected-access
            stats._read(io.TextIOWrapper(path_or_buffer))
        else:
            with open(path_or_buffer, "r") as stream:
                # pylint: disable=protected-access
                stats._read(stream)
        if should_close:
            path_or_buffer.close()
        return stats