logger.py

import datetime
import time
from collections import defaultdict, deque

import torch

from .distributed import reduce_across_processes


class SmoothedValue:
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt="{median:.4f} ({global_avg:.4f})"):
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        self.fmt = fmt

    def update(self, value, n=1):
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        t = reduce_across_processes([self.count, self.total])
        t = t.tolist()
        self.count = int(t[0])
        self.total = t[1]

    @property
    def median(self):
        d = torch.tensor(list(self.deque))
        return d.median().item()

    @property
    def avg(self):
        d = torch.tensor(list(self.deque), dtype=torch.float32)
        return d.mean().item()

    @property
    def global_avg(self):
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value
        )
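

# A minimal usage sketch (hypothetical values): the windowed statistics
# (median, avg, max, value) only reflect the last `window_size` updates,
# while `global_avg` covers every update since construction.
#
#     loss_meter = SmoothedValue(window_size=10, fmt="{avg:.4f} ({global_avg:.4f})")
#     for loss in (0.9, 0.7, 0.5):
#         loss_meter.update(loss)
#     print(loss_meter)  # -> "0.7000 (0.7000)": windowed avg and global avg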


class MetricLogger:
    """Gather a set of named SmoothedValue meters and log progress while
    iterating over a data source.
    """

    def __init__(self, delimiter="\t"):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            if not isinstance(v, (float, int)):
                raise TypeError(
                    f"This method expects the value of the input arguments to be of type float or int, instead got {type(v)}"
                )
            self.meters[k].update(v)

    def __getattr__(self, attr):
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{attr}'")

    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(f"{name}: {str(meter)}")
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, **kwargs):
        self.meters[name] = SmoothedValue(**kwargs)

    def log_every(self, iterable, print_freq=5, header=None):
        i = 0
        if not header:
            header = ""
        start_time = time.time()
        end = time.time()
        # data_time measures how long the iterable takes to produce the next
        # item (e.g. data loading); iter_time measures the full step,
        # including the caller's work between yields.
        iter_time = SmoothedValue(fmt="{avg:.4f}")
        data_time = SmoothedValue(fmt="{avg:.4f}")
        # Pad the iteration counter to the width of the total count.
        space_fmt = ":" + str(len(str(len(iterable)))) + "d"
        if torch.cuda.is_available():
            log_msg = self.delimiter.join(
                [
                    header,
                    "[{0" + space_fmt + "}/{1}]",
                    "eta: {eta}",
                    "{meters}",
                    "time: {time}",
                    "data: {data}",
                    "max mem: {memory:.0f}",
                ]
            )
        else:
            log_msg = self.delimiter.join(
                [header, "[{0" + space_fmt + "}/{1}]", "eta: {eta}", "{meters}", "time: {time}", "data: {data}"]
            )
        MB = 1024.0 * 1024.0
        for obj in iterable:
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if print_freq is not None and i % print_freq == 0:
                # Estimate remaining time from the global average iteration time.
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(
                        log_msg.format(
                            i,
                            len(iterable),
                            eta=eta_string,
                            meters=str(self),
                            time=str(iter_time),
                            data=str(data_time),
                            memory=torch.cuda.max_memory_allocated() / MB,
                        )
                    )
                else:
                    print(
                        log_msg.format(
                            i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time)
                        )
                    )
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print(f"{header} Total time: {total_time_str}")