_utils.py

import numpy as np


# Functions for converting
def figure_to_image(figures, close=True):
    """Render matplotlib figure to numpy format.

    Note that this requires the ``matplotlib`` package.

    Args:
        figures (matplotlib.pyplot.figure or list of figures): figure or a list of figures
        close (bool): Flag to automatically close the figure

    Returns:
        numpy.array: image in [CHW] order
    """
    import matplotlib.pyplot as plt
    import matplotlib.backends.backend_agg as plt_backend_agg

    def render_to_rgb(figure):
        canvas = plt_backend_agg.FigureCanvasAgg(figure)
        canvas.draw()
        data = np.frombuffer(canvas.buffer_rgba(), dtype=np.uint8)
        w, h = figure.canvas.get_width_height()
        image_hwc = data.reshape([h, w, 4])[:, :, 0:3]
        image_chw = np.moveaxis(image_hwc, source=2, destination=0)
        if close:
            plt.close(figure)
        return image_chw

    if isinstance(figures, list):
        images = [render_to_rgb(figure) for figure in figures]
        return np.stack(images)
    else:
        image = render_to_rgb(figures)
        return image
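

# Illustrative usage sketch (the _demo_* helpers are additions for clarity and
# are not part of the original module): render a simple matplotlib figure and
# check the channels-first [C, H, W] layout of the result.
def _demo_figure_to_image():
    import matplotlib.pyplot as plt

    fig = plt.figure()
    plt.plot([0, 1, 2], [0, 1, 4])
    img = figure_to_image(fig)  # the figure is closed afterwards (close=True)
    assert img.ndim == 3 and img.shape[0] == 3  # RGB channels come first
    return img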


def _prepare_video(V):
    """
    Converts a 5D tensor [batchsize, time(frame), channel(color), height, width]
    into a 4D tensor with dimension [time(frame), new_width, new_height, channel].

    A batch of images is spread onto a grid, which forms a frame.
    e.g. a video with batchsize 16 will have a 4x4 grid.
    """
    b, t, c, h, w = V.shape

    if V.dtype == np.uint8:
        V = np.float32(V) / 255.0

    def is_power2(num):
        return num != 0 and ((num & (num - 1)) == 0)

    # pad to nearest power of 2, all at once
    if not is_power2(V.shape[0]):
        len_addition = int(2 ** V.shape[0].bit_length() - V.shape[0])
        V = np.concatenate((V, np.zeros(shape=(len_addition, t, c, h, w))), axis=0)

    n_rows = 2 ** ((b.bit_length() - 1) // 2)
    n_cols = V.shape[0] // n_rows

    V = np.reshape(V, newshape=(n_rows, n_cols, t, c, h, w))
    V = np.transpose(V, axes=(2, 0, 4, 1, 5, 3))
    V = np.reshape(V, newshape=(t, n_rows * h, n_cols * w, c))

    return V
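

# Illustrative usage sketch (not part of the original module): a batch of 16
# single-channel, 8-frame videos is laid out as a 4x4 grid per frame.
def _demo_prepare_video():
    vid = np.random.randint(0, 255, size=(16, 8, 1, 32, 32), dtype=np.uint8)
    frames = _prepare_video(vid)
    assert frames.shape == (8, 4 * 32, 4 * 32, 1)  # [time, height, width, channel]
    return frames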


def make_grid(I, ncols=8):
    # I: N1HW or N3HW
    assert isinstance(I, np.ndarray), "plugin error, should pass numpy array here"
    if I.shape[1] == 1:
        I = np.concatenate([I, I, I], 1)
    assert I.ndim == 4 and I.shape[1] == 3
    nimg = I.shape[0]
    H = I.shape[2]
    W = I.shape[3]
    ncols = min(nimg, ncols)
    nrows = int(np.ceil(float(nimg) / ncols))
    canvas = np.zeros((3, H * nrows, W * ncols), dtype=I.dtype)
    i = 0
    for y in range(nrows):
        for x in range(ncols):
            if i >= nimg:
                break
            canvas[:, y * H : (y + 1) * H, x * W : (x + 1) * W] = I[i]
            i = i + 1
    return canvas
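

# Illustrative usage sketch (not part of the original module): ten grayscale
# images tiled onto a 3-channel canvas with the default 8 columns, i.e. 2 rows.
def _demo_make_grid():
    imgs = np.random.rand(10, 1, 16, 16).astype(np.float32)  # N1HW
    grid = make_grid(imgs)
    assert grid.shape == (3, 2 * 16, 8 * 16)
    return grid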


# if modality == 'IMG':
#     if x.dtype == np.uint8:
#         x = x.astype(np.float32) / 255.0


def convert_to_HWC(tensor, input_format):  # tensor: numpy array
    assert len(set(input_format)) == len(
        input_format
    ), "You cannot use the same dimension shorthand twice. \
        input_format: {}".format(input_format)
    assert len(tensor.shape) == len(
        input_format
    ), "size of input tensor and input format are different. \
        tensor shape: {}, input_format: {}".format(tensor.shape, input_format)
    input_format = input_format.upper()

    if len(input_format) == 4:
        index = [input_format.find(c) for c in "NCHW"]
        tensor_NCHW = tensor.transpose(index)
        tensor_CHW = make_grid(tensor_NCHW)
        return tensor_CHW.transpose(1, 2, 0)

    if len(input_format) == 3:
        index = [input_format.find(c) for c in "HWC"]
        tensor_HWC = tensor.transpose(index)
        if tensor_HWC.shape[2] == 1:
            tensor_HWC = np.concatenate([tensor_HWC, tensor_HWC, tensor_HWC], 2)
        return tensor_HWC

    if len(input_format) == 2:
        index = [input_format.find(c) for c in "HW"]
        tensor = tensor.transpose(index)
        tensor = np.stack([tensor, tensor, tensor], 2)
        return tensor
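

# Illustrative usage sketch (not part of the original module): the format
# string names each axis of the input, so a CHW array comes back as HWC.
def _demo_convert_to_HWC():
    chw = np.random.rand(3, 24, 32)
    hwc = convert_to_HWC(chw, "chw")  # the format string is case-insensitive
    assert hwc.shape == (24, 32, 3)
    return hwc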