Blender V4.5
cached_image.cc
Go to the documentation of this file.
1/* SPDX-FileCopyrightText: 2023 Blender Authors
2 *
3 * SPDX-License-Identifier: GPL-2.0-or-later */
4
#include <cstdint>
#include <memory>
#include <string>

#include "BLI_hash.hh"
#include "BLI_listbase.h"
#include "BLI_string.h"
#include "BLI_string_ref.hh"

#include "RE_pipeline.h"

#include "GPU_texture.hh"

#include "IMB_colormanagement.hh"
#include "IMB_imbuf.hh"
#include "IMB_imbuf_types.hh"

#include "BKE_cryptomatte.hh"
#include "BKE_image.hh"
#include "BKE_lib_id.hh"

#include "DNA_ID.h"
#include "DNA_image_types.h"

#include "COM_cached_image.hh"
#include "COM_context.hh"
#include "COM_result.hh"
#include "COM_utilities.hh"
33
34namespace blender::compositor {
35
36/* --------------------------------------------------------------------
37 * Cached Image Key.
38 */
39
41 const std::string pass_name,
42 const std::string view_name,
43 const int frame)
45{
46}
47
49{
50 return get_default_hash(this->layer_index, this->view_name, this->pass_name, this->frame);
51}
52
54{
55 return a.layer_index == b.layer_index && a.pass_name == b.pass_name &&
56 a.view_name == b.view_name && a.frame == b.frame;
57}
58
59/* --------------------------------------------------------------------
60 * Cached Image.
61 */
62
63/* Get the render layer in the given render result specified by the given image user. */
64static RenderLayer *get_render_layer(const RenderResult *render_result,
65 const ImageUser &image_user)
66{
67 const ListBase *layers = &render_result->layers;
68 return static_cast<RenderLayer *>(BLI_findlink(layers, image_user.layer));
69}
70
71/* Get the index of the pass with the given name in the render layer specified by the given image
72 * user in the given render result. */
73static int get_pass_index(const RenderResult *render_result,
74 const ImageUser &image_user,
75 const char *name)
76{
77 const RenderLayer *render_layer = get_render_layer(render_result, image_user);
78 return BLI_findstringindex(&render_layer->passes, name, offsetof(RenderPass, name));
79}
80
81/* Get the render pass in the given render layer specified by the given image user. */
82static RenderPass *get_render_pass(const RenderLayer *render_layer, const ImageUser &image_user)
83{
84 return static_cast<RenderPass *>(BLI_findlink(&render_layer->passes, image_user.pass));
85}
86
87/* Get the index of the view selected in the image user. If the image is not a multi-view image
88 * or only has a single view, then zero is returned. Otherwise, if the image is a multi-view
89 * image, the index of the selected view is returned. However, note that the value of the view
90 * member of the image user is not the actual index of the view. More specifically, the index 0
91 * is reserved to denote the special mode of operation "All", which dynamically selects the view
92 * whose name matches the view currently being rendered. It follows that the views are then
93 * indexed starting from 1. So for non zero view values, the actual index of the view is the
94 * value of the view member of the image user minus 1. */
95static int get_view_index(const Context &context,
96 const RenderResult *render_result,
97 const ImageUser &image_user)
98{
99 /* The image is not a multi-view image, so just return zero. */
100 if (!render_result) {
101 return 0;
102 }
103
104 const ListBase *views = &render_result->views;
105 /* There is only one view and its index is 0. */
106 if (BLI_listbase_count_at_most(views, 2) < 2) {
107 return 0;
108 }
109
110 const int view = image_user.view;
111 /* The view is not zero, which means it is manually specified and the actual index is then the
112 * view value minus 1. */
113 if (view != 0) {
114 return view - 1;
115 }
116
117 /* Otherwise, the view value is zero, denoting the special mode of operation "All", which finds
118 * the index of the view whose name matches the view currently being rendered. */
119 const char *view_name = context.get_view_name().data();
120 const int matched_view = BLI_findstringindex(views, view_name, offsetof(RenderView, name));
121
122 /* No view matches the view currently being rendered, so fallback to the first view. */
123 if (matched_view == -1) {
124 return 0;
125 }
126
127 return matched_view;
128}
129
130/* Get a copy of the image user that is appropriate to retrieve the needed image buffer from the
131 * image. This essentially sets the appropriate frame, pass, and view that corresponds to the
132 * given context and pass name. If the image is a multi-layer image, then the render_result
133 * argument should be set, otherwise, it is ignored. */
135 const Image *image,
136 const RenderResult *render_result,
137 const ImageUser *image_user,
138 const char *pass_name)
139{
140 ImageUser image_user_for_pass = *image_user;
141
142 /* Set the needed view. */
143 image_user_for_pass.view = get_view_index(context, render_result, image_user_for_pass);
144
145 /* Set the needed pass. */
146 if (BKE_image_is_multilayer(image)) {
147 image_user_for_pass.pass = get_pass_index(render_result, image_user_for_pass, pass_name);
148 BKE_image_multilayer_index(const_cast<RenderResult *>(render_result), &image_user_for_pass);
149 }
150 else {
151 BKE_image_multiview_index(image, &image_user_for_pass);
152 }
153
154 return image_user_for_pass;
155}
156
157/* The image buffer might be stored as an sRGB 8-bit image, while the compositor expects linear
158 * float images, so compute a linear float buffer for the image buffer. This will also do linear
159 * space conversion and alpha pre-multiplication as needed. We could store those images in sRGB GPU
160 * textures and let the GPU do the linear space conversion, but the issues is that we don't control
161 * how the GPU does the conversion and so we get tiny differences across CPU and GPU compositing,
162 * and potentially even across GPUs/Drivers. Further, if alpha pre-multiplication is needed, we
163 * would need to do it ourself, which means alpha pre-multiplication will happen before linear
164 * space conversion, which would produce yet another difference. So we just do everything on the
165 * CPU, since this is already a cached resource.
166 *
167 * To avoid conflicts with other threads, create a new image buffer and assign all the necessary
168 * information to it, with IB_DO_NOT_TAKE_OWNERSHIP for buffers since a deep copy is not needed.
169 *
170 * The caller should free the returned image buffer. */
171static ImBuf *compute_linear_buffer(ImBuf *image_buffer)
172{
173 /* Do not pass the flags to the allocation function to avoid buffer allocation, but assign them
174 * after to retain important information like precision and alpha mode. */
175 ImBuf *linear_image_buffer = IMB_allocImBuf(
176 image_buffer->x, image_buffer->y, image_buffer->planes, 0);
177 linear_image_buffer->flags = image_buffer->flags;
178
179 /* Assign the float buffer if it exists, as well as its number of channels. */
181 linear_image_buffer, image_buffer->float_buffer, IB_DO_NOT_TAKE_OWNERSHIP);
182 linear_image_buffer->channels = image_buffer->channels;
183
184 /* If no float buffer exists, assign it then compute a float buffer from it. This is the main
185 * call of this function. */
186 if (!linear_image_buffer->float_buffer.data) {
188 linear_image_buffer, image_buffer->byte_buffer, IB_DO_NOT_TAKE_OWNERSHIP);
189 IMB_float_from_byte(linear_image_buffer);
190 }
191
192 /* If the image buffer contained compressed data, assign them as well, but only if the color
193 * space of the buffer is linear or data, since we need linear data and can't preprocess the
194 * compressed buffer. If not, we fallback to the float buffer already assigned, which is
195 * guaranteed to exist as a fallback for compressed textures. */
196 const bool is_suitable_compressed_color_space =
199 if (image_buffer->ftype == IMB_FTYPE_DDS && is_suitable_compressed_color_space) {
200 linear_image_buffer->ftype = IMB_FTYPE_DDS;
201 IMB_assign_dds_data(linear_image_buffer, image_buffer->dds_data, IB_DO_NOT_TAKE_OWNERSHIP);
202 }
203
204 return linear_image_buffer;
205}
206
207/* Returns the appropriate result type for the given image buffer, which represents the pass in the
208 * given render result with the given image user. The type is determined based on the channels
209 * count of the buffer for simple images, while channel IDs are also considered for multi-layer
210 * images since 3-channel passes can be RGB without alpha and 4-channel passes can be XYZW 4D
211 * vectors. */
212static ResultType get_result_type(const RenderResult *render_result,
213 const ImageUser &image_user,
214 const ImBuf *image_buffer)
215{
216 if (!render_result) {
217 return Result::float_type(image_buffer->channels);
218 }
219
220 const RenderLayer *render_layer = get_render_layer(render_result, image_user);
221 if (!render_layer) {
222 return Result::float_type(image_buffer->channels);
223 }
224
225 const RenderPass *render_pass = get_render_pass(render_layer, image_user);
226 if (!render_pass) {
227 return Result::float_type(image_buffer->channels);
228 }
229
230 switch (render_pass->channels) {
231 case 1:
232 return ResultType::Float;
233 case 2:
234 return ResultType::Float2;
235 case 3:
236 if (STR_ELEM(render_pass->chan_id, "RGB", "rgb")) {
237 return ResultType::Color;
238 }
239 else {
240 return ResultType::Float3;
241 }
242 case 4:
243 if (STR_ELEM(render_pass->chan_id, "RGBA", "rgba")) {
244 return ResultType::Color;
245 }
246 else {
247 return ResultType::Float4;
248 }
249 default:
250 break;
251 }
252
254 return ResultType::Float;
255}
256
258 Image *image,
259 ImageUser *image_user,
260 const char *pass_name)
261 : result(context)
262{
263 /* We can't retrieve the needed image buffer yet, because we still need to assign the pass index
264 * to the image user in order to acquire the image buffer corresponding to the given pass name.
265 * However, in order to compute the pass index, we need the render result structure of the image
266 * to be initialized. So we first acquire a dummy image buffer since it initializes the image
267 * render result as a side effect. We also use that as a mean of validation, since we can early
268 * exit if the returned image buffer is nullptr. This image buffer can be immediately released.
269 * Since it carries no important information. */
270 ImBuf *initial_image_buffer = BKE_image_acquire_ibuf(image, image_user, nullptr);
271 BKE_image_release_ibuf(image, initial_image_buffer, nullptr);
272 if (!initial_image_buffer) {
273 return;
274 }
275
276 RenderResult *render_result = BKE_image_acquire_renderresult(nullptr, image);
277
278 ImageUser image_user_for_pass = compute_image_user_for_pass(
279 context, image, render_result, image_user, pass_name);
280
281 this->populate_meta_data(render_result, image_user_for_pass);
282
283 BKE_image_release_renderresult(nullptr, image, render_result);
284
285 ImBuf *image_buffer = BKE_image_acquire_ibuf(image, &image_user_for_pass, nullptr);
286 ImBuf *linear_image_buffer = compute_linear_buffer(image_buffer);
287
288 const bool use_half_float = linear_image_buffer->foptions.flag & OPENEXR_HALF;
289 this->result.set_precision(use_half_float ? ResultPrecision::Half : ResultPrecision::Full);
290
291 this->result.set_type(get_result_type(render_result, image_user_for_pass, linear_image_buffer));
292
293 /* For GPU, we wrap the texture returned by IMB module and free it ourselves in destructor. For
294 * CPU, we allocate the result and copy to it from the image buffer. */
295 if (context.use_gpu()) {
296 texture_ = IMB_create_gpu_texture("Image Texture", linear_image_buffer, true, true);
298 this->result.wrap_external(texture_);
299 }
300 else {
301 const int2 size = int2(image_buffer->x, image_buffer->y);
302 Result buffer_result(
303 context, Result::float_type(image_buffer->channels), ResultPrecision::Full);
304 buffer_result.wrap_external(linear_image_buffer->float_buffer.data, size);
305 this->result.allocate_texture(size, false);
306 parallel_for(size, [&](const int2 texel) {
307 this->result.store_pixel_generic_type(texel, buffer_result.load_pixel_generic_type(texel));
308 });
309 }
310
311 IMB_freeImBuf(linear_image_buffer);
312 BKE_image_release_ibuf(image, image_buffer, nullptr);
313}
314
315void CachedImage::populate_meta_data(const RenderResult *render_result,
316 const ImageUser &image_user)
317{
318 if (!render_result) {
319 return;
320 }
321
322 const RenderLayer *render_layer = get_render_layer(render_result, image_user);
323 if (!render_layer) {
324 return;
325 }
326
327 const RenderPass *render_pass = get_render_pass(render_layer, image_user);
328 if (!render_pass) {
329 return;
330 }
331
332 /* We assume the given pass is a Cryptomatte pass and retrieve its full name. If it wasn't a
333 * Cryptomatte pass, the checks below will fail anyways. */
334 const bool is_named_layer = render_layer->name[0] != '\0';
335 const std::string layer_prefix = is_named_layer ? std::string(render_layer->name) + "." : "";
336 const std::string combined_pass_name = layer_prefix + render_pass->name;
337 StringRef cryptomatte_layer_name = bke::cryptomatte::BKE_cryptomatte_extract_layer_name(
338 combined_pass_name);
339
340 struct StampCallbackData {
341 std::string cryptomatte_layer_name;
342 compositor::MetaData *meta_data;
343 };
344
345 /* Go over the stamp data and add any Cryptomatte related meta data. */
346 StampCallbackData callback_data = {cryptomatte_layer_name, &this->result.meta_data};
348 &callback_data,
349 render_result->stamp_data,
350 [](void *user_data, const char *key, char *value, int /*value_length*/) {
351 StampCallbackData *data = static_cast<StampCallbackData *>(user_data);
352
353 const std::string manifest_key = bke::cryptomatte::BKE_cryptomatte_meta_data_key(
354 data->cryptomatte_layer_name, "manifest");
355 if (key == manifest_key) {
356 data->meta_data->cryptomatte.manifest = value;
357 }
358
360 data->cryptomatte_layer_name, "hash");
361 if (key == hash_key) {
362 data->meta_data->cryptomatte.hash = value;
363 }
364
365 const std::string conversion_key = bke::cryptomatte::BKE_cryptomatte_meta_data_key(
366 data->cryptomatte_layer_name, "conversion");
367 if (key == conversion_key) {
368 data->meta_data->cryptomatte.conversion = value;
369 }
370 },
371 false);
372}
373
375{
376 this->result.release();
377 GPU_TEXTURE_FREE_SAFE(texture_);
378}
379
380/* --------------------------------------------------------------------
381 * Cached Image Container.
382 */
383
385{
386 /* First, delete all cached images that are no longer needed. */
387 for (auto &cached_images_for_id : map_.values()) {
388 cached_images_for_id.remove_if([](auto item) { return !item.value->needed; });
389 }
390 map_.remove_if([](auto item) { return item.value.is_empty(); });
391 update_counts_.remove_if([&](auto item) { return !map_.contains(item.key); });
392
393 /* Second, reset the needed status of the remaining cached images to false to ready them to
394 * track their needed status for the next evaluation. */
395 for (auto &cached_images_for_id : map_.values()) {
396 for (auto &value : cached_images_for_id.values()) {
397 value->needed = false;
398 }
399 }
400}
401
403 Image *image,
404 const ImageUser *image_user,
405 const char *pass_name)
406{
407 if (!image || !image_user) {
408 return Result(context);
409 }
410
411 /* Compute the effective frame number of the image if it was animated. */
412 ImageUser image_user_for_frame = *image_user;
413 BKE_image_user_frame_calc(image, &image_user_for_frame, context.get_frame_number());
414
415 /* A view of 0 is a special value that means the current view being rendered so use the context
416 * view name. For other values, just convert the view index into a string and use it as the name,
417 * while this is not correct it works as the cache key and is very fast compared to reading the
418 * views from file and finding out their name. */
419 const std::string view_name = image_user->view == 0 ? std::string(context.get_view_name()) :
420 std::to_string(image_user->view);
421
422 const CachedImageKey key(image_user->layer, pass_name, view_name, image_user_for_frame.framenr);
423
424 const std::string library_key = image->id.lib ? image->id.lib->id.name : "";
425 const std::string id_key = std::string(image->id.name) + library_key;
426 auto &cached_images_for_id = map_.lookup_or_add_default(id_key);
427
428 /* Invalidate the cache for that image if it was changed since it was cached. */
429 if (!cached_images_for_id.is_empty() &&
430 image->runtime->update_count != update_counts_.lookup(id_key))
431 {
432 cached_images_for_id.clear();
433 }
434
435 auto &cached_image = *cached_images_for_id.lookup_or_add_cb(key, [&]() {
436 return std::make_unique<CachedImage>(context, image, &image_user_for_frame, pass_name);
437 });
438
439 /* Store the current update count to later compare to and check if the image changed. */
440 update_counts_.add_overwrite(id_key, image->runtime->update_count);
441
442 cached_image.needed = true;
443 return cached_image.result;
444}
445
446} // namespace blender::compositor
ImBuf * BKE_image_acquire_ibuf(Image *ima, ImageUser *iuser, void **r_lock)
bool BKE_image_is_multilayer(const Image *ima)
void BKE_image_user_frame_calc(Image *ima, ImageUser *iuser, int cfra)
RenderPass * BKE_image_multilayer_index(RenderResult *rr, ImageUser *iuser)
void BKE_image_release_ibuf(Image *ima, ImBuf *ibuf, void *lock)
void BKE_image_release_renderresult(Scene *scene, Image *ima, RenderResult *render_result)
void BKE_stamp_info_callback(void *data, StampData *stamp_data, StampCallback callback, bool noskip)
RenderResult * BKE_image_acquire_renderresult(Scene *scene, Image *ima)
void BKE_image_multiview_index(const Image *ima, ImageUser *iuser)
#define BLI_assert_unreachable()
Definition BLI_assert.h:93
void * BLI_findlink(const ListBase *listbase, int number) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
Definition listbase.cc:534
int BLI_listbase_count_at_most(const ListBase *listbase, int count_max) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
Definition listbase.cc:511
int BLI_findstringindex(const ListBase *listbase, const char *id, int offset) ATTR_WARN_UNUSED_RESULT ATTR_NONNULL(1)
Definition listbase.cc:780
#define STR_ELEM(...)
Definition BLI_string.h:656
ID and Library types, which are fundamental for SDNA.
static AppView * view
#define GPU_TEXTURE_FREE_SAFE(texture)
void GPU_texture_update_mipmap_chain(GPUTexture *texture)
bool IMB_colormanagement_space_is_scene_linear(const ColorSpace *colorspace)
bool IMB_colormanagement_space_is_data(const ColorSpace *colorspace)
void IMB_assign_dds_data(ImBuf *ibuf, const DDSData &data, ImBufOwnership ownership)
void IMB_assign_float_buffer(ImBuf *ibuf, float *buffer_data, ImBufOwnership ownership)
void IMB_freeImBuf(ImBuf *ibuf)
ImBuf * IMB_allocImBuf(unsigned int x, unsigned int y, unsigned char planes, unsigned int flags)
void IMB_assign_byte_buffer(ImBuf *ibuf, uint8_t *buffer_data, ImBufOwnership ownership)
GPUTexture * IMB_create_gpu_texture(const char *name, ImBuf *ibuf, bool use_high_bitdepth, bool use_premult)
Definition util_gpu.cc:312
void IMB_float_from_byte(ImBuf *ibuf)
@ IMB_FTYPE_DDS
@ IB_DO_NOT_TAKE_OWNERSHIP
uint32_t hash_key
BMesh const char void * data
unsigned long long int uint64_t
static DBVT_INLINE btScalar size(const btDbvtVolume &a)
Definition btDbvt.cpp:52
Result get(Context &context, Image *image, const ImageUser *image_user, const char *pass_name)
CachedImageKey(const int layer_index, const std::string pass_name, const std::string view_name, const int frame)
CachedImage(Context &context, Image *image, ImageUser *image_user, const char *pass_name)
static ResultType float_type(const int channels_count)
Definition result.cc:225
void wrap_external(GPUTexture *texture)
Definition result.cc:448
float4 load_pixel_generic_type(const int2 &texel) const
#define offsetof(t, d)
#define OPENEXR_HALF
if(state< num_states)
StringRef BKE_cryptomatte_extract_layer_name(StringRef render_pass_name)
std::string BKE_cryptomatte_meta_data_key(StringRef layer_name, StringRefNull key_name)
bool operator==(const BokehKernelKey &a, const BokehKernelKey &b)
static ImBuf * compute_linear_buffer(ImBuf *image_buffer)
static int get_pass_index(const RenderResult *render_result, const ImageUser &image_user, const char *name)
static RenderPass * get_render_pass(const RenderLayer *render_layer, const ImageUser &image_user)
static int get_view_index(const Context &context, const RenderResult *render_result, const ImageUser &image_user)
static ImageUser compute_image_user_for_pass(const Context &context, const Image *image, const RenderResult *render_result, const ImageUser *image_user, const char *pass_name)
static RenderLayer * get_render_layer(const RenderResult *render_result, const ImageUser &image_user)
static ResultType get_result_type(const RenderResult *render_result, const ImageUser &image_user, const ImBuf *image_buffer)
void parallel_for(const int2 range, const Function &function)
uint64_t get_default_hash(const T &v, const Args &...args)
Definition BLI_hash.hh:233
VecBase< int32_t, 2 > int2
struct Library * lib
Definition DNA_ID.h:410
char name[66]
Definition DNA_ID.h:415
const ColorSpace * colorspace
DDSData dds_data
ImBufFloatBuffer float_buffer
ImbFormatOptions foptions
ImBufByteBuffer byte_buffer
unsigned char planes
enum eImbFileType ftype
ImageRuntimeHandle * runtime
ID id
Definition DNA_ID.h:505
ListBase passes
Definition RE_pipeline.h:89
char name[RE_MAXNAME]
Definition RE_pipeline.h:81
char chan_id[8]
Definition RE_pipeline.h:51
char name[64]
Definition RE_pipeline.h:50
ListBase views
ListBase layers
struct StampData * stamp_data