1 # ##### BEGIN GPL LICENSE BLOCK #####
3 # This program is free software; you can redistribute it and/or
4 # modify it under the terms of the GNU General Public License
5 # as published by the Free Software Foundation; either version 2
6 # of the License, or (at your option) any later version.
8 # This program is distributed in the hope that it will be useful,
9 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # GNU General Public License for more details.
13 # You should have received a copy of the GNU General Public License
14 # along with this program; if not, write to the Free Software Foundation,
15 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 # ##### END GPL LICENSE BLOCK #####
import json
import os
import subprocess
import sys
import tempfile
import time

import bpy
import numpy as np
import requests

from blenderkit import (
    append_link,
    bg_blender,
    download,
    image_utils,
    paths,
    rerequests,
    search,
    upload_bg,
    utils,
)
# Server-side resolution names mapped to their pixel sizes (longest edge).
resolutions = {
    'resolution_0_5K': 512,
    'resolution_1K': 1024,
    'resolution_2K': 2048,
    'resolution_4K': 4096,
    'resolution_8K': 8192,
}
# Ordered resolution keys, smallest to largest (dict preserves insertion order).
rkeys = list(resolutions.keys())

# Reverse lookup: pixel size (as string, e.g. from an enum property) -> server resolution name.
resolution_props_to_server = {
    '512': 'resolution_0_5K',
    '1024': 'resolution_1K',
    '2048': 'resolution_2K',
    '4096': 'resolution_4K',
    '8192': 'resolution_8K',
}
def get_current_resolution():
    """Return the maximum pixel dimension found among images in the open .blend file.

    Skips Blender's internal 'Render Result' image. Returns 0 when there are
    no (non-render) images.
    """
    actres = 0
    for i in bpy.data.images:
        if i.name != 'Render Result':
            # track the largest edge seen over all images
            actres = max(actres, i.size[0], i.size[1])
    return actres
def can_erase_alpha(na):
    """Return True when a flat RGBA pixel array has a fully opaque alpha channel.

    Parameters
    ----------
    na : numpy array, flat RGBA pixel data (as produced by Image.pixels.foreach_get)

    Returns
    -------
    True if every alpha value equals 1.0 (so the channel can be dropped).
    """
    # every 4th value in the flat RGBA array is alpha
    alpha = na[3::4]
    alpha_sum = alpha.sum()
    if alpha_sum == alpha.size:
        print('image can have alpha erased')
    # print(alpha_sum, alpha.size)
    return alpha_sum == alpha.size
68 def is_image_black(na
):
73 rgbsum
= r
.sum() + g
.sum() + b
.sum()
75 # print('rgb sum', rgbsum, r.sum(), g.sum(), b.sum())
77 print('image can have alpha channel dropped')
88 rgbequal
= rg_equal
.all() and gb_equal
.all()
90 print('image is black and white, can have channels reduced')
95 def numpytoimage(a
, iname
, width
=0, height
=0, channels
=3):
99 for image
in bpy
.data
.images
:
101 if image
.name
[:len(iname
)] == iname
and image
.size
[0] == a
.shape
[0] and image
.size
[1] == a
.shape
[1]:
106 bpy
.ops
.image
.new(name
=iname
, width
=width
, height
=height
, color
=(0, 0, 0, 1), alpha
=True,
107 generated_type
='BLANK', float=True)
109 bpy
.ops
.image
.new(name
=iname
, width
=width
, height
=height
, color
=(0, 0, 0), alpha
=False,
110 generated_type
='BLANK', float=True)
112 for image
in bpy
.data
.images
:
113 # print(image.name[:len(iname)],iname, image.size[0],a.shape[0],image.size[1],a.shape[1])
114 if image
.name
[:len(iname
)] == iname
and image
.size
[0] == width
and image
.size
[1] == height
:
117 # dropping this re-shaping code - just doing flat array for speed and simplicity
118 # d = a.shape[0] * a.shape[1]
119 # a = a.swapaxes(0, 1)
121 # a = a.repeat(channels)
123 i
.pixels
.foreach_set(a
) # this gives big speedup!
124 print('\ntime ' + str(time
.time() - t
))
135 size
= width
* height
* i
.channels
136 na
= np
.empty(size
, np
.float32
)
137 i
.pixels
.foreach_get(na
)
139 # dropping this re-shaping code - just doing flat array for speed and simplicity
141 # na = na.reshape(height, width, i.channels)
142 # na = na.swapaxnes(0, 1)
144 # print('\ntime of image to numpy ' + str(time.time() - t))
def save_image_safely(teximage, filepath):
    '''
    Blender makes it really hard to save images... this works around its
    image-saving quirks by temporarily retargeting the scene's render
    image settings to match the texture, saving via save_render, and then
    restoring the original render settings.
    Would be worth investigating PIL or similar instead.

    Parameters
    ----------
    teximage: bpy image to save
    filepath: path (may be blend-relative, '//...') to save the image to
    '''
    rs = bpy.context.scene.render
    ims = rs.image_settings

    # remember render output settings so they can be restored afterwards
    orig_file_format = ims.file_format
    orig_quality = ims.quality
    orig_color_mode = ims.color_mode
    orig_compression = ims.compression

    ims.file_format = teximage.file_format
    if teximage.file_format == 'PNG':
        ims.color_mode = 'RGBA'
    elif teximage.channels == 3:
        ims.color_mode = 'RGB'
    else:
        ims.color_mode = 'BW'

    # all pngs with max compression
    if ims.file_format == 'PNG':
        ims.compression = 100
    # all jpgs brought to reasonable quality
    if ims.file_format == 'JPG':
        ims.quality = JPEG_QUALITY
    # it's actually very important not to try to change the image filepath and packed file filepath before saving,
    # blender tries to re-pack the image after writing to image.packed_image.filepath and reverts any changes.
    teximage.save_render(filepath=bpy.path.abspath(filepath), scene=bpy.context.scene)

    # only after a successful save, point the image (and any packed files) at the new path
    teximage.filepath = filepath
    for packed_file in teximage.packed_files:
        packed_file.filepath = filepath
    teximage.filepath_raw = filepath

    # restore the original render output settings
    ims.file_format = orig_file_format
    ims.quality = orig_quality
    ims.color_mode = orig_color_mode
    ims.compression = orig_compression
200 def extxchange_to_resolution(filepath
):
201 base
, ext
= os
.path
.splitext(filepath
)
202 if ext
in ('.png', '.PNG'):
206 def make_possible_reductions_on_image(teximage
, input_filepath
, do_reductions
=False, do_downscale
=False):
207 '''checks the image and saves it to drive with possibly reduced channels.
208 Also can remove the image from the asset if the image is pure black
209 - it finds it's usages and replaces the inputs where the image is used
210 with zero/black color.
211 currently implemented file type conversions:
214 colorspace
= teximage
.colorspace_settings
.name
215 teximage
.colorspace_settings
.name
= 'Non-Color'
221 rs
= bpy
.context
.scene
.render
222 ims
= rs
.image_settings
224 orig_file_format
= ims
.file_format
225 orig_quality
= ims
.quality
226 orig_color_mode
= ims
.color_mode
227 orig_compression
= ims
.compression
229 # if is_image_black(na):
230 # # just erase the image from the asset here, no need to store black images.
233 # fp = teximage.filepath
236 na
= imagetonumpy(teximage
)
238 if can_erase_alpha(na
):
239 print(teximage
.file_format
)
240 if teximage
.file_format
== 'PNG':
241 print('changing type of image to JPG')
242 base
, ext
= os
.path
.splitext(fp
)
243 teximage
['original_extension'] = ext
245 fp
= fp
.replace('.png', '.jpg')
246 fp
= fp
.replace('.PNG', '.jpg')
248 teximage
.name
= teximage
.name
.replace('.png', '.jpg')
249 teximage
.name
= teximage
.name
.replace('.PNG', '.jpg')
251 teximage
.file_format
= 'JPEG'
252 ims
.quality
= JPEG_QUALITY
253 ims
.color_mode
= 'RGB'
256 ims
.color_mode
= 'BW'
258 ims
.file_format
= teximage
.file_format
260 # all pngs with max compression
261 if ims
.file_format
== 'PNG':
262 ims
.compression
= 100
263 # all jpgs brought to reasonable quality
264 if ims
.file_format
== 'JPG':
265 ims
.quality
= JPEG_QUALITY
270 # it's actually very important not to try to change the image filepath and packed file filepath before saving,
271 # blender tries to re-pack the image after writing to image.packed_image.filepath and reverts any changes.
272 teximage
.save_render(filepath
=bpy
.path
.abspath(fp
), scene
=bpy
.context
.scene
)
273 if len(teximage
.packed_files
) > 0:
274 teximage
.unpack(method
='REMOVE')
275 teximage
.filepath
= fp
276 teximage
.filepath_raw
= fp
279 teximage
.colorspace_settings
.name
= colorspace
281 ims
.file_format
= orig_file_format
282 ims
.quality
= orig_quality
283 ims
.color_mode
= orig_color_mode
284 ims
.compression
= orig_compression
293 if sx
> minsize
and sy
> minsize
:
def upload_resolutions(files, asset_data):
    """Upload generated resolution files for an asset to the server.

    Parameters
    ----------
    files: list of file descriptors accepted by upload_bg.upload_files
    asset_data: asset dict; 'name' and 'id' are used to build the upload metadata
    """
    preferences = bpy.context.preferences.addons['blenderkit'].preferences

    upload_data = {
        "name": asset_data['name'],
        "token": preferences.api_key,
        "id": asset_data['id'],
    }

    uploaded = upload_bg.upload_files(upload_data, files)

    if uploaded:
        bg_blender.progress('upload finished successfully')
    else:
        bg_blender.progress('upload failed.')
def unpack_asset(data):
    """Unpack all packed images of the open .blend file to a texture directory.

    Retargets each image's filepath to the resolution-specific texture
    directory, unpacks it there, disables autopack, and re-saves the file.

    Parameters
    ----------
    data: dict with 'asset_data' (asset dict; may carry a 'resolution' key)
    """
    utils.p('unpacking asset')
    asset_data = data['asset_data']
    # utils.pprint(asset_data)

    blend_file_name = os.path.basename(bpy.data.filepath)
    ext = os.path.splitext(blend_file_name)[1]

    resolution = asset_data.get('resolution', 'blend')
    # TODO - passing resolution inside asset data might not be the best solution
    tex_dir_path = paths.get_texture_directory(asset_data, resolution=resolution)
    tex_dir_abs = bpy.path.abspath(tex_dir_path)
    if not os.path.exists(tex_dir_abs):
        try:
            os.mkdir(tex_dir_abs)
        except Exception as e:
            print(e)

    bpy.data.use_autopack = False
    for image in bpy.data.images:
        if image.name != 'Render Result':
            # suffix = paths.resolution_suffix(data['suffix'])
            fp = get_texture_filepath(tex_dir_path, image, resolution=resolution)
            utils.p('unpacking file', image.name)
            utils.p(image.filepath, fp)

            for pf in image.packed_files:
                pf.filepath = fp  # bpy.path.abspath(fp)
            image.filepath = fp  # bpy.path.abspath(fp)
            image.filepath_raw = fp  # bpy.path.abspath(fp)

            if len(image.packed_files) > 0:
                image.unpack(method='REMOVE')

    bpy.ops.wm.save_mainfile(compress=False)
    # now try to delete the .blend1 file
    try:
        os.remove(bpy.data.filepath + '1')
    except Exception as e:
        print(e)
def patch_asset_empty(asset_id, api_key):
    '''
    This function patches the asset with empty data for the purpose of it getting a reindex.
    Should be removed once this is fixed on the server and
    the server is able to reindex after uploads of resolutions.

    Parameters
    ----------
    asset_id: server id of the asset
    api_key: API key used for authentication headers
    '''
    upload_data = {}
    url = paths.get_api_url() + 'assets/' + str(asset_id) + '/'
    headers = utils.get_headers(api_key)
    try:
        r = rerequests.patch(url, json=upload_data, headers=headers, verify=True)  # files = files,
    except requests.exceptions.RequestException as e:
        print(e)
        return {'CANCELLED'}
    return {'FINISHED'}
def reduce_all_images(target_scale=1024):
    """Downscale every image in the file so its longest edge is at most target_scale.

    Skips the 'Render Result' image. Images already at or below the target
    size are left untouched. Scaling is done in place via Image.scale.
    """
    for img in bpy.data.images:
        if img.name != 'Render Result':
            print('scaling ', img.name, img.size[0], img.size[1])
            # make_possible_reductions_on_image(i)
            if max(img.size) > target_scale:
                ratio = float(target_scale) / float(max(img.size))

                # NOTE(review): the original redirected the image path to a temp
                # name before scaling — presumably to avoid overwriting the source
                # file; confirm against callers.
                fp = '//tempimagestorage'
                # print('generated filename',fp)
                # for pf in img.packed_files:
                #     pf.filepath = fp  # bpy.path.abspath(fp)
                img.filepath_raw = fp
                print(int(img.size[0] * ratio), int(img.size[1] * ratio))
                img.scale(int(img.size[0] * ratio), int(img.size[1] * ratio))
def get_texture_filepath(tex_dir_path, image, resolution='blend'):
    """Build a unique target filepath in tex_dir_path for the given image.

    Falls back to the image datablock name when the image has no filepath.
    If another image datablock already uses the candidate path, a numeric
    suffix is appended until the path is unique among bpy.data.images.

    Parameters
    ----------
    tex_dir_path: directory the texture should be written to
    image: bpy image datablock
    resolution: resolution key used to pick the filename suffix
    """
    image_file_name = bpy.path.basename(image.filepath)
    if image_file_name == '':
        image_file_name = image.name.split('.')[0]

    suffix = paths.resolution_suffix[resolution]

    fp = os.path.join(tex_dir_path, image_file_name)
    # check if there is allready an image with same name and thus also assigned path
    # (can happen easily with genearted tex sets and more materials)
    # NOTE(review): the de-duplication scaffold below was reconstructed from a
    # damaged source — confirm the counter/loop behavior against the upstream file.
    done = False
    fpn = fp
    i = 0
    while not done:
        done = True
        for image1 in bpy.data.images:
            if image != image1 and image1.filepath == fpn:
                done = False
                fpleft, fpext = os.path.splitext(fp)
                fpn = fpleft + str(i).zfill(3) + fpext
                i += 1
    return fpn
426 def generate_lower_resolutions_hdr(asset_data
, fpath
):
427 '''generates lower resolutions for HDR images'''
428 hdr
= bpy
.data
.images
.load(fpath
)
429 actres
= max(hdr
.size
[0], hdr
.size
[1])
430 p2res
= paths
.round_to_closest_resolution(actres
)
431 original_filesize
= os
.path
.getsize(fpath
) # for comparison on the original level
436 dirn
= os
.path
.dirname(fpath
)
437 fn_strip
, ext
= os
.path
.splitext(fpath
)
443 hdr_resolution_filepath
= fn_strip
+ paths
.resolution_suffix
[p2res
] + ext
444 image_utils
.img_save_as(hdr
, filepath
=hdr_resolution_filepath
, file_format
='OPEN_EXR', quality
=20, color_mode
='RGB', compression
=15,
445 view_transform
='Raw', exr_codec
= 'DWAA')
447 if os
.path
.exists(hdr_resolution_filepath
):
448 reduced_filesize
= os
.path
.getsize(hdr_resolution_filepath
)
451 print(f
'HDR size was reduced from {original_filesize} to {reduced_filesize}')
452 if reduced_filesize
< original_filesize
:
453 # this limits from uploaidng especially same-as-original resolution files in case when there is no advantage.
454 # usually however the advantage can be big also for same as original resolution
458 "file_path": hdr_resolution_filepath
461 print('prepared resolution file: ', p2res
)
463 if rkeys
.index(p2res
) == 0:
466 p2res
= rkeys
[rkeys
.index(p2res
) - 1]
469 print('uploading resolution files')
470 upload_resolutions(files
, asset_data
)
472 preferences
= bpy
.context
.preferences
.addons
['blenderkit'].preferences
473 patch_asset_empty(asset_data
['id'], preferences
.api_key
)
476 def generate_lower_resolutions(data
):
477 asset_data
= data
['asset_data']
478 actres
= get_current_resolution()
479 # first let's skip procedural assets
480 base_fpath
= bpy
.data
.filepath
482 s
= bpy
.context
.scene
484 print('current resolution of the asset ', actres
)
486 p2res
= paths
.round_to_closest_resolution(actres
)
491 # now skip assets that have lowest possible resolution already
493 original_textures_filesize
= 0
494 for i
in bpy
.data
.images
:
495 abspath
= bpy
.path
.abspath(i
.filepath
)
496 if os
.path
.exists(abspath
):
497 original_textures_filesize
+= os
.path
.getsize(abspath
)
501 blend_file_name
= os
.path
.basename(base_fpath
)
503 dirn
= os
.path
.dirname(base_fpath
)
504 fn_strip
, ext
= os
.path
.splitext(blend_file_name
)
506 fn
= fn_strip
+ paths
.resolution_suffix
[p2res
] + ext
507 fpath
= os
.path
.join(dirn
, fn
)
509 tex_dir_path
= paths
.get_texture_directory(asset_data
, resolution
=p2res
)
511 tex_dir_abs
= bpy
.path
.abspath(tex_dir_path
)
512 if not os
.path
.exists(tex_dir_abs
):
513 os
.mkdir(tex_dir_abs
)
515 reduced_textures_filessize
= 0
516 for i
in bpy
.data
.images
:
517 if i
.name
!= 'Render Result':
519 print('scaling ', i
.name
, i
.size
[0], i
.size
[1])
520 fp
= get_texture_filepath(tex_dir_path
, i
, resolution
=p2res
)
522 if p2res
== orig_res
:
523 # first, let's link the image back to the original one.
524 i
['blenderkit_original_path'] = i
.filepath
525 # first round also makes reductions on the image, while keeping resolution
526 make_possible_reductions_on_image(i
, fp
, do_reductions
=True, do_downscale
=False)
529 # lower resolutions only downscale
530 make_possible_reductions_on_image(i
, fp
, do_reductions
=False, do_downscale
=True)
532 abspath
= bpy
.path
.abspath(i
.filepath
)
533 if os
.path
.exists(abspath
):
534 reduced_textures_filessize
+= os
.path
.getsize(abspath
)
540 bpy
.ops
.wm
.save_as_mainfile(filepath
=fpath
, compress
=True, copy
=True)
542 print(f
'textures size was reduced from {original_textures_filesize} to {reduced_textures_filessize}')
543 if reduced_textures_filessize
< original_textures_filesize
:
544 # this limits from uploaidng especially same-as-original resolution files in case when there is no advantage.
545 # usually however the advantage can be big also for same as original resolution
552 print('prepared resolution file: ', p2res
)
553 if rkeys
.index(p2res
) == 0:
556 p2res
= rkeys
[rkeys
.index(p2res
) - 1]
557 print('uploading resolution files')
558 upload_resolutions(files
, data
['asset_data'])
559 preferences
= bpy
.context
.preferences
.addons
['blenderkit'].preferences
560 patch_asset_empty(data
['asset_data']['id'], preferences
.api_key
)
def regenerate_thumbnail_material(data):
    """Re-generate a material thumbnail so it can be re-uploaded.

    Adds a temporary cube, assigns the first material in the file to it,
    configures BlenderKit thumbnail settings on that material, and runs
    the material thumbnailer operator.
    """
    # first let's skip procedural assets
    base_fpath = bpy.data.filepath
    blend_file_name = os.path.basename(base_fpath)

    # a preview object to hold the material while thumbnailing
    bpy.ops.mesh.primitive_cube_add()
    preview_object = bpy.context.active_object
    bpy.ops.object.material_slot_add()
    preview_object.material_slots[0].material = bpy.data.materials[0]

    bk_props = preview_object.active_material.blenderkit
    bk_props.thumbnail_generator_type = 'BALL'
    bk_props.thumbnail_background = False
    bk_props.thumbnail_resolution = '256'

    # TODO: here it should call start_material_thumbnailer, but with the wait
    # property on, so it can upload afterwards.
    bpy.ops.object.blenderkit_material_thumbnail()
592 # this does the actual job
def assets_db_path():
    """Return the path of the all-assets JSON database, next to the open .blend file."""
    dpath = os.path.dirname(bpy.data.filepath)
    fpath = os.path.join(dpath, 'all_assets.json')
    return fpath
603 def get_assets_search():
604 # bpy.app.debug_value = 2
607 preferences
= bpy
.context
.preferences
.addons
['blenderkit'].preferences
608 url
= paths
.get_api_url() + 'search/all'
610 while url
is not None:
611 headers
= utils
.get_headers(preferences
.api_key
)
612 print('fetching assets from assets endpoint')
616 r
= rerequests
.get(url
, headers
=headers
)
620 url
= adata
.get('next')
623 except Exception as e
:
625 print('failed to get next')
628 if adata
.get('results') != None:
629 results
.extend(adata
['results'])
631 print(f
'fetched page {i}')
634 fpath
= assets_db_path()
635 with
open(fpath
, 'w', encoding
= 'utf-8') as s
:
636 json
.dump(results
, s
, ensure_ascii
=False, indent
=4)
def get_assets_for_resolutions(page_size=100, max_results=100000000):
    """Fetch the list of assets that are candidates for resolution generation.

    Results are written by search.get_search_simple into a JSON file next to
    the open .blend file.

    Returns
    -------
    the filepath of the written JSON file
    """
    preferences = bpy.context.preferences.addons['blenderkit'].preferences

    dpath = os.path.dirname(bpy.data.filepath)
    filepath = os.path.join(dpath, 'assets_for_resolutions.json')
    params = {
        'textureResolutionMax_gte': '100',
        # 'last_resolution_upload_lt':'2020-9-01'
    }
    search.get_search_simple(params, filepath=filepath, page_size=page_size, max_results=max_results,
                             api_key=preferences.api_key)
    return filepath
def get_materials_for_validation(page_size=100, max_results=100000000):
    """Fetch materials in 'uploaded' verification state for validation work.

    Results are written by search.get_search_simple into a JSON file next to
    the open .blend file.

    Returns
    -------
    the filepath of the written JSON file
    """
    preferences = bpy.context.preferences.addons['blenderkit'].preferences
    dpath = os.path.dirname(bpy.data.filepath)
    filepath = os.path.join(dpath, 'materials_for_validation.json')
    params = {
        'asset_type': 'material',
        'verification_status': 'uploaded'
    }
    search.get_search_simple(params, filepath=filepath, page_size=page_size, max_results=max_results,
                             api_key=preferences.api_key)
    return filepath
668 # This gets all assets in the database through the/assets endpoint. Currently not used, since we use elastic for everything.
669 # def get_assets_list():
670 # bpy.app.debug_value = 2
673 # preferences = bpy.context.preferences.addons['blenderkit'].preferences
674 # url = paths.get_api_url() + 'assets/all'
676 # while url is not None:
677 # headers = utils.get_headers(preferences.api_key)
678 # print('fetching assets from assets endpoint')
682 # r = rerequests.get(url, headers=headers)
686 # url = adata.get('next')
689 # except Exception as e:
691 # print('failed to get next')
694 # if adata.get('results') != None:
695 # results.extend(adata['results'])
697 # print(f'fetched page {i}')
700 # fpath = assets_db_path()
701 # with open(fpath, 'w', encoding = 'utf-8') as s:
702 # json.dump(results, s, ensure_ascii=False, indent=4)
def load_assets_list(filepath):
    """Load a JSON list of assets from filepath.

    Returns an empty list when the file does not exist, so callers can
    iterate the result unconditionally.
    """
    assets = []
    if os.path.exists(filepath):
        with open(filepath, 'r', encoding='utf-8') as s:
            assets = json.load(s)
    return assets
def check_needs_resolutions(a):
    """Return True when asset dict `a` should get resolution files generated.

    Only validated assets of supported types qualify, and only when no
    'resolution*' file is already present in the asset's files.
    """
    if a['verificationStatus'] == 'validated' and a['assetType'] in ('material', 'model', 'scene', 'hdr'):
        # the search itself now picks the right assets so there's no need to filter more than asset types.
        # TODO needs to check first if the upload date is older than resolution upload date, for that we need resolution upload date.
        for f in a['files']:
            if f['fileType'].find('resolution') > -1:
                # resolutions were already generated for this asset
                return False
        return True
    return False
def download_asset(asset_data, resolution='blend', unpack=False, api_key=''):
    '''
    Download an asset non-threaded way.

    Parameters
    ----------
    asset_data - search result from elastic or assets endpoints from API
    resolution - resolution key to download
    unpack - when True, non-HDR assets get unpacked in a background Blender
    api_key - API key for authenticated download

    Returns
    -------
    path to the resulting asset file or None if asset isn't accessible
    '''
    has_url = download.get_download_url(asset_data, download.get_scene_id(), api_key, tcom=None,
                                        resolution=resolution)
    if has_url:
        fpath = download.download_file(asset_data)
        if fpath and unpack and asset_data['assetType'] != 'hdr':
            send_to_bg(asset_data, fpath, command='unpack', wait=True)
        return fpath
    return None
def generate_resolution_thread(asset_data, api_key):
    '''
    A thread that downloads the asset file and only then starts an instance of
    Blender that generates the resolutions (or, for HDRs, generates them directly
    in this process).
    '''
    fpath = download_asset(asset_data, unpack=True, api_key=api_key)
    if fpath:
        if asset_data['assetType'] != 'hdr':
            print('send to bg ', fpath)
            proc = send_to_bg(asset_data, fpath, command='generate_resolutions', wait=True)
        else:
            generate_lower_resolutions_hdr(asset_data, fpath)
        # send_to_bg by now waits for end of the process.
766 # send_to_bg by now waits for end of the process.
770 def iterate_for_resolutions(filepath
, process_count
=12, api_key
='', do_checks
= True):
771 ''' iterate through all assigned assets, check for those which need generation and send them to res gen'''
772 assets
= load_assets_list(filepath
)
775 for asset_data
in assets
:
776 asset_data
= search
.parse_result(asset_data
)
777 if asset_data
is not None:
779 if not do_checks
or check_needs_resolutions(asset_data
):
780 print('downloading and generating resolution for %s' % asset_data
['name'])
781 # this is just a quick hack for not using original dirs in blendrkit...
782 generate_resolution_thread(asset_data
, api_key
)
783 # thread = threading.Thread(target=generate_resolution_thread, args=(asset_data, api_key))
786 # threads.append(thread)
787 # print('processes ', len(threads))
788 # while len(threads) > process_count - 1:
790 # if not t.is_alive():
794 # print(f'Failed to generate resolution:{asset_data["name"]}')
796 print('not generated resolutions:', asset_data
['name'])
799 def send_to_bg(asset_data
, fpath
, command
='generate_resolutions', wait
=True):
801 Send varioust task to a new blender instance that runs and closes after finishing the task.
802 This function waits until the process finishes.
803 The function tries to set the same bpy.app.debug_value in the instance of Blender that is run.
807 fpath - file that will be processed
808 command - command which should be run in background.
816 'debug_value': bpy
.app
.debug_value
,
817 'asset_data': asset_data
,
820 binary_path
= bpy
.app
.binary_path
821 tempdir
= tempfile
.mkdtemp()
822 datafile
= os
.path
.join(tempdir
+ 'resdata.json')
823 script_path
= os
.path
.dirname(os
.path
.realpath(__file__
))
824 with
open(datafile
, 'w', encoding
= 'utf-8') as s
:
825 json
.dump(data
, s
, ensure_ascii
=False, indent
=4)
827 print('opening Blender instance to do processing - ', command
)
830 proc
= subprocess
.run([
835 "--python", os
.path
.join(script_path
, "resolutions_bg.py"),
837 ], bufsize
=1, stdout
=sys
.stdout
, stdin
=subprocess
.PIPE
, creationflags
=utils
.get_process_flags())
840 # TODO this should be fixed to allow multithreading.
841 proc
= subprocess
.Popen([
846 "--python", os
.path
.join(script_path
, "resolutions_bg.py"),
848 ], bufsize
=1, stdout
=subprocess
.PIPE
, stdin
=subprocess
.PIPE
, creationflags
=utils
.get_process_flags())
def write_data_back(asset_data):
    '''ensures that the data in the resolution file is the same as in the database.'''
    # NOTE(review): the original body is missing from this damaged source;
    # currently a no-op placeholder — confirm against the upstream file.
    pass
def run_bg(datafile):
    """Entry point for the background Blender instance.

    Loads the JSON task description written by send_to_bg and dispatches to
    the requested command.

    Parameters
    ----------
    datafile: path to a JSON file with 'debug_value', 'asset_data' and 'command'
    """
    print('background file operation')
    with open(datafile, 'r', encoding='utf-8') as f:
        data = json.load(f)
    bpy.app.debug_value = data['debug_value']
    write_data_back(data['asset_data'])
    if data['command'] == 'generate_resolutions':
        generate_lower_resolutions(data)
    elif data['command'] == 'unpack':
        unpack_asset(data)
    elif data['command'] == 'regen_thumbnail':
        regenerate_thumbnail_material(data)
871 # generate_lower_resolutions()
872 # class TestOperator(bpy.types.Operator):
874 # bl_idname = "object.test_anything"
875 # bl_label = "Test Operator"
878 # def poll(cls, context):
881 # def execute(self, context):
882 # iterate_for_resolutions()
883 # return {'FINISHED'}
887 # bpy.utils.register_class(TestOperator)
891 # bpy.utils.unregister_class(TestOperator)