/**
 * @file llimage.cpp
 * @brief Base class for images.
 *
 * $LicenseInfo:firstyear=2001&license=viewergpl$
 *
 * Copyright (c) 2001-2009, Linden Research, Inc.
 *
 * Second Life Viewer Source Code
 * The source code in this file ("Source Code") is provided by Linden Lab
 * to you under the terms of the GNU General Public License, version 2.0
 * ("GPL"), unless you have obtained a separate licensing agreement
 * ("Other License"), formally executed by you and Linden Lab. Terms of
 * the GPL can be found in doc/GPL-license.txt in this distribution, or
 * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2
 *
 * There are special exceptions to the terms and conditions of the GPL as
 * it is applied to this Source Code. View the full text of the exception
 * in the file doc/FLOSS-exception.txt in this software distribution, or
 * online at
 * http://secondlifegrid.net/programs/open_source/licensing/flossexception
 *
 * By copying, modifying or distributing this software, you acknowledge
 * that you have read and understood your obligations described above,
 * and agree to abide by those obligations.
 *
 * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO
 * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY,
 * COMPLETENESS OR PERFORMANCE.
 * $/LicenseInfo$
 */

#include "linden_common.h"

#include <algorithm>

#include "boost/preprocessor.hpp"

#include "llimage.h"

#include "llcolor4u.h"
#include "llmath.h"
#include "llimagebmp.h"
#include "llimagetga.h"
#include "llimagej2c.h"
#include "llimagejpeg.h"
#include "llimagepng.h"
///////////////////////////////////////////////////////////////////////////////
// Helper macros to generate the loop unrolling templates
///////////////////////////////////////////////////////////////////////////////

#define _UNROL_GEN_TPL_arg_0(arg)
#define _UNROL_GEN_TPL_arg_1(arg) arg

#define _UNROL_GEN_TPL_comma_0
#define _UNROL_GEN_TPL_comma_1 BOOST_PP_COMMA()

#define _UNROL_GEN_TPL_ARGS_macro(z, n, seq) \
    BOOST_PP_CAT(_UNROL_GEN_TPL_arg_, BOOST_PP_MOD(n, 2))(BOOST_PP_SEQ_ELEM(n, seq)) \
    BOOST_PP_CAT(_UNROL_GEN_TPL_comma_, BOOST_PP_AND(BOOST_PP_MOD(n, 2), BOOST_PP_NOT_EQUAL(BOOST_PP_INC(n), BOOST_PP_SEQ_SIZE(seq))))

#define _UNROL_GEN_TPL_ARGS(seq) \
    BOOST_PP_REPEAT(BOOST_PP_SEQ_SIZE(seq), _UNROL_GEN_TPL_ARGS_macro, seq)

#define _UNROL_GEN_TPL_TYPE_ARGS_macro(z, n, seq) \
    BOOST_PP_SEQ_ELEM(n, seq) \
    BOOST_PP_CAT(_UNROL_GEN_TPL_comma_, BOOST_PP_AND(BOOST_PP_MOD(n, 2), BOOST_PP_NOT_EQUAL(BOOST_PP_INC(n), BOOST_PP_SEQ_SIZE(seq))))

#define _UNROL_GEN_TPL_TYPE_ARGS(seq) \
    BOOST_PP_REPEAT(BOOST_PP_SEQ_SIZE(seq), _UNROL_GEN_TPL_TYPE_ARGS_macro, seq)

#define _UNROLL_GEN_TPL_foreach_ee(z, n, seq) \
    executor<n>(_UNROL_GEN_TPL_ARGS(seq));

#define _UNROLL_GEN_TPL(name, args_seq, operation, spec) \
    template<> struct name<spec> \
    { \
    private: \
        template<S32 _idx> inline void executor(_UNROL_GEN_TPL_TYPE_ARGS(args_seq)) \
        { \
            BOOST_PP_SEQ_ENUM(operation); \
        } \
    public: \
        inline void operator()(_UNROL_GEN_TPL_TYPE_ARGS(args_seq)) \
        { \
            BOOST_PP_REPEAT(spec, _UNROLL_GEN_TPL_foreach_ee, args_seq) \
        } \
    };

#define _UNROLL_GEN_TPL_foreach_seq_macro(r, data, elem) \
    _UNROLL_GEN_TPL(BOOST_PP_SEQ_ELEM(0, data), BOOST_PP_SEQ_ELEM(1, data), \
                    BOOST_PP_SEQ_ELEM(2, data), elem)

#define UNROLL_GEN_TPL(name, args_seq, operation, spec_seq) \
    /* General specialization - should not be instantiated ! */ \
    template<U8> struct name \
    { \
        inline void operator()(_UNROL_GEN_TPL_TYPE_ARGS(args_seq)) \
        { \
            /*static_assert(!"Should not be instantiated.");*/ \
        } \
    }; \
    BOOST_PP_SEQ_FOR_EACH(_UNROLL_GEN_TPL_foreach_seq_macro, (name)(args_seq)(operation), spec_seq)

///////////////////////////////////////////////////////////////////////////////
// Generated unrolling loop templates with specializations
///////////////////////////////////////////////////////////////////////////////

// example: for (c = 0; c < ch; ++c) cx[c] = comp[c] = 0;
UNROLL_GEN_TPL(uroll_zeroze_cx_comp, (S32*)(cx)(S32*)(comp), (cx[_idx] = comp[_idx] = 0), (1)(3)(4));
// example: for (c = 0; c < ch; ++c) comp[c] >>= 4;
UNROLL_GEN_TPL(uroll_comp_rshftasgn_constval, (S32*)(comp)(S32)(cval), (comp[_idx] >>= cval), (1)(3)(4));
// example: for (c = 0; c < ch; ++c) comp[c] = (cx[c] >> 5) * yap;
UNROLL_GEN_TPL(uroll_comp_asgn_cx_rshft_cval_all_mul_val, (S32*)(comp)(S32*)(cx)(S32)(cval)(S32)(val), (comp[_idx] = (cx[_idx] >> cval) * val), (1)(3)(4));
// example: for (c = 0; c < ch; ++c) comp[c] += (cx[c] >> 5) * Cy;
UNROLL_GEN_TPL(uroll_comp_plusasgn_cx_rshft_cval_all_mul_val, (S32*)(comp)(S32*)(cx)(S32)(cval)(S32)(val), (comp[_idx] += (cx[_idx] >> cval) * val), (1)(3)(4));
// example: for (c = 0; c < ch; ++c) comp[c] += pix[c] * info.xapoints[x];
UNROLL_GEN_TPL(uroll_inp_plusasgn_pix_mul_val, (S32*)(comp)(const U8*)(pix)(S32)(val), (comp[_idx] += pix[_idx] * val), (1)(3)(4));
// example: for (c = 0; c < ch; ++c) cx[c] = pix[c] * info.xapoints[x];
UNROLL_GEN_TPL(uroll_inp_asgn_pix_mul_val, (S32*)(comp)(const U8*)(pix)(S32)(val), (comp[_idx] = pix[_idx] * val), (1)(3)(4));
// example: for (c = 0; c < ch; ++c) comp[c] = ((cx[c] * info.yapoints[y]) + (comp[c] * (256 - info.yapoints[y]))) >> 16;
UNROLL_GEN_TPL(uroll_comp_asgn_cx_mul_apoint_plus_comp_mul_inv_apoint_allshifted_16_r, (S32*)(comp)(S32*)(cx)(S32)(apoint), (comp[_idx] = ((cx[_idx] * apoint) + (comp[_idx] * (256 - apoint))) >> 16), (1)(3)(4));
// example: for (c = 0; c < ch; ++c) comp[c] = (comp[c] + pix[c] * info.yapoints[y]) >> 8;
UNROLL_GEN_TPL(uroll_comp_asgn_comp_plus_pix_mul_apoint_allshifted_8_r, (S32*)(comp)(const U8*)(pix)(S32)(apoint), (comp[_idx] = (comp[_idx] + pix[_idx] * apoint) >> 8), (1)(3)(4));
// example: for (c = 0; c < ch; ++c) comp[c] = ((comp[c] * (256 - info.xapoints[x])) + (cx[c] * info.xapoints[x])) >> 12;
UNROLL_GEN_TPL(uroll_comp_asgn_comp_mul_inv_apoint_plus_cx_mul_apoint_allshifted_12_r, (S32*)(comp)(S32)(apoint)(S32*)(cx), (comp[_idx] = ((comp[_idx] * (256 - apoint)) + (cx[_idx] * apoint)) >> 12), (1)(3)(4));
// example: for (c = 0; c < ch; ++c) *dptr++ = comp[c] & 0xff;
UNROLL_GEN_TPL(uroll_uref_dptr_inc_asgn_comp_and_ff, (U8*&)(dptr)(S32*)(comp), (*dptr++ = comp[_idx] & 0xff), (1)(3)(4));
// example: for (c = 0; c < ch; ++c) *dptr++ = (sptr[info.xpoints[x] * ch + c]) & 0xff;
UNROLL_GEN_TPL(uroll_uref_dptr_inc_asgn_sptr_apoint_plus_idx_alland_ff, (U8*&)(dptr)(const U8*)(sptr)(S32)(apoint), (*dptr++ = sptr[apoint + _idx] & 0xff), (1)(3)(4));
// example: for (c = 0; c < ch; ++c) *dptr++ = (comp[c] >> 10) & 0xff;
UNROLL_GEN_TPL(uroll_uref_dptr_inc_asgn_comp_rshft_cval_and_ff, (U8*&)(dptr)(S32*)(comp)(S32)(cval), (*dptr++ = (comp[_idx] >> cval) & 0xff), (1)(3)(4));
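
// Illustrative sketch (not generated by the build, just for the reader):
// for ch == 3, the UNROLL_GEN_TPL invocation for uroll_zeroze_cx_comp above
// expands to roughly the following specialization:
//
//   template<> struct uroll_zeroze_cx_comp<3>
//   {
//   private:
//       template<S32 _idx> inline void executor(S32* cx, S32* comp)
//       {
//           cx[_idx] = comp[_idx] = 0;
//       }
//   public:
//       inline void operator()(S32* cx, S32* comp)
//       {
//           executor<0>(cx, comp);
//           executor<1>(cx, comp);
//           executor<2>(cx, comp);
//       }
//   };
//
// i.e. a fully unrolled "for (c = 0; c < 3; ++c) cx[c] = comp[c] = 0;".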
template<U8 ch>
class scale_info
{
public:
    std::vector<S32> xpoints;
    std::vector<const U8*> ystrides;
    std::vector<S32> xapoints;
    std::vector<S32> yapoints;
    S32 xup_yup;

public:
    // Unrolling loop types declaration
    typedef uroll_zeroze_cx_comp<ch> uroll_zeroze_cx_comp_t;
    typedef uroll_comp_rshftasgn_constval<ch> uroll_comp_rshftasgn_constval_t;
    typedef uroll_comp_asgn_cx_rshft_cval_all_mul_val<ch> uroll_comp_asgn_cx_rshft_cval_all_mul_val_t;
    typedef uroll_comp_plusasgn_cx_rshft_cval_all_mul_val<ch> uroll_comp_plusasgn_cx_rshft_cval_all_mul_val_t;
    typedef uroll_inp_plusasgn_pix_mul_val<ch> uroll_inp_plusasgn_pix_mul_val_t;
    typedef uroll_inp_asgn_pix_mul_val<ch> uroll_inp_asgn_pix_mul_val_t;
    typedef uroll_comp_asgn_cx_mul_apoint_plus_comp_mul_inv_apoint_allshifted_16_r<ch> uroll_comp_asgn_cx_mul_apoint_plus_comp_mul_inv_apoint_allshifted_16_r_t;
    typedef uroll_comp_asgn_comp_plus_pix_mul_apoint_allshifted_8_r<ch> uroll_comp_asgn_comp_plus_pix_mul_apoint_allshifted_8_r_t;
    typedef uroll_comp_asgn_comp_mul_inv_apoint_plus_cx_mul_apoint_allshifted_12_r<ch> uroll_comp_asgn_comp_mul_inv_apoint_plus_cx_mul_apoint_allshifted_12_r_t;
    typedef uroll_uref_dptr_inc_asgn_comp_and_ff<ch> uroll_uref_dptr_inc_asgn_comp_and_ff_t;
    typedef uroll_uref_dptr_inc_asgn_sptr_apoint_plus_idx_alland_ff<ch> uroll_uref_dptr_inc_asgn_sptr_apoint_plus_idx_alland_ff_t;
    typedef uroll_uref_dptr_inc_asgn_comp_rshft_cval_and_ff<ch> uroll_uref_dptr_inc_asgn_comp_rshft_cval_and_ff_t;

public:
    scale_info(const U8* src, U32 srcW, U32 srcH, U32 dstW, U32 dstH,
               U32 srcStride)
    :   xup_yup((dstW >= srcW) + ((dstH >= srcH) << 1))
    {
        calc_x_points(srcW, dstW);
        calc_y_strides(src, srcStride, srcH, dstH);
        calc_aa_points(srcW, dstW, xup_yup & 1, xapoints);
        calc_aa_points(srcH, dstH, xup_yup & 2, yapoints);
    }

private:
    void calc_x_points(U32 srcW, U32 dstW)
    {
        xpoints.resize(dstW + 1);
        S32 val = dstW >= srcW ? 0x8000 * srcW / dstW - 0x8000 : 0;
        S32 inc = (srcW << 16) / dstW;
        for (U32 i = 0, j = 0; i < dstW; ++i, ++j, val += inc)
        {
            xpoints[j] = llmax(0, val >> 16);
        }
    }

    void calc_y_strides(const U8* src, U32 srcStride, U32 srcH, U32 dstH)
    {
        ystrides.resize(dstH + 1);
        S32 val = dstH >= srcH ? 0x8000 * srcH / dstH - 0x8000 : 0;
        S32 inc = (srcH << 16) / dstH;
        for (U32 i = 0, j = 0; i < dstH; ++i, ++j, val += inc)
        {
            ystrides[j] = src + llmax(0, val >> 16) * srcStride;
        }
    }

    void calc_aa_points(U32 srcSz, U32 dstSz, bool scale_up,
                        std::vector<S32>& vp)
    {
        vp.resize(dstSz);
        if (scale_up)
        {
            S32 val = 0x8000 * srcSz / dstSz - 0x8000;
            S32 inc = (srcSz << 16) / dstSz;
            U32 pos;
            for (U32 i = 0, j = 0; i < dstSz; ++i, ++j, val += inc)
            {
                pos = val >> 16;
                if (pos >= srcSz - 1)
                {
                    vp[j] = 0;
                }
                else
                {
                    S32 tmp = val >> 8;
                    vp[j] = tmp - (tmp & 0xffffff00);
                }
            }
        }
        else
        {
            S32 inc = (srcSz << 16) / dstSz;
            S32 cp = (dstSz << 14) / srcSz + 1;
            S32 ap;
            for (U32 i = 0, j = 0, val = 0; i < dstSz; ++i, ++j, val += inc)
            {
                ap = ((0x100 - ((val >> 8) & 0xff)) * cp) >> 8;
                vp[j] = ap | (cp << 16);
            }
        }
    }
};
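
// Note on the precomputed tables above: xup_yup packs the two scaling
// directions into two bits (bit 0 set when dstW >= srcW, bit 1 set when
// dstH >= srcH), so 3 means upscaling on both axes and 0 downscaling on
// both. In the downscaling case, each xapoints/yapoints entry packs two
// fixed point values into one S32: the constant per-source-pixel weight in
// the high 16 bits (unpacked below as Cx/Cy) and the first-pixel coverage
// in the low 16 bits (unpacked as xap/yap).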
template<U8 ch>
LL_INLINE void bilinear_scale(const U8* src, U32 srcW, U32 srcH,
                              U32 srcStride, U8* dst, U32 dstW, U32 dstH,
                              U32 dstStride)
{
    typedef scale_info<ch> scale_info_t;

    scale_info_t info(src, srcW, srcH, dstW, dstH, srcStride);

    const U8* sptr;
    const U8* pix;
    U8* dptr;
    U32 x, y;
    S32 cx[ch], comp[ch];

    if (info.xup_yup == 3)
    {
        // scale x/y - up
        for (y = 0; y < dstH; ++y)
        {
            dptr = dst + y * dstStride;
            sptr = info.ystrides[y];
            if (info.yapoints[y] > 0)
            {
                for (x = 0; x < dstW; ++x)
                {
                    //for (c = 0; c < ch; ++c) cx[c] = comp[c] = 0;
                    typename scale_info_t::uroll_zeroze_cx_comp_t()(cx, comp);
                    if (info.xapoints[x] > 0)
                    {
                        pix = info.ystrides[y] + info.xpoints[x] * ch;
                        //for (c = 0; c < ch; ++c) comp[c] = pix[c] * (256 - info.xapoints[x]);
                        typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(comp, pix, 256 - info.xapoints[x]);
                        pix += ch;
                        //for (c = 0; c < ch; ++c) comp[c] += pix[c] * info.xapoints[x];
                        typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(comp, pix, info.xapoints[x]);
                        pix += srcStride;
                        //for (c = 0; c < ch; ++c) cx[c] = pix[c] * info.xapoints[x];
                        typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(cx, pix, info.xapoints[x]);
                        pix -= ch;
                        //for (c = 0; c < ch; ++c) {
                        //  cx[c] += pix[c] * (256 - info.xapoints[x]);
                        //  comp[c] = ((cx[c] * info.yapoints[y]) + (comp[c] * (256 - info.yapoints[y]))) >> 16;
                        //  *dptr++ = comp[c] & 0xff;
                        //}
                        typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, 256 - info.xapoints[x]);
                        typename scale_info_t::uroll_comp_asgn_cx_mul_apoint_plus_comp_mul_inv_apoint_allshifted_16_r_t()(comp, cx, info.yapoints[y]);
                        typename scale_info_t::uroll_uref_dptr_inc_asgn_comp_and_ff_t()(dptr, comp);
                    }
                    else
                    {
                        pix = info.ystrides[y] + info.xpoints[x] * ch;
                        //for (c = 0; c < ch; ++c) comp[c] = pix[c] * (256 - info.yapoints[y]);
                        typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(comp, pix, 256 - info.yapoints[y]);
                        pix += srcStride;
                        //for (c = 0; c < ch; ++c) {
                        //  comp[c] = (comp[c] + pix[c] * info.yapoints[y]) >> 8;
                        //  *dptr++ = comp[c] & 0xff;
                        //}
                        typename scale_info_t::uroll_comp_asgn_comp_plus_pix_mul_apoint_allshifted_8_r_t()(comp, pix, info.yapoints[y]);
                        typename scale_info_t::uroll_uref_dptr_inc_asgn_comp_and_ff_t()(dptr, comp);
                    }
                }
            }
            else
            {
                for (x = 0; x < dstW; ++x)
                {
                    if (info.xapoints[x] > 0)
                    {
                        pix = info.ystrides[y] + info.xpoints[x] * ch;
                        //for (c = 0; c < ch; ++c) {
                        //  comp[c] = pix[c] * (256 - info.xapoints[x]);
                        //  comp[c] = (comp[c] + pix[c] * info.xapoints[x]) >> 8;
                        //  *dptr++ = comp[c] & 0xff;
                        //}
                        typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(comp, pix, 256 - info.xapoints[x]);
                        typename scale_info_t::uroll_comp_asgn_comp_plus_pix_mul_apoint_allshifted_8_r_t()(comp, pix, info.xapoints[x]);
                        typename scale_info_t::uroll_uref_dptr_inc_asgn_comp_and_ff_t()(dptr, comp);
                    }
                    else
                    {
                        //for (c = 0; c < ch; ++c) *dptr++ = (sptr[info.xpoints[x] * ch + c]) & 0xff;
                        typename scale_info_t::uroll_uref_dptr_inc_asgn_sptr_apoint_plus_idx_alland_ff_t()(dptr, sptr, info.xpoints[x] * ch);
                    }
                }
            }
        }
    }
    else if (info.xup_yup == 1)
    {
        // scaling down vertically
        S32 Cy, j;
        S32 yap;

        for (y = 0; y < dstH; ++y)
        {
            Cy = info.yapoints[y] >> 16;
            yap = info.yapoints[y] & 0xffff;

            dptr = dst + y * dstStride;
            for (x = 0; x < dstW; ++x)
            {
                pix = info.ystrides[y] + info.xpoints[x] * ch;
                //for (c = 0; c < ch; ++c) comp[c] = pix[c] * yap;
                typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(comp, pix, yap);
                pix += srcStride;
                for (j = (1 << 14) - yap; j > Cy; j -= Cy, pix += srcStride)
                {
                    //for (c = 0; c < ch; ++c) comp[c] += pix[c] * Cy;
                    typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(comp, pix, Cy);
                }
                if (j > 0)
                {
                    //for (c = 0; c < ch; ++c) comp[c] += pix[c] * j;
                    typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(comp, pix, j);
                }
                if (info.xapoints[x] > 0)
                {
                    pix = info.ystrides[y] + info.xpoints[x] * ch + ch;
                    //for (c = 0; c < ch; ++c) cx[c] = pix[c] * yap;
                    typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(cx, pix, yap);
                    pix += srcStride;
                    for (j = (1 << 14) - yap; j > Cy; j -= Cy)
                    {
                        //for (c = 0; c < ch; ++c) cx[c] += pix[c] * Cy;
                        typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, Cy);
                        pix += srcStride;
                    }
                    if (j > 0)
                    {
                        //for (c = 0; c < ch; ++c) cx[c] += pix[c] * j;
                        typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, j);
                    }
                    //for (c = 0; c < ch; ++c) comp[c] = ((comp[c] * (256 - info.xapoints[x])) + (cx[c] * info.xapoints[x])) >> 12;
                    typename scale_info_t::uroll_comp_asgn_comp_mul_inv_apoint_plus_cx_mul_apoint_allshifted_12_r_t()(comp, info.xapoints[x], cx);
                }
                else
                {
                    //for (c = 0; c < ch; ++c) comp[c] >>= 4;
                    typename scale_info_t::uroll_comp_rshftasgn_constval_t()(comp, 4);
                }
                //for (c = 0; c < ch; ++c) *dptr++ = (comp[c] >> 10) & 0xff;
                typename scale_info_t::uroll_uref_dptr_inc_asgn_comp_rshft_cval_and_ff_t()(dptr, comp, 10);
            }
        }
    }
    else if (info.xup_yup == 2)
    {
        // scaling down horizontally
        S32 Cx, j;
        S32 xap;

        for (y = 0; y < dstH; ++y)
        {
            dptr = dst + y * dstStride;
            for (x = 0; x < dstW; ++x)
            {
                Cx = info.xapoints[x] >> 16;
                xap = info.xapoints[x] & 0xffff;

                pix = info.ystrides[y] + info.xpoints[x] * ch;
                //for (c = 0; c < ch; ++c) comp[c] = pix[c] * xap;
                typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(comp, pix, xap);
                pix += ch;
                for (j = (1 << 14) - xap; j > Cx; j -= Cx)
                {
                    //for (c = 0; c < ch; ++c) comp[c] += pix[c] * Cx;
                    typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(comp, pix, Cx);
                    pix += ch;
                }
                if (j > 0)
                {
                    //for (c = 0; c < ch; ++c) comp[c] += pix[c] * j;
                    typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(comp, pix, j);
                }
                if (info.yapoints[y] > 0)
                {
                    pix = info.ystrides[y] + info.xpoints[x] * ch + srcStride;
                    //for (c = 0; c < ch; ++c) cx[c] = pix[c] * xap;
                    typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(cx, pix, xap);
                    pix += ch;
                    for (j = (1 << 14) - xap; j > Cx; j -= Cx)
                    {
                        //for (c = 0; c < ch; ++c) cx[c] += pix[c] * Cx;
                        typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, Cx);
                        pix += ch;
                    }
                    if (j > 0)
                    {
                        //for (c = 0; c < ch; ++c) cx[c] += pix[c] * j;
                        typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, j);
                    }
                    //for (c = 0; c < ch; ++c) comp[c] = ((comp[c] * (256 - info.yapoints[y])) + (cx[c] * info.yapoints[y])) >> 12;
                    typename scale_info_t::uroll_comp_asgn_comp_mul_inv_apoint_plus_cx_mul_apoint_allshifted_12_r_t()(comp, info.yapoints[y], cx);
                }
                else
                {
                    //for (c = 0; c < ch; ++c) comp[c] >>= 4;
                    typename scale_info_t::uroll_comp_rshftasgn_constval_t()(comp, 4);
                }
                //for (c = 0; c < ch; ++c) *dptr++ = (comp[c] >> 10) & 0xff;
                typename scale_info_t::uroll_uref_dptr_inc_asgn_comp_rshft_cval_and_ff_t()(dptr, comp, 10);
            }
        }
    }
    else
    {
        // scale x/y - down
        S32 Cx, Cy, i, j;
        S32 xap, yap;

        for (y = 0; y < dstH; ++y)
        {
            Cy = info.yapoints[y] >> 16;
            yap = info.yapoints[y] & 0xffff;

            dptr = dst + y * dstStride;
            for (x = 0; x < dstW; ++x)
            {
                Cx = info.xapoints[x] >> 16;
                xap = info.xapoints[x] & 0xffff;

                sptr = info.ystrides[y] + info.xpoints[x] * ch;
                pix = sptr;
                sptr += srcStride;
                //for (c = 0; c < ch; ++c) cx[c] = pix[c] * xap;
                typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(cx, pix, xap);
                pix += ch;
                for (i = (1 << 14) - xap; i > Cx; i -= Cx)
                {
                    //for (c = 0; c < ch; ++c) cx[c] += pix[c] * Cx;
                    typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, Cx);
                    pix += ch;
                }
                if (i > 0)
                {
                    //for (c = 0; c < ch; ++c) cx[c] += pix[c] * i;
                    typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, i);
                }
                //for (c = 0; c < ch; ++c) comp[c] = (cx[c] >> 5) * yap;
                typename scale_info_t::uroll_comp_asgn_cx_rshft_cval_all_mul_val_t()(comp, cx, 5, yap);

                for (j = (1 << 14) - yap; j > Cy; j -= Cy)
                {
                    pix = sptr;
                    sptr += srcStride;
                    //for (c = 0; c < ch; ++c) cx[c] = pix[c] * xap;
                    typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(cx, pix, xap);
                    pix += ch;
                    for (i = (1 << 14) - xap; i > Cx; i -= Cx)
                    {
                        //for (c = 0; c < ch; ++c) cx[c] += pix[c] * Cx;
                        typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, Cx);
                        pix += ch;
                    }
                    if (i > 0)
                    {
                        //for (c = 0; c < ch; ++c) cx[c] += pix[c] * i;
                        typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, i);
                    }
                    //for (c = 0; c < ch; ++c) comp[c] += (cx[c] >> 5) * Cy;
                    typename scale_info_t::uroll_comp_plusasgn_cx_rshft_cval_all_mul_val_t()(comp, cx, 5, Cy);
                }
                if (j > 0)
                {
                    pix = sptr;
                    sptr += srcStride;
                    //for (c = 0; c < ch; ++c) cx[c] = pix[c] * xap;
                    typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(cx, pix, xap);
                    pix += ch;
                    for (i = (1 << 14) - xap; i > Cx; i -= Cx)
                    {
                        //for (c = 0; c < ch; ++c) cx[c] += pix[c] * Cx;
                        typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, Cx);
                        pix += ch;
                    }
                    if (i > 0)
                    {
                        //for (c = 0; c < ch; ++c) cx[c] += pix[c] * i;
                        typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, i);
                    }
                    //for (c = 0; c < ch; ++c) comp[c] += (cx[c] >> 5) * j;
                    typename scale_info_t::uroll_comp_plusasgn_cx_rshft_cval_all_mul_val_t()(comp, cx, 5, j);
                }
                //for (c = 0; c < ch; ++c) *dptr++ = (comp[c] >> 23) & 0xff;
                typename scale_info_t::uroll_uref_dptr_inc_asgn_comp_rshft_cval_and_ff_t()(dptr, comp, 23);
            }
        }
    }
}
// Wrapper
static void bilinear_scale(const U8* src, U32 srcW, U32 srcH, U32 srcCh,
                           U32 srcStride, U8* dst, U32 dstW, U32 dstH,
                           U32 dstCh, U32 dstStride)
{
    llassert(srcCh == dstCh);
    switch (srcCh)
    {
        case 1:
            bilinear_scale<1>(src, srcW, srcH, srcStride, dst, dstW, dstH,
                              dstStride);
            break;

        case 3:
            bilinear_scale<3>(src, srcW, srcH, srcStride, dst, dstW, dstH,
                              dstStride);
            break;

        case 4:
            bilinear_scale<4>(src, srcW, srcH, srcStride, dst, dstW, dstH,
                              dstStride);
            break;

        default:
            llassert(false);
    }
}
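
// Minimal usage sketch for the wrapper above (hypothetical buffers; for
// tightly packed rows, the stride is simply width * components):
//
//   // Downscale a packed RGBA 256x256 source into a 128x128 destination:
//   // bilinear_scale(src, 256, 256, 4, 256 * 4, dst, 128, 128, 4, 128 * 4);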
// Helper function
LL_INLINE static U8 fastFractionalMult(U8 a, U8 b)
{
    U32 i = a * b + 128;
    return U8((i + (i >> 8)) >> 8);
}
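
// The expression above computes round(a * b / 255) exactly for all a and b
// in [0, 255]: the + 128 rounds instead of truncating, and
// (i + (i >> 8)) >> 8 is the classic integer shortcut for i / 255
// (equivalent to i * 257 >> 16). E.g. a = b = 255 gives i = 65153,
// (65153 + 254) >> 8 = 255, as expected for 1.0 * 1.0.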
//---------------------------------------------------------------------------
// LLImage
//---------------------------------------------------------------------------

// 5 MB seems to be the required space to fit all requests from the main
// thread (I get 5136384 as the max requested size during full sessions)...
constexpr size_t TEMP_DATA_BUFFER_SIZE = 5 * 1024 * 1024; // 5 MB

//static
std::string LLImage::sLastErrorMessage;
LLMutex* LLImage::sErrorMutex = NULL;
#if LL_JEMALLOC
// Initialize with a sane value, in case our allocator gets called before the
// jemalloc arena for it is set.
U32 LLImage::sMallocxFlags = MALLOCX_TCACHE_NONE;
#endif
U8* LLImage::sTempDataBuffer = NULL;
U32 LLImage::sTempDataBufferUsageCount = 0;
U32 LLImage::sDynamicBufferAllocationsCount = 0;
S32 LLImage::sMaxMainThreadTempBufferSizeRequest = 0;

//static
void LLImage::initClass()
{
    sErrorMutex = new LLMutex();

#if LL_JEMALLOC
    static unsigned int arena = 0;
    if (!arena)
    {
        size_t sz = sizeof(arena);
        if (mallctl("arenas.create", &arena, &sz, NULL, 0))
        {
            llwarns << "Failed to create a new jemalloc arena" << llendl;
        }
    }
    llinfos << "Using jemalloc arena " << arena << " for texture memory"
            << llendl;
    sMallocxFlags = MALLOCX_ARENA(arena) | MALLOCX_TCACHE_NONE;
#endif

    if (!sTempDataBuffer)
    {
        // Note: use only this buffer from the main thread !
        sTempDataBuffer = (U8*)allocate_texture_mem(TEMP_DATA_BUFFER_SIZE *
                                                    sizeof(U8));
    }
}

//static
void LLImage::cleanupClass()
{
    if (sTempDataBuffer)
    {
        dumpStats();
        free_texture_mem(sTempDataBuffer);
        sTempDataBuffer = NULL;
    }
    delete sErrorMutex;
    sErrorMutex = NULL;
}

//static
void LLImage::dumpStats()
{
    llinfos << "Static temp buffer usage count: "
            << sTempDataBufferUsageCount
            << " - Dynamic temp buffer allocations count: "
            << sDynamicBufferAllocationsCount
            << " - Maximum requested size for main thread temporary buffer: "
            << sMaxMainThreadTempBufferSizeRequest
            << " bytes - Size of static temp buffer: "
            << TEMP_DATA_BUFFER_SIZE << " bytes." << llendl;
}

//static
const std::string& LLImage::getLastError()
{
    static const std::string noerr("No Error");
    return sLastErrorMessage.empty() ? noerr : sLastErrorMessage;
}

//static
void LLImage::setLastError(const std::string& message)
{
    if (sErrorMutex)
    {
        sErrorMutex->lock();
    }
    sLastErrorMessage = message;
    if (sErrorMutex)
    {
        sErrorMutex->unlock();
    }
}
//---------------------------------------------------------------------------
// LLImageBase
//---------------------------------------------------------------------------

LLImageBase::LLImageBase()
:   mData(NULL),
    mDataSize(0),
    mWidth(0),
    mHeight(0),
    mComponents(0)
{
    mBadBufferAllocation = false;
}

//virtual
LLImageBase::~LLImageBase()
{
    deleteData(); // virtual
}

//virtual
void LLImageBase::dump()
{
    llinfos << "LLImageBase mComponents " << mComponents << " mData " << mData
            << " mDataSize " << mDataSize << " mWidth " << mWidth
            << " mHeight " << mHeight << llendl;
}

bool LLImageBase::sSizeOverride = false;

//virtual
void LLImageBase::deleteData()
{
    if (mData)
    {
        free_texture_mem(mData);
        mData = NULL;
    }
    mDataSize = 0;
}

//virtual
U8* LLImageBase::allocateData(S32 size)
{
    mBadBufferAllocation = false;
    if (size < 0)
    {
        size = mWidth * mHeight * mComponents;
        if (size <= 0)
        {
            llwarns << "Bad dimensions: " << mWidth << "x" << mHeight << "x"
                    << mComponents << llendl;
            mBadBufferAllocation = true;
            return NULL;
        }
    }
    else if (size <= 0 || (size > 4096 * 4096 * 16 && !sSizeOverride))
    {
        llwarns << "Bad size: " << size << llendl;
        mBadBufferAllocation = true;
        return NULL;
    }

    if (!mData || size != mDataSize)
    {
        if (mData)
        {
            deleteData(); // virtual
        }
        mData = (U8*)allocate_texture_mem((size_t)size * sizeof(U8));
        if (!mData)
        {
            llwarns << "Could not allocate image data for requested size: "
                    << size << llendl;
            size = 0;
            mWidth = mHeight = 0;
            mBadBufferAllocation = true;
            return NULL;
        }
        mDataSize = size;
    }

    return mData;
}

//virtual
U8* LLImageBase::reallocateData(S32 size)
{
    if (mData && mDataSize == size)
    {
        return mData;
    }
    U8* new_datap = (U8*)allocate_texture_mem((size_t)size * sizeof(U8));
    if (!new_datap)
    {
        llwarns << "Could not reallocate image data for requested size: "
                << size << llendl;
        mBadBufferAllocation = true;
        return NULL;
    }
    if (mData)
    {
        S32 bytes = llmin(mDataSize, size);
        memcpy(new_datap, mData, bytes);
        free_texture_mem(mData);
    }
    mData = new_datap;
    mDataSize = size;
    mBadBufferAllocation = false;
    return mData;
}

const U8* LLImageBase::getData() const
{
    if (mBadBufferAllocation)
    {
        llwarns << "Bad memory allocation for the image buffer !" << llendl;
        llassert(false);
        return NULL;
    }
    return mData;
}

U8* LLImageBase::getData()
{
    if (mBadBufferAllocation)
    {
        llwarns << "Bad memory allocation for the image buffer !" << llendl;
        llassert(false);
        return NULL;
    }
    return mData;
}

bool LLImageBase::isBufferInvalid()
{
    return mBadBufferAllocation || mData == NULL;
}

void LLImageBase::setSize(S32 width, S32 height, S32 ncomponents)
{
    mWidth = width;
    mHeight = height;
    mComponents = ncomponents;
}

U8* LLImageBase::allocateDataSize(S32 width, S32 height, S32 ncomp, S32 size)
{
    setSize(width, height, ncomp);
    return allocateData(size); // virtual
}
//---------------------------------------------------------------------------
// LLImageRaw
//---------------------------------------------------------------------------

LLAtomicS32 LLImageRaw::sRawImageCount(0);

LLImageRaw::LLImageRaw()
:   LLImageBase()
{
    ++sRawImageCount;
}

LLImageRaw::LLImageRaw(U16 width, U16 height, S8 components)
:   LLImageBase()
{
    llassert(S32(width) * S32(height) * S32(components) <=
             MAX_IMAGE_DATA_SIZE);
    allocateDataSize(width, height, components);
    ++sRawImageCount;
}

LLImageRaw::LLImageRaw(U8* data, U16 width, U16 height, S8 components,
                       bool no_copy)
:   LLImageBase()
{
    if (no_copy)
    {
        setDataAndSize(data, width, height, components);
    }
    else if (allocateDataSize(width, height, components) && data && getData())
    {
        memcpy(getData(), data, width * height * components);
    }
    ++sRawImageCount;
}

LLImageRaw::LLImageRaw(const U8* data, U16 width, U16 height, S8 components)
:   LLImageBase()
{
    if (allocateDataSize(width, height, components) && data && getData())
    {
        memcpy(getData(), data, width * height * components);
    }
    ++sRawImageCount;
}

LLImageRaw::LLImageRaw(const std::string& filename, bool j2c_lowest_mip_only)
:   LLImageBase()
{
    createFromFile(filename, j2c_lowest_mip_only);
    ++sRawImageCount;
}

LLImageRaw::~LLImageRaw()
{
    --sRawImageCount;
}

void LLImageRaw::releaseData()
{
    LLImageBase::setSize(0, 0, 0);
    LLImageBase::setDataAndSize(NULL, 0);
}

void LLImageRaw::setDataAndSize(U8* data, S32 width, S32 height,
                                S8 components)
{
    if (data == getData())
    {
        return;
    }
    deleteData();
    LLImageBase::setSize(width, height, components);
    LLImageBase::setDataAndSize(data, width * height * components);
}

bool LLImageRaw::resize(U16 width, U16 height, S8 components)
{
    if (getWidth() == width && getHeight() == height &&
        getComponents() == components && !isBufferInvalid())
    {
        return true;
    }
    // Reallocate the data buffer.
    deleteData();
    allocateDataSize(width, height, components);
    return !isBufferInvalid();
}
U8* LLImageRaw::getSubImage(U32 x_pos, U32 y_pos, U32 width, U32 height) const
{
    U8* data = new (std::nothrow) U8[width * height * getComponents()];
    // Should do some simple bounds checking
    if (!data || !getData())
    {
        llwarns << "Out of memory. Sub image not retrieved !" << llendl;
        delete[] data;  // 'data' may have been allocated while getData() failed
        return NULL;
    }
    // Copy one destination row at a time; the destination rows start at 0,
    // the source rows at y_pos.
    for (U32 i = 0; i < height; ++i)
    {
        memcpy(data + i * width * getComponents(),
               getData() +
               ((y_pos + i) * getWidth() + x_pos) * getComponents(),
               getComponents() * width);
    }
    return data;
}
bool LLImageRaw::setSubImage(U32 x_pos, U32 y_pos, U32 width, U32 height,
                             const U8* data, U32 stride, bool reverse_y)
{
    if (!data || !getData())
    {
        llwarns << "Out of memory. Sub image not set !" << llendl;
        return false;
    }
    // Should do some simple bounds checking
    for (U32 i = 0; i < height; ++i)
    {
        const U32 row = reverse_y ? height - 1 - i : i;
        const U32 from_offset = row * (stride == 0 ? width * getComponents()
                                                   : stride);
        const U32 to_offset = (y_pos + i) * getWidth() + x_pos;
        memcpy(getData() + to_offset * getComponents(),
               data + from_offset, getComponents() * width);
    }
    return true;
}
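
// Minimal usage sketch (hypothetical values): copy a tightly packed 16x16
// block into this image at (32, 48), flipping it vertically (stride == 0
// means "use width * components" as the source row stride):
//
//   // img.setSubImage(32, 48, 16, 16, block_data, 0, true);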
void LLImageRaw::clear(U8 r, U8 g, U8 b, U8 a)
{
    // This is fairly bogus, but it will do for now.
    if (isBufferInvalid()) return;

    S32 count = getWidth() * getHeight();
    S8 components = getComponents();
    llassert(components <= 4 && count * components == getDataSize());
    switch (components)
    {
        case 1:
        {
            U8* dst = getData();
            std::fill_n(dst, count, r);
            break;
        }

        case 2:
        {
            U16* dst = (U16*)getData();
            // First channel in the low memory byte in both cases
#if LL_BIG_ENDIAN
            U16 val = U16(r) << 8 | g;
#else
            U16 val = U16(r) | U16(g) << 8;
#endif
            std::fill_n(dst, count, val);
            break;
        }

        case 3:
        {
            U8* dst = getData();
            for (S32 i = 0; i < count; ++i)
            {
                *dst++ = r;
                *dst++ = g;
                *dst++ = b;
            }
            break;
        }

        case 4:
        {
            U32* dst = (U32*)getData();
#if LL_BIG_ENDIAN
            //U32 val = U32(a) | U32(b) << 8 | U32(g) << 16 | U32(r) << 24;
            U32 val = U32(r) << 8 | g;
            val = val << 8 | b;
            val = val << 8 | a;
#else
            //U32 val = U32(r) | U32(g) << 8 | U32(b) << 16 | U32(a) << 24;
            U32 val = U32(a) << 8 | b;
            val = val << 8 | g;
            val = val << 8 | r;
#endif
            std::fill_n(dst, count, val);
            break;
        }

        default:
            llwarns_once << "Invalid number of components: " << components
                         << llendl;
    }
}
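
// Note on clear() above: the 2 and 4 components cases pack the channel
// bytes into a single U16/U32 in the machine byte order, so that
// std::fill_n() can write whole pixels at a time instead of looping over
// individual channel bytes.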
// Reverses the order of the rows in the image
void LLImageRaw::verticalFlip()
{
    S32 row_bytes = getWidth() * getComponents();
    U8* line_buffer = getTempBuffer(row_bytes);
    if (!line_buffer || !getData())
    {
        llwarns << "Out of memory. Flipping aborted !" << llendl;
        return;
    }
    S32 mid_row = getHeight() / 2;
    for (S32 row = 0; row < mid_row; ++row)
    {
        U8* row_a_data = getData() + row * row_bytes;
        U8* row_b_data = getData() + (getHeight() - 1 - row) * row_bytes;
        memcpy(line_buffer, row_a_data, row_bytes);
        memcpy(row_a_data, row_b_data, row_bytes);
        memcpy(row_b_data, line_buffer, row_bytes);
    }
    freeTempBuffer(line_buffer);
}

bool LLImageRaw::checkHasTransparentPixels()
{
    if (getComponents() != 4)
    {
        return false;
    }
    U8* data = getData();
    for (U32 i = 0, pixels = getWidth() * getHeight(); i < pixels; ++i)
    {
        if (data[i * 4 + 3] != 255)
        {
            return true;
        }
    }
    return false;
}

bool LLImageRaw::optimizeAwayAlpha()
{
    if (getComponents() != 4)
    {
        return false;
    }
    U8* data = getData();
    U32 width = getWidth();
    U32 height = getHeight();
    U32 pixels = width * height;
    // Check alpha channel for all 255
    for (U32 i = 0; i < pixels; ++i)
    {
        if (data[i * 4 + 3] != 255)
        {
            return false;
        }
    }
    // Alpha channel is 255 for each pixel, make a new copy of data without
    // alpha channel. *TODO: vectorize.
    U8* new_data = (U8*)allocate_texture_mem(width * height * 3);
    if (!new_data)  // Guard against allocation failures
    {
        return false;
    }
    for (U32 i = 0; i < pixels; ++i)
    {
        U32 di = i * 3;
        U32 si = i * 4;
        new_data[di++] = data[si++];
        new_data[di++] = data[si++];
        new_data[di] = data[si];
    }
    setDataAndSize(new_data, width, height, 3);
    return true;
}
bool LLImageRaw::makeAlpha()
{
    if (getComponents() != 3)
    {
        return false;
    }
    U8* data = getData();
    U32 width = getWidth();
    U32 height = getHeight();
    U32 pixels = width * height;
    // Alpha channel does not exist, make a new copy of data with an alpha
    // channel (fully opaque by default).
    U8* new_data = (U8*)allocate_texture_mem(width * height * 4);
    if (!new_data)  // Guard against allocation failures
    {
        return false;
    }
    for (U32 i = 0; i < pixels; ++i)
    {
        U32 di = i * 4;
        U32 si = i * 3;
        for (U32 j = 0; j < 3; ++j)
        {
            new_data[di + j] = data[si + j];
        }
        new_data[di + 3] = 255;
    }
    // The new buffer now holds 4 components per pixel
    setDataAndSize(new_data, width, height, 4);
    return true;
}
void LLImageRaw::expandToPowerOfTwo(S32 max_dim, bool scale_image)
{
    // Find new sizes
    S32 new_width = MIN_IMAGE_SIZE;
    S32 new_height = MIN_IMAGE_SIZE;
    while (new_width < getWidth() && new_width < max_dim)
    {
        new_width <<= 1;
    }
    while (new_height < getHeight() && new_height < max_dim)
    {
        new_height <<= 1;
    }
    scale(new_width, new_height, scale_image);
}

void LLImageRaw::contractToPowerOfTwo(S32 max_dim, bool scale_image)
{
    // Find new sizes
    S32 new_width = max_dim;
    S32 new_height = max_dim;
    while (new_width > getWidth() && new_width > MIN_IMAGE_SIZE)
    {
        new_width >>= 1;
    }
    while (new_height > getHeight() && new_height > MIN_IMAGE_SIZE)
    {
        new_height >>= 1;
    }
    scale(new_width, new_height, scale_image);
}

void LLImageRaw::biasedScaleToPowerOfTwo(S32 max_dim)
{
    // Strong bias towards rounding down (to save bandwidth).
    // No bias would mean THRESHOLD == 1.5f
    constexpr F32 THRESHOLD = 1.75f;

    // Find new sizes
    S32 larger_w = max_dim;     // 2^n >= mWidth
    S32 smaller_w = max_dim;    // 2^(n-1) <= mWidth
    while (smaller_w > getWidth() && smaller_w > MIN_IMAGE_SIZE)
    {
        larger_w = smaller_w;
        smaller_w >>= 1;
    }
    S32 new_width = (F32)getWidth() / smaller_w > THRESHOLD ? larger_w
                                                            : smaller_w;

    S32 larger_h = max_dim;     // 2^m >= mHeight
    S32 smaller_h = max_dim;    // 2^(m-1) <= mHeight
    while (smaller_h > getHeight() && smaller_h > MIN_IMAGE_SIZE)
    {
        larger_h = smaller_h;
        smaller_h >>= 1;
    }
    S32 new_height = (F32)getHeight() / smaller_h > THRESHOLD ? larger_h
                                                              : smaller_h;

    scale(new_width, new_height);
}
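
// Worked example for the threshold above: a 300 pixels wide image with
// max_dim = 1024 yields smaller_w = 256 and larger_w = 512; since
// 300 / 256 ~= 1.17 < 1.75, the width rounds down to 256. A dimension only
// rounds up when it exceeds 1.75 times the smaller power of two.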
void LLImageRaw::composite(LLImageRaw* src)
{
    LLImageRaw* dst = this; // Just for clarity.
    if (!src || !dst || src->isBufferInvalid() || dst->isBufferInvalid())
    {
        return;
    }

    if (dst->getComponents() == 3)
    {
        if (src->getWidth() == dst->getWidth() &&
            src->getHeight() == dst->getHeight())
        {
            // No scaling needed
            if (src->getComponents() == 3)
            {
                copyUnscaled(src); // Alpha is one so just copy the data.
            }
            else
            {
                compositeUnscaled4onto3(src);
            }
        }
        else if (src->getComponents() == 3)
        {
            copyScaled(src); // Alpha is one so just copy the data.
        }
        else
        {
            compositeScaled4onto3(src);
        }
    }
}

// Src and dst can be any size. Src has 4 components. Dst has 3 components.
void LLImageRaw::compositeScaled4onto3(LLImageRaw* src)
{
    LLImageRaw* dst = this; // Just for clarity.
    llassert(src->getComponents() == 4 && dst->getComponents() == 3);

    // Vertical: scale but no composite
    S32 temp_data_size = src->getWidth() * dst->getHeight() *
                         src->getComponents();
    U8* temp_buffer = getTempBuffer(temp_data_size);
    if (!temp_buffer || !src->getData() || !dst->getData())
    {
        llwarns << "Out of memory. Scaling aborted !" << llendl;
        return;
    }
    for (S32 col = 0; col < src->getWidth(); ++col)
    {
        copyLineScaled(src->getData() + src->getComponents() * col,
                       temp_buffer + src->getComponents() * col,
                       src->getHeight(), dst->getHeight(),
                       src->getWidth(), src->getWidth());
    }

    // Horizontal: scale and composite
    for (S32 row = 0; row < dst->getHeight(); ++row)
    {
        compositeRowScaled4onto3(temp_buffer +
                                 src->getComponents() * src->getWidth() * row,
                                 dst->getData() +
                                 dst->getComponents() * dst->getWidth() * row,
                                 src->getWidth(), dst->getWidth());
    }

    // Clean up
    freeTempBuffer(temp_buffer);
}

// Src and dst are same size. Src has 4 components. Dst has 3 components.
void LLImageRaw::compositeUnscaled4onto3(LLImageRaw* src)
{
    LLImageRaw* dst = this; // Just for clarity.
    llassert(src->getComponents() == 4 && dst->getComponents() == 3);
    llassert(src->getWidth() == dst->getWidth() &&
             src->getHeight() == dst->getHeight());

    U8* src_data = src->getData();
    U8* dst_data = dst->getData();
    if (!src_data || !dst_data)
    {
        llwarns << "Out of memory, conversion aborted !" << llendl;
        return;
    }

    S32 pixels = getWidth() * getHeight();
    while (pixels--)
    {
        U8 alpha = src_data[3];
        if (alpha)
        {
            if (alpha == 255)
            {
                dst_data[0] = src_data[0];
                dst_data[1] = src_data[1];
                dst_data[2] = src_data[2];
            }
            else
            {
                U8 transparency = 255 - alpha;
                dst_data[0] = fastFractionalMult(dst_data[0], transparency) +
                              fastFractionalMult(src_data[0], alpha);
                dst_data[1] = fastFractionalMult(dst_data[1], transparency) +
                              fastFractionalMult(src_data[1], alpha);
                dst_data[2] = fastFractionalMult(dst_data[2], transparency) +
                              fastFractionalMult(src_data[2], alpha);
            }
        }
        src_data += 4;
        dst_data += 3;
    }
}

void LLImageRaw::copyUnscaledAlphaMask(LLImageRaw* src, const LLColor4U& fill)
{
    LLImageRaw* dst = this; // Just for clarity.
    if (!src || !dst || src->isBufferInvalid() || dst->isBufferInvalid())
    {
        return;
    }

    llassert(src->getComponents() == 1 && dst->getComponents() == 4 &&
             src->getWidth() == dst->getWidth() &&
             src->getHeight() == dst->getHeight());

    S32 pixels = getWidth() * getHeight();
    U8* src_data = src->getData();
    U8* dst_data = dst->getData();
    if (!src_data || !dst_data)
    {
        llwarns << "Out of memory, copy aborted !" << llendl;
        return;
    }
    for (S32 i = 0; i < pixels; ++i)
    {
        dst_data[0] = fill.mV[0];
        dst_data[1] = fill.mV[1];
        dst_data[2] = fill.mV[2];
        dst_data[3] = src_data[0];
        src_data += 1;
        dst_data += 4;
    }
}
// Fill the buffer with a constant color
void LLImageRaw::fill(const LLColor4U& color)
{
    if (isBufferInvalid()) return;

    if (!getData())
    {
        llwarns << "Invalid image buffer." << llendl;
        return;
    }

    S32 pixels = getWidth() * getHeight();
    if (getComponents() == 4)
    {
        U32* data = (U32*)getData();
        U32 rgba = color.asRGBA();
        for (S32 i = 0; i < pixels; ++i)
        {
            data[i] = rgba;
        }
    }
    else if (getComponents() == 3)
    {
        U8* data = getData();
        for (S32 i = 0; i < pixels; ++i)
        {
            data[0] = color.mV[0];
            data[1] = color.mV[1];
            data[2] = color.mV[2];
            data += 3;
        }
    }
}

void LLImageRaw::tint(const LLColor3& color)
{
    if (isBufferInvalid()) return;

    if (!getData())
    {
        llwarns << "Invalid image buffer." << llendl;
        return;
    }

    U32 components = getComponents();
    if (components != 3 && components != 4)
    {
        return;
    }

    U8* data = getData();
    for (U32 i = 0, pixels = getWidth() * getHeight(); i < pixels; ++i)
    {
        F32 c0 = F32(data[0]) * color.mV[0];
        F32 c1 = F32(data[1]) * color.mV[1];
        F32 c2 = F32(data[2]) * color.mV[2];
        // Clamp in floating point, before the U8 conversion can wrap around
        data[0] = (U8)llclamp(c0, 0.f, 255.f);
        data[1] = (U8)llclamp(c1, 0.f, 255.f);
        data[2] = (U8)llclamp(c2, 0.f, 255.f);
        data += components;
    }
}
LLPointer<LLImageRaw> LLImageRaw::duplicate()
{
    if (getNumRefs() < 2)
    {
        // Nobody else references this image: no need to duplicate.
        return this;
    }
    if (!getData())
    {
        llwarns << "Out of memory, image not duplicated !" << llendl;
        return this;
    }
    // Make a duplicate
    LLPointer<LLImageRaw> dup = new LLImageRaw(getData(), getWidth(),
                                               getHeight(), getComponents());
    if (dup->isBufferInvalid())
    {
        // There was an allocation failure: release the LLImageRaw and return
        // a NULL LLPointer<LLImageRaw>:
        dup = NULL;
    }
    return dup;
}
  1262. // Src and dst can be any size. Src and dst can each have 3 or 4 components.
  1263. void LLImageRaw::copy(LLImageRaw* src)
  1264. {
  1265. LLImageRaw* dst = this; // Just for clarity.
  1266. if (!src || !dst || src->isBufferInvalid() || dst->isBufferInvalid())
  1267. {
  1268. return;
  1269. }
  1270. llassert((3 == src->getComponents() || 4 == src->getComponents()) &&
  1271. (3 == dst->getComponents() || 4 == dst->getComponents()));
  1272. if (src->getWidth() == dst->getWidth() &&
  1273. src->getHeight() == dst->getHeight())
  1274. {
  1275. // No scaling needed
  1276. if (src->getComponents() == dst->getComponents())
  1277. {
  1278. copyUnscaled(src);
  1279. }
  1280. else if (3 == src->getComponents())
  1281. {
  1282. copyUnscaled3onto4(src);
  1283. }
  1284. else
  1285. {
  1286. // 4 == src->getComponents()
  1287. copyUnscaled4onto3(src);
  1288. }
  1289. }
  1290. else
  1291. {
  1292. // Scaling needed
  1293. // No scaling needed
  1294. if (src->getComponents() == dst->getComponents())
  1295. {
  1296. copyScaled(src);
  1297. }
  1298. else if (3 == src->getComponents())
  1299. {
  1300. copyScaled3onto4(src);
  1301. }
  1302. else
  1303. {
  1304. // 4 == src->getComponents()
  1305. copyScaled4onto3(src);
  1306. }
  1307. }
  1308. }
// Src and dst are same size. Src and dst have same number of components.
void LLImageRaw::copyUnscaled(LLImageRaw* src)
{
    LLImageRaw* dst = this; // Just for clarity.

    U8* src_data = src->getData();
    U8* dst_data = dst->getData();
    if (!src_data || !dst_data)
    {
        llwarns << "Out of memory, copy aborted !" << llendl;
        return;
    }

    llassert(1 == src->getComponents() || 3 == src->getComponents() ||
             4 == src->getComponents());
    llassert(src->getComponents() == dst->getComponents());
    llassert(src->getWidth() == dst->getWidth() &&
             src->getHeight() == dst->getHeight());

    memcpy(dst_data, src_data, getWidth() * getHeight() * getComponents());
}

// Src and dst can be any size. Src has 3 components. Dst has 4 components.
void LLImageRaw::copyScaled3onto4(LLImageRaw* src)
{
    llassert(3 == src->getComponents() && 4 == getComponents());

    // Slow, but simple. Optimize later if needed.
    LLImageRaw temp(src->getWidth(), src->getHeight(), 4);
    temp.copyUnscaled3onto4(src);
    copyScaled(&temp);
}

// Src and dst can be any size. Src has 4 components. Dst has 3 components.
void LLImageRaw::copyScaled4onto3(LLImageRaw* src)
{
    llassert(4 == src->getComponents() && 3 == getComponents());

    // Slow, but simple. Optimize later if needed.
    LLImageRaw temp(src->getWidth(), src->getHeight(), 3);
    temp.copyUnscaled4onto3(src);
    copyScaled(&temp);
}

// Src and dst are same size. Src has 4 components. Dst has 3 components.
void LLImageRaw::copyUnscaled4onto3(LLImageRaw* src)
{
    LLImageRaw* dst = this; // Just for clarity.

    llassert(3 == dst->getComponents() && 4 == src->getComponents() &&
             src->getWidth() == dst->getWidth() &&
             src->getHeight() == dst->getHeight());

    S32 pixels = getWidth() * getHeight();
    U8* src_data = src->getData();
    U8* dst_data = dst->getData();
    if (!src_data || !dst_data)
    {
        llwarns << "Out of memory, copy aborted !" << llendl;
        return;
    }

    for (S32 i = 0; i < pixels; ++i)
    {
        dst_data[0] = src_data[0];
        dst_data[1] = src_data[1];
        dst_data[2] = src_data[2];
        src_data += 4;
        dst_data += 3;
    }
}

// Src and dst are same size. Src has 3 components. Dst has 4 components.
void LLImageRaw::copyUnscaled3onto4(LLImageRaw* src)
{
    LLImageRaw* dst = this; // Just for clarity.

    llassert(3 == src->getComponents() && 4 == dst->getComponents() &&
             src->getWidth() == dst->getWidth() &&
             src->getHeight() == dst->getHeight());

    S32 pixels = getWidth() * getHeight();
    U8* src_data = src->getData();
    U8* dst_data = dst->getData();
    if (!src_data || !dst_data)
    {
        llwarns << "Out of memory, copy aborted !" << llendl;
        return;
    }

    for (S32 i = 0; i < pixels; ++i)
    {
        dst_data[0] = src_data[0];
        dst_data[1] = src_data[1];
        dst_data[2] = src_data[2];
        dst_data[3] = 255;
        src_data += 3;
        dst_data += 4;
    }
}

U8* LLImageRaw::getTempBuffer(S32 size)
{
    bool from_main_thread = is_main_thread();
    if (from_main_thread &&
        size > LLImage::sMaxMainThreadTempBufferSizeRequest)
    {
        LLImage::sMaxMainThreadTempBufferSizeRequest = size;
    }
    if (from_main_thread && LLImage::sTempDataBuffer &&
        (size_t)size <= TEMP_DATA_BUFFER_SIZE)
    {
        // In order to avoid many memory reallocations resulting in virtual
        // address space fragmentation, we use, for the main thread, a static
        // buffer as a temporary storage whenever possible.
        ++LLImage::sTempDataBufferUsageCount;
        return LLImage::sTempDataBuffer;
    }
    else
    {
        ++LLImage::sDynamicBufferAllocationsCount;
        U8* tmp = (U8*)allocate_texture_mem((size_t)size * sizeof(U8));
        return tmp;
    }
}

void LLImageRaw::freeTempBuffer(U8* addr)
{
    if (addr != LLImage::sTempDataBuffer)
    {
        free_texture_mem((void*)addr);
    }
}

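// Illustrative usage sketch (an assumption, not part of the original file):
// getTempBuffer() and freeTempBuffer() must always be paired, because the
// returned pointer may be either the shared static buffer or a dynamically
// allocated one; freeTempBuffer() tells them apart.
//   U8* tmp = getTempBuffer(getWidth() * getHeight() * getComponents());
//   if (tmp)
//   {
//       // ... use tmp as scratch storage ...
//       freeTempBuffer(tmp);
//   }
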
// Src and dst can be any size. Src and dst have same number of components.
void LLImageRaw::copyScaled(LLImageRaw* src)
{
    LLImageRaw* dst = this; // Just for clarity.

    if (!src || !dst || src->isBufferInvalid() || dst->isBufferInvalid())
    {
        return;
    }

    llassert_always(1 == src->getComponents() || 3 == src->getComponents() ||
                    4 == src->getComponents());
    llassert_always(src->getComponents() == dst->getComponents());

    U8* src_data = src->getData();
    U8* dst_data = dst->getData();
    if (!src_data || !dst_data)
    {
        llwarns << "Out of memory, copy aborted !" << llendl;
        return;
    }

    if (src->getWidth() == dst->getWidth() &&
        src->getHeight() == dst->getHeight())
    {
        memcpy(dst_data, src_data, getWidth() * getHeight() * getComponents());
        return;
    }

    S32 src_width = src->getWidth();
    S32 src_components = src->getComponents();
    S32 dst_width = dst->getWidth();
    S32 dst_components = dst->getComponents();
    bilinear_scale(src->getData(), src_width, src->getHeight(), src_components,
                   src_width * src_components, dst->getData(), dst_width,
                   dst->getHeight(), dst_components, dst_width * dst_components);
}

bool LLImageRaw::scale(S32 new_width, S32 new_height, bool scale_image_data)
{
    if (isBufferInvalid()) return false;

    S32 components = getComponents();
    if (components != 1 && components != 3 && components != 4)
    {
        llwarns << "Invalid number of components: " << components
                << ". Aborted." << llendl;
        return false;
    }

    S32 old_width = getWidth();
    S32 old_height = getHeight();
    if (old_width == new_width && old_height == new_height)
    {
        return true; // Nothing to do.
    }

    // Sanity check: we cannot rescale an image which lost its data buffer.
    if (!getData())
    {
        llwarns << "Out of memory. Scaling aborted !" << llendl;
        return false;
    }

    if (scale_image_data)
    {
        S32 new_data_size = new_width * new_height * components;
        if (new_data_size <= 0)
        {
            llwarns << "Non-positive data size: width = " << new_width
                    << " - height = " << new_height << " - components = "
                    << components << "; aborting !" << llendl;
            llassert(false);
            return false;
        }
        U8* new_data = (U8*)allocate_texture_mem(new_data_size);
        if (!new_data)
        {
            llwarns << "Out of memory while rescaling for requested size: "
                    << new_data_size << llendl;
            return false;
        }
        components = getComponents();
        bilinear_scale(getData(), old_width, old_height, components,
                       old_width * components, new_data, new_width,
                       new_height, components, new_width * components);
        setDataAndSize(new_data, new_width, new_height, components);
    }
    else
    {
        // Copy out existing image data
        S32 temp_data_size = old_width * old_height * getComponents();
        U8* temp_buffer = getTempBuffer(temp_data_size);
        if (!temp_buffer)
        {
            llwarns << "Out of memory while rescaling: old (w, h, c) = ("
                    << old_width << ", " << old_height << ", " << components
                    << "); new (w, h, c) = (" << new_width << ", "
                    << new_height << ", " << getComponents() << ")" << llendl;
            return false;
        }
        memcpy(temp_buffer, getData(), temp_data_size);

        // Allocate new image data; this deletes the old data
        components = getComponents();
        U8* new_buffer = allocateDataSize(new_width, new_height, components);
        if (!new_buffer)
        {
            llwarns << "Out of memory while rescaling: old (w, h, c) = ("
                    << old_width << ", " << old_height << ", " << components
                    << "); new (w, h, c) = (" << new_width << ", "
                    << new_height << ", " << getComponents() << ")" << llendl;
            freeTempBuffer(temp_buffer);
            return false;
        }
        components = getComponents();
        for (S32 row = 0; row < new_height; ++row)
        {
            if (row < old_height)
            {
                memcpy(new_buffer + new_width * row * components,
                       temp_buffer + old_width * row * components,
                       components * llmin(old_width, new_width));
                if (old_width < new_width)
                {
                    // Pad out the rest of the row with black
                    memset(new_buffer +
                           components * (new_width * row + old_width), 0,
                           components * (new_width - old_width));
                }
            }
            else
            {
                // Pad the remaining rows with black
                memset(new_buffer + new_width * row * components, 0,
                       new_width * components);
            }
        }

        // Clean up
        freeTempBuffer(temp_buffer);
    }
    return true;
}

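// Illustrative usage sketch (an assumption, not part of the original file):
// in-place bilinear resample to half size, versus a crop/pad resize (no
// resampling, extra area filled with black) when scale_image_data is false.
//   raw->scale(raw->getWidth() / 2, raw->getHeight() / 2, true);
//   raw->scale(256, 256, false);
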
LLPointer<LLImageRaw> LLImageRaw::scaled(S32 new_width, S32 new_height)
{
    LLPointer<LLImageRaw> result;

    if (isBufferInvalid())
    {
        llwarns << "Invalid image buffer. Aborted." << llendl;
        return result;
    }

    S32 components = getComponents();
    if (components != 1 && components != 3 && components != 4)
    {
        llwarns << "Invalid number of components: " << components
                << ". Aborted." << llendl;
        return result;
    }

    S32 old_width = getWidth();
    S32 old_height = getHeight();
    if (old_width == new_width && old_height == new_height)
    {
        // Note: cannot use (std::nothrow) with our custom new() allocator
        try
        {
            result = new LLImageRaw(old_width, old_height, components);
            if (result.notNull() && !result->isBufferInvalid())
            {
                memcpy(result->getData(), getData(), getDataSize());
            }
        }
        catch (std::bad_alloc&)
        {
        }
    }
    else
    {
        S32 new_data_size = new_width * new_height * components;
        if (new_data_size > 0)
        {
            // Note: cannot use (std::nothrow) with our custom new() allocator
            try
            {
                result = new LLImageRaw(new_width, new_height, components);
                if (result.notNull() && !result->isBufferInvalid())
                {
                    bilinear_scale(getData(), old_width, old_height,
                                   components, old_width * components,
                                   result->getData(), new_width, new_height,
                                   components, new_width * components);
                }
            }
            catch (std::bad_alloc&)
            {
            }
        }
    }
    if (result.isNull())
    {
        llwarns << "Failed to allocate new image for size: " << new_width
                << "x" << new_height << ". Out of memory ?" << llendl;
    }
    return result;
}

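// Illustrative usage sketch (an assumption, not part of the original file):
// unlike scale(), scaled() leaves the source image untouched and returns a
// new image (a NULL pointer on allocation failure).
//   LLPointer<LLImageRaw> thumb = raw->scaled(128, 128);
//   if (thumb.notNull())
//   {
//       // ... use thumb; 'raw' still holds the full-size pixels ...
//   }
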
void LLImageRaw::copyLineScaled(U8* in, U8* out, S32 in_pixel_len,
                                S32 out_pixel_len, S32 in_pixel_step,
                                S32 out_pixel_step)
{
    const S32 components = getComponents();
    llassert(components >= 1 && components <= 4);

    const F32 ratio = F32(in_pixel_len) / out_pixel_len; // Ratio of old to new
    const F32 norm_factor = 1.f / ratio;

    S32 goff = components >= 2 ? 1 : 0;
    S32 boff = components >= 3 ? 2 : 0;
    for (S32 x = 0; x < out_pixel_len; ++x)
    {
        // Sample input pixels in range from sample0 to sample1. Avoid
        // floating point accumulation error... Do not just add ratio each
        // time. JC
        const F32 sample0 = x * ratio;
        const F32 sample1 = (x + 1) * ratio;
        // Left integer (floor)
        const S32 index0 = llfloor(sample0);
        // Right integer (floor)
        const S32 index1 = llfloor(sample1);
        // Spill over on left
        const F32 fract0 = 1.f - sample0 + (F32)index0;
        // Spill over on right
        const F32 fract1 = sample1 - (F32)index1;
        if (index0 == index1)
        {
            // Interval is embedded in one input pixel
            S32 t0 = x * out_pixel_step * components;
            S32 t1 = index0 * in_pixel_step * components;
            U8* outp = out + t0;
            U8* inp = in + t1;
            for (S32 i = 0; i < components; ++i)
            {
                *outp = *inp;
                ++outp;
                ++inp;
            }
        }
        else
        {
            // Left straddle
            S32 t1 = index0 * in_pixel_step * components;
            F32 r = in[t1 + 0] * fract0;
            F32 g = in[t1 + goff] * fract0;
            F32 b = in[t1 + boff] * fract0;
            F32 a = 0;
            if (components == 4)
            {
                a = in[t1 + 3] * fract0;
            }
            // Central interval
            if (components < 4)
            {
                for (S32 u = index0 + 1; u < index1; ++u)
                {
                    S32 t2 = u * in_pixel_step * components;
                    r += in[t2 + 0];
                    g += in[t2 + goff];
                    b += in[t2 + boff];
                }
            }
            else
            {
                for (S32 u = index0 + 1; u < index1; ++u)
                {
                    S32 t2 = u * in_pixel_step * components;
                    r += in[t2 + 0];
                    g += in[t2 + 1];
                    b += in[t2 + 2];
                    a += in[t2 + 3];
                }
            }
            // Right straddle. Watch out for reading off of the end of the
            // input array.
            if (fract1 && index1 < in_pixel_len)
            {
                S32 t3 = index1 * in_pixel_step * components;
                if (components < 4)
                {
                    U8 in0 = in[t3];
                    U8 in1 = in[t3 + goff];
                    U8 in2 = in[t3 + boff];
                    r += in0 * fract1;
                    g += in1 * fract1;
                    b += in2 * fract1;
                }
                else
                {
                    U8 in0 = in[t3++];
                    U8 in1 = in[t3++];
                    U8 in2 = in[t3++];
                    U8 in3 = in[t3];
                    r += in0 * fract1;
                    g += in1 * fract1;
                    b += in2 * fract1;
                    a += in3 * fract1;
                }
            }
            U8 arr[] = {
                U8(ll_roundp(r * norm_factor)),
                U8(ll_roundp(g * norm_factor)),
                U8(ll_roundp(b * norm_factor)),
                U8(ll_roundp(a * norm_factor))
            }; // Skip conditional
            S32 t4 = x * out_pixel_step * components;
            memcpy(out + t4, arr, sizeof(U8) * components);
        }
    }
}

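// Worked example for the box filter above (added for clarity, not in the
// original file): with in_pixel_len = 4 and out_pixel_len = 3, ratio = 4/3
// and norm_factor = 3/4. For x = 0: sample0 = 0, sample1 = 4/3, so index0 = 0,
// index1 = 1, fract0 = 1 and fract1 = 1/3. The output pixel is therefore
// (in[0] * 1 + in[1] * 1/3) * 3/4, i.e. a normalized weighted average over
// exactly 4/3 input pixels.
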
void LLImageRaw::compositeRowScaled4onto3(U8* in, U8* out, S32 in_pixel_len,
                                          S32 out_pixel_len)
{
    llassert(getComponents() == 3);

    constexpr S32 IN_COMPONENTS = 4;
    constexpr S32 OUT_COMPONENTS = 3;

    const F32 ratio = F32(in_pixel_len) / out_pixel_len; // Ratio of old to new
    const F32 norm_factor = 1.f / ratio;

    for (S32 x = 0; x < out_pixel_len; ++x)
    {
        // Sample input pixels in range from sample0 to sample1.
        // Avoid floating point accumulation error; do not just add ratio each
        // time. JC
        const F32 sample0 = x * ratio;
        const F32 sample1 = (x + 1) * ratio;
        const S32 index0 = S32(sample0); // Left integer (floor)
        const S32 index1 = S32(sample1); // Right integer (floor)
        const F32 fract0 = 1.f - (sample0 - F32(index0)); // Spill over on left
        const F32 fract1 = sample1 - F32(index1); // Spill over on right

        U8 in_scaled_r;
        U8 in_scaled_g;
        U8 in_scaled_b;
        U8 in_scaled_a;
        if (index0 == index1)
        {
            // Interval is embedded in one input pixel: copy its R, G, B and
            // A channels straight through.
            S32 t1 = index0 * IN_COMPONENTS;
            in_scaled_r = in[t1];
            in_scaled_g = in[t1 + 1];
            in_scaled_b = in[t1 + 2];
            in_scaled_a = in[t1 + 3];
        }
        else
        {
            // Left straddle
            S32 t1 = index0 * IN_COMPONENTS;
            F32 r = in[t1] * fract0;
            F32 g = in[t1 + 1] * fract0;
            F32 b = in[t1 + 2] * fract0;
            F32 a = in[t1 + 3] * fract0;
            // Central interval
            for (S32 u = index0 + 1; u < index1; ++u)
            {
                S32 t2 = u * IN_COMPONENTS;
                r += in[t2];
                g += in[t2 + 1];
                b += in[t2 + 2];
                a += in[t2 + 3];
            }
            // Right straddle. Watch out for reading off of the end of the
            // input array.
            if (fract1 && index1 < in_pixel_len)
            {
                S32 t3 = index1 * IN_COMPONENTS;
                r += in[t3] * fract1;
                g += in[t3 + 1] * fract1;
                b += in[t3 + 2] * fract1;
                a += in[t3 + 3] * fract1;
            }
            r *= norm_factor;
            g *= norm_factor;
            b *= norm_factor;
            a *= norm_factor;
            in_scaled_r = U8(ll_roundp(r));
            in_scaled_g = U8(ll_roundp(g));
            in_scaled_b = U8(ll_roundp(b));
            in_scaled_a = U8(ll_roundp(a));
        }
        if (in_scaled_a)
        {
            if (255 == in_scaled_a)
            {
                out[0] = in_scaled_r;
                out[1] = in_scaled_g;
                out[2] = in_scaled_b;
            }
            else
            {
                U8 transparency = 255 - in_scaled_a;
                out[0] = fastFractionalMult(out[0], transparency) +
                         fastFractionalMult(in_scaled_r, in_scaled_a);
                out[1] = fastFractionalMult(out[1], transparency) +
                         fastFractionalMult(in_scaled_g, in_scaled_a);
                out[2] = fastFractionalMult(out[2], transparency) +
                         fastFractionalMult(in_scaled_b, in_scaled_a);
            }
        }
        out += OUT_COMPONENTS;
    }
}

static struct
{
    const char* exten;
    EImageCodec codec;
}
file_extensions[] =
{
    { "bmp", IMG_CODEC_BMP },
    { "tga", IMG_CODEC_TGA },
    { "j2c", IMG_CODEC_J2C },
    { "jp2", IMG_CODEC_J2C },
    { "texture", IMG_CODEC_J2C },
    { "jpg", IMG_CODEC_JPEG },
    { "jpeg", IMG_CODEC_JPEG },
    { "png", IMG_CODEC_PNG }
};
constexpr S32 num_file_extensions = LL_ARRAY_SIZE(file_extensions);

static std::string find_file(std::string& name, S8* codec)
{
    std::string tname;
    for (S32 i = 0; i < num_file_extensions; ++i)
    {
        tname = name + "." + std::string(file_extensions[i].exten);
        llifstream ifs(tname.c_str(), std::ifstream::binary);
        if (ifs.is_open())
        {
            ifs.close();
            if (codec)
            {
                *codec = file_extensions[i].codec;
            }
            return std::string(file_extensions[i].exten);
        }
    }
    return "";
}

EImageCodec LLImageBase::getCodecFromExtension(const std::string& exten)
{
    if (!exten.empty())
    {
        for (S32 i = 0; i < num_file_extensions; ++i)
        {
            if (file_extensions[i].exten == exten)
            {
                return file_extensions[i].codec;
            }
        }
    }
    return IMG_CODEC_INVALID;
}

bool LLImageRaw::createFromFile(const std::string& filename,
                                bool j2c_lowest_mip_only)
{
    std::string name = filename;
    size_t dotidx = name.rfind('.');
    S8 codec = IMG_CODEC_INVALID;
    std::string exten;

    deleteData(); // Delete any existing data

    if (dotidx != std::string::npos)
    {
        exten = name.substr(dotidx + 1);
        LLStringUtil::toLower(exten);
        codec = getCodecFromExtension(exten);
    }
    else
    {
        exten = find_file(name, &codec);
        name = name + "." + exten;
    }
    if (codec == IMG_CODEC_INVALID)
    {
        return false; // Format not recognized
    }

    llifstream ifs(name.c_str(), std::ifstream::binary);
    if (!ifs.is_open())
    {
        // SJB: changed from llinfos to LL_DEBUGS("Image") to reduce spam
        LL_DEBUGS("Image") << "Unable to open image file: " << name
                           << LL_ENDL;
        return false;
    }

    ifs.seekg(0, std::ios::end);
    S32 length = (S32)ifs.tellg();
    if (j2c_lowest_mip_only && length > 2048)
    {
        length = 2048;
    }
    ifs.seekg(0, std::ios::beg);

    if (!length)
    {
        llinfos << "Zero length file: " << name << llendl;
        return false;
    }

    LLPointer<LLImageFormatted> image;
    switch (codec)
    {
        case IMG_CODEC_BMP:
            image = new LLImageBMP();
            break;

        case IMG_CODEC_TGA:
            image = new LLImageTGA();
            break;

        case IMG_CODEC_JPEG:
            image = new LLImageJPEG();
            break;

        case IMG_CODEC_J2C:
            image = new LLImageJ2C();
            break;

        default:
            return false;
    }
    llassert(image.notNull());

    U8* buffer = image->allocateData(length);
    if (!buffer) return false;

    ifs.read((char*)buffer, length);
    ifs.close();

    bool success = image->updateData();
    if (success)
    {
        if (j2c_lowest_mip_only && codec == IMG_CODEC_J2C)
        {
            S32 width = image->getWidth();
            S32 height = image->getHeight();
            S32 discard_level = 0;
            while (width > 1 && height > 1 &&
                   discard_level < MAX_DISCARD_LEVEL)
            {
                width >>= 1;
                height >>= 1;
                ++discard_level;
            }
            ((LLImageJ2C*)((LLImageFormatted*)image))->setDiscardLevel(discard_level);
        }
        success = image->decode(this);
    }

    image = NULL; // Deletes image
    if (!success)
    {
        deleteData();
        llwarns << "Unable to decode image: " << name << llendl;
        return false;
    }
    return true;
}

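// Illustrative usage sketch (an assumption, not part of the original file):
// decode a file straight into a raw image, letting the extension pick the
// codec; 'path' stands for a hypothetical file name.
//   LLPointer<LLImageRaw> raw = new LLImageRaw();
//   if (raw->createFromFile(path, false))
//   {
//       // ... raw now holds the decoded pixels ...
//   }
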
//---------------------------------------------------------------------------
// LLImageFormatted
//---------------------------------------------------------------------------

LLImageFormatted::LLImageFormatted(S8 codec)
:   LLImageBase(),
    mCodec(codec),
    mDecoding(0),
    mDecoded(0),
    mDiscardLevel(-1)
{
}

//virtual
void LLImageFormatted::resetLastError()
{
    LLImage::setLastError("");
}

//virtual
void LLImageFormatted::setLastError(const std::string& message,
                                    const std::string& filename)
{
    std::string error = message;
    if (!filename.empty())
    {
        error += " FILE: " + filename;
    }
    LLImage::setLastError(error);
}

//static
LLImageFormatted* LLImageFormatted::createFromType(S8 codec)
{
    LLImageFormatted* image;
    switch (codec)
    {
        case IMG_CODEC_BMP:
            image = new LLImageBMP();
            break;

        case IMG_CODEC_TGA:
            image = new LLImageTGA();
            break;

        case IMG_CODEC_JPEG:
            image = new LLImageJPEG();
            break;

        case IMG_CODEC_PNG:
            image = new LLImagePNG();
            break;

        case IMG_CODEC_J2C:
            image = new LLImageJ2C();
            break;

        default:
            image = NULL;
    }
    return image;
}

//static
LLImageFormatted* LLImageFormatted::createFromMimeType(const std::string& mimetype)
{
    S8 codec = IMG_CODEC_INVALID;
    if (mimetype == "image/bmp")
    {
        codec = IMG_CODEC_BMP;
    }
    else if (mimetype == "image/tga")
    {
        codec = IMG_CODEC_TGA;
    }
    else if (mimetype == "image/jpeg")
    {
        codec = IMG_CODEC_JPEG;
    }
    else if (mimetype == "image/png")
    {
        codec = IMG_CODEC_PNG;
    }
    else if (mimetype == "image/j2c")
    {
        codec = IMG_CODEC_J2C;
    }
    else
    {
        return NULL;
    }
    return createFromType(codec);
}

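// Illustrative usage sketch (an assumption, not part of the original file):
// instantiate a decoder for downloaded data based on its Content-Type.
//   LLPointer<LLImageFormatted> image =
//       LLImageFormatted::createFromMimeType("image/png");
//   if (image.isNull())
//   {
//       // Unsupported MIME type
//   }
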
//static
LLImageFormatted* LLImageFormatted::createFromExtension(const std::string& instring)
{
    std::string exten;
    size_t dotidx = instring.rfind('.');
    if (dotidx != std::string::npos)
    {
        exten = instring.substr(dotidx + 1);
    }
    else
    {
        exten = instring;
    }
    S8 codec = getCodecFromExtension(exten);
    return createFromType(codec);
}

//virtual
void LLImageFormatted::dump()
{
    LLImageBase::dump();
    llinfos << "LLImageFormatted mDecoding " << mDecoding << " mCodec "
            << S32(mCodec) << " mDecoded " << mDecoded << llendl;
}

S32 LLImageFormatted::calcDataSize(S32 discard_level)
{
    if (discard_level < 0)
    {
        discard_level = mDiscardLevel;
    }
    S32 w = getWidth() >> discard_level;
    S32 h = getHeight() >> discard_level;
    w = llmax(w, 1);
    h = llmax(h, 1);
    return w * h * getComponents();
}

S32 LLImageFormatted::calcDiscardLevelBytes(S32 bytes)
{
    llassert(bytes >= 0);
    S32 discard_level = 0;
    while (true)
    {
        S32 bytes_needed = calcDataSize(discard_level); // virtual
        if (bytes_needed <= bytes)
        {
            break;
        }
        if (++discard_level > MAX_IMAGE_MIP)
        {
            return -1;
        }
    }
    return discard_level;
}

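// Worked example (added for clarity, not in the original file): each discard
// level halves the width and the height, so calcDataSize() shrinks by a
// factor of 4 per level. For a 512x512, 4-component image (1 MB at level 0),
// a 96 KB byte budget yields discard level 2: 256 KB at level 1 is still too
// big, 64 KB at level 2 fits.
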
// Subclasses that can handle more than 4 channels should override this
// method.
bool LLImageFormatted::decodeChannels(LLImageRaw* raw_image, S32 first_channel,
                                      S32 max_channel)
{
    llassert(first_channel == 0 && max_channel == 4);
    return decode(raw_image); // Loads first 4 channels by default.
}

bool LLImageFormatted::copyData(U8* data, S32 size)
{
    if (data && (data != getData() || size != getDataSize()))
    {
        deleteData();
        if (allocateData(size) && getData())
        {
            memcpy(getData(), data, size);
        }
        else
        {
            return false;
        }
    }
    return true;
}

// LLImageFormatted becomes the owner of data
void LLImageFormatted::setData(U8* data, S32 size)
{
    if (data && data != getData())
    {
        deleteData();
        setDataAndSize(data, size); // Access private LLImageBase members
    }
}

void LLImageFormatted::appendData(U8* data, S32 size)
{
    if (data)
    {
        if (!getData())
        {
            setData(data, size); // Takes ownership of 'data'
        }
        else
        {
            S32 cursize = getDataSize();
            S32 newsize = cursize + size;
            if (reallocateData(newsize))
            {
                memcpy(getData() + cursize, data, size);
                // The appended buffer has been copied: free it.
                free_texture_mem(data);
            }
        }
    }
}

bool LLImageFormatted::load(const std::string& filename)
{
    resetLastError();

    S64 file_size = 0;
    LLFile infile(filename, "rb", &file_size);
    if (!infile)
    {
        setLastError("Unable to open file for reading", filename);
        return false;
    }
    if (file_size == 0)
    {
        setLastError("File is empty", filename);
        return false;
    }

    U8* data = allocateData(file_size);
    if (!data)
    {
        setLastError("Out of memory", filename);
        return false;
    }
    if (infile.read(data, file_size) != file_size)
    {
        deleteData();
        setLastError("Unable to read entire file", filename);
        return false;
    }
    return updateData();
}

bool LLImageFormatted::save(const std::string& filename)
{
    const U8* datap = getData();
    if (!datap)
    {
        llwarns << "NULL data pointer for raw image. Not saving: " << filename
                << llendl;
        return false;
    }
    S64 bytes = getDataSize();
    if (bytes <= 0)
    {
        llwarns << "Nothing to write. Not saving: " << filename << llendl;
        return false;
    }

    resetLastError();

    LLFile out(filename, "wb");
    if (!out)
    {
        setLastError("Unable to open file for writing", filename);
        return false;
    }
    if (out.write(datap, bytes) != bytes)
    {
        setLastError("Short write (drive full ?)", filename);
        return false;
    }
    return true;
}

S8 LLImageFormatted::getCodec() const
{
    return mCodec;
}

static void avg4_colors4(const U8* a, const U8* b, const U8* c, const U8* d,
                         U8* dst)
{
    dst[0] = (U8)(((U32)(a[0]) + b[0] + c[0] + d[0]) >> 2);
    dst[1] = (U8)(((U32)(a[1]) + b[1] + c[1] + d[1]) >> 2);
    dst[2] = (U8)(((U32)(a[2]) + b[2] + c[2] + d[2]) >> 2);
    dst[3] = (U8)(((U32)(a[3]) + b[3] + c[3] + d[3]) >> 2);
}

static void avg4_colors3(const U8* a, const U8* b, const U8* c, const U8* d,
                         U8* dst)
{
    dst[0] = (U8)(((U32)(a[0]) + b[0] + c[0] + d[0]) >> 2);
    dst[1] = (U8)(((U32)(a[1]) + b[1] + c[1] + d[1]) >> 2);
    dst[2] = (U8)(((U32)(a[2]) + b[2] + c[2] + d[2]) >> 2);
}

static void avg4_colors2(const U8* a, const U8* b, const U8* c, const U8* d,
                         U8* dst)
{
    dst[0] = (U8)(((U32)(a[0]) + b[0] + c[0] + d[0]) >> 2);
    dst[1] = (U8)(((U32)(a[1]) + b[1] + c[1] + d[1]) >> 2);
}

void LLImageBase::setDataAndSize(U8* data, S32 size)
{
    mData = data;
    mDataSize = size;
}

//static
void LLImageBase::generateMip(const U8* indata, U8* mipdata, S32 width,
                              S32 height, S32 nchannels)
{
    llassert(width > 0 && height > 0);
    U8* data = mipdata;
    S32 in_width = width * 2;
    for (S32 h = 0; h < height; ++h)
    {
        for (S32 w = 0; w < width; ++w)
        {
            switch (nchannels)
            {
                case 4:
                    avg4_colors4(indata, indata + 4, indata + 4 * in_width,
                                 indata + 4 * in_width + 4, data);
                    break;

                case 3:
                    avg4_colors3(indata, indata + 3, indata + 3 * in_width,
                                 indata + 3 * in_width + 3, data);
                    break;

                case 2:
                    avg4_colors2(indata, indata + 2, indata + 2 * in_width,
                                 indata + 2 * in_width + 2, data);
                    break;

                case 1:
                    *data = (U8)(((U32)(indata[0]) + indata[1] +
                                  indata[in_width] +
                                  indata[in_width + 1]) >> 2);
                    break;

                default:
                    llerrs << "Bad number of channels" << llendl;
            }
            indata += nchannels * 2;
            data += nchannels;
        }
        indata += nchannels * in_width; // Skip odd lines
    }
}

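// Illustrative usage sketch (an assumption, not part of the original file):
// generate the next mip level by 2x2 box averaging. Note that the width and
// height passed are those of the *destination* mip, i.e. half those of the
// source level; 'level0_data' and 'level1_data' are hypothetical buffers.
//   LLImageBase::generateMip(level0_data, level1_data,
//                            width / 2, height / 2, components);
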
//static
F32 LLImageBase::calc_download_priority(F32 virtual_size, F32 visible_pixels,
                                        S32 bytes_sent)
{
    F32 bytes_weight = 1.f;
    if (!bytes_sent)
    {
        bytes_weight = 20.f;
    }
    else if (bytes_sent < 1000)
    {
        bytes_weight = 1.f;
    }
    else if (bytes_sent < 2000)
    {
        bytes_weight = 1.f / 1.5f;
    }
    else if (bytes_sent < 4000)
    {
        bytes_weight = 1.f / 3.f;
    }
    else if (bytes_sent < 8000)
    {
        bytes_weight = 1.f / 6.f;
    }
    else if (bytes_sent < 16000)
    {
        bytes_weight = 1.f / 12.f;
    }
    else if (bytes_sent < 32000)
    {
        bytes_weight = 1.f / 20.f;
    }
    else if (bytes_sent < 64000)
    {
        bytes_weight = 1.f / 32.f;
    }
    else
    {
        bytes_weight = 1.f / 64.f;
    }
    bytes_weight *= bytes_weight;

    F32 virtual_size_factor = virtual_size * 0.01f;
    // The goal is for the weighted priority to be <= 0 when we have reached
    // a point where we have sent enough data.
    F32 w_priority = log10f(bytes_weight * virtual_size_factor);
    // We do not want to affect how MANY bytes we send based on the visible
    // pixels, but the order in which they are sent. We post-multiply so we do
    // not change the zero point.
    if (w_priority > 0.f)
    {
        F32 pixel_weight = log10f(visible_pixels + 1.f) * 3.f;
        w_priority *= pixel_weight;
    }
    return w_priority;
}