/**
 * @file llvolume.cpp
 *
 * $LicenseInfo:firstyear=2002&license=viewergpl$
 *
 * Copyright (c) 2002-2009, Linden Research, Inc.
 *
 * Second Life Viewer Source Code
 * The source code in this file ("Source Code") is provided by Linden Lab
 * to you under the terms of the GNU General Public License, version 2.0
 * ("GPL"), unless you have obtained a separate licensing agreement
 * ("Other License"), formally executed by you and Linden Lab. Terms of
 * the GPL can be found in doc/GPL-license.txt in this distribution, or
 * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2
 *
 * There are special exceptions to the terms and conditions of the GPL as
 * it is applied to this Source Code. View the full text of the exception
 * in the file doc/FLOSS-exception.txt in this software distribution, or
 * online at
 * http://secondlifegrid.net/programs/open_source/licensing/flossexception
 *
 * By copying, modifying or distributing this software, you acknowledge
 * that you have read and understood your obligations described above,
 * and agree to abide by those obligations.
 *
 * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO
 * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY,
 * COMPLETENESS OR PERFORMANCE.
 * $/LicenseInfo$
 */

#include "linden_common.h"

#if !LL_WINDOWS
# include <stdint.h>
#endif

#include <utility> // For std::swap()

#include "meshoptimizer.h"

#include "llmemory.h"
#include "llvolume.h"

#include "llmatrix4.h"
#include "llmatrix3.h"
#include "llmatrix3a.h"
#include "llmeshoptimizer.h"
#include "lloctree.h"
#include "llsdserialize.h"
#include "lltimer.h"
#include "llvolumemgr.h"
#include "llvolumeoctree.h"

#if LL_OPENMP
# include "llatomic.h"
# include "llthread.h" // For is_main_thread()
#endif

#include "indra_constants.h"

// "error: 'get_temporary_buffer<...>' is deprecated" seen with clang 18. HB
#if CLANG_VERSION >= 180000
# pragma clang diagnostic ignored "-Wdeprecated-declarations"
#endif

// Insert mikktspace implementation into llvolume object file
#include "mikktspace/mikktspace.hh"
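
// Vertex radius used by LLProfile::genNGon() for profiles with fewer than 8
// total sides, indexed by the total side count; the values are chosen so that
// low side count profiles (triangles, squares...) still roughly fill their
// bounding box. Profiles with 8 or more sides use the default radius of 0.5.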
static const F32 sTableScale[] =
{
    1.f, 1.f, 1.f, 0.5f, 0.707107f, 0.53f, 0.525f, 0.5f
};

// This avoids having to import llrender headers in llprimitive
extern bool gDebugGL;
extern bool gUsePBRShaders;

bool LLLineSegmentBoxIntersect(const LLVector3& start, const LLVector3& end,
        const LLVector3& center, const LLVector3& size)
{
    return LLLineSegmentBoxIntersect(start.mV, end.mV, center.mV, size.mV);
}
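
// The overload below treats the segment as a center point plus a half-extent
// direction vector and applies a separating axis test against the box: the
// three box axes are checked in the loop, followed by the three cross
// products of the segment direction with the box axes.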
bool LLLineSegmentBoxIntersect(const F32* start, const F32* end,
        const F32* center, const F32* size)
{
    F32 fAWdU[3];
    F32 dir[3];
    F32 diff[3];
    for (U32 i = 0; i < 3; ++i)
    {
        dir[i] = 0.5f * (end[i] - start[i]);
        diff[i] = (0.5f * (end[i] + start[i])) - center[i];
        fAWdU[i] = fabsf(dir[i]);
        if (fabsf(diff[i]) > size[i] + fAWdU[i])
        {
            return false;
        }
    }
    float f;
    f = dir[1] * diff[2] - dir[2] * diff[1];
    if (fabsf(f) > size[1] * fAWdU[2] + size[2] * fAWdU[1])
    {
        return false;
    }
    f = dir[2] * diff[0] - dir[0] * diff[2];
    if (fabsf(f) > size[0] * fAWdU[2] + size[2] * fAWdU[0])
    {
        return false;
    }
    f = dir[0] * diff[1] - dir[1] * diff[0];
    if (fabsf(f) > size[0] * fAWdU[1] + size[1] * fAWdU[0])
    {
        return false;
    }
    return true;
}

// Finds tangent vector based on three vertices with texture coordinates.
// Fills in dummy values if the triangle has degenerate texture coordinates.
void calc_tangent_from_triangle(LLVector4a& normal, LLVector4a& tangent_out,
        const LLVector4a& v1, const LLVector2& w1,
        const LLVector4a& v2, const LLVector2& w2,
        const LLVector4a& v3, const LLVector2& w3)
{
    const F32* v1ptr = v1.getF32ptr();
    const F32* v2ptr = v2.getF32ptr();
    const F32* v3ptr = v3.getF32ptr();
    F32 x1 = v2ptr[0] - v1ptr[0];
    F32 x2 = v3ptr[0] - v1ptr[0];
    F32 y1 = v2ptr[1] - v1ptr[1];
    F32 y2 = v3ptr[1] - v1ptr[1];
    F32 z1 = v2ptr[2] - v1ptr[2];
    F32 z2 = v3ptr[2] - v1ptr[2];
    F32 s1 = w2.mV[0] - w1.mV[0];
    F32 s2 = w3.mV[0] - w1.mV[0];
    F32 t1 = w2.mV[1] - w1.mV[1];
    F32 t2 = w3.mV[1] - w1.mV[1];
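    // rd is the determinant of the 2x2 matrix of UV deltas; when it is close
    // to zero the texture coordinates are (nearly) degenerate, so instead of
    // dividing by ~0 the reciprocal is clamped to +/-1024.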
    F32 rd = s1 * t2 - s2 * t1;
    F32 r = rd * rd > FLT_EPSILON ? 1.f / rd
                                  : (rd > 0.f ? 1024.f : -1024.f);
    llassert(llfinite(r));
    llassert(!llisnan(r));
    LLVector4a sdir((t2 * x1 - t1 * x2) * r, (t2 * y1 - t1 * y2) * r,
            (t2 * z1 - t1 * z2) * r);
    LLVector4a tdir((s1 * x2 - s2 * x1) * r, (s1 * y2 - s2 * y1) * r,
            (s1 * z2 - s2 * z1) * r);
    LLVector4a n = normal;
    LLVector4a t = sdir;
    LLVector4a ncrosst;
    ncrosst.setCross3(n, t);
    // Gram-Schmidt orthogonalize
    n.mul(n.dot3(t).getF32());
    LLVector4a tsubn;
    tsubn.setSub(t, n);
    if (tsubn.dot3(tsubn).getF32() > F_APPROXIMATELY_ZERO)
    {
        tsubn.normalize3fast_checked();
        // Calculate handedness
        F32 handedness = ncrosst.dot3(tdir).getF32() < 0.f ? -1.f : 1.f;
        tsubn.getF32ptr()[3] = handedness;
        tangent_out = tsubn;
    }
    else
    {
        // Degenerate, make up a value
        tangent_out.set(0.f, 0.f, 1.f, 1.f);
    }
}

// Intersection test between triangle (vert0, vert1, vert2) and a ray starting
// at orig in direction dir. Returns true when they intersect, the barycentric
// coordinates of the hit in intersection_a and intersection_b, and the ray
// parameter t (hit point = orig + t * dir) in intersection_t.
// Moller-Trumbore algorithm.
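// The intersection is found by solving
//   orig + t * dir = (1 - u - v) * vert0 + u * vert1 + v * vert2
// with scalar triple products. This variant keeps the determinant unscaled
// (u, v and t are only divided by it once a hit is confirmed) and requires a
// positive determinant, so back-facing triangles are culled; see the
// two-sided variant below for the other case.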
bool LLTriangleRayIntersect(const LLVector4a& vert0, const LLVector4a& vert1,
        const LLVector4a& vert2, const LLVector4a& orig,
        const LLVector4a& dir, F32& intersection_a,
        F32& intersection_b, F32& intersection_t)
{
    // Find vectors for two edges sharing vert0
    LLVector4a edge1;
    edge1.setSub(vert1, vert0);
    LLVector4a edge2;
    edge2.setSub(vert2, vert0);
    // Begin calculating determinant - also used to calculate U parameter
    LLVector4a pvec;
    pvec.setCross3(dir, edge2);
    // If determinant is near zero, ray lies in plane of triangle
    LLVector4a det;
    det.setAllDot3(edge1, pvec);
    if (det.greaterEqual(LLVector4a::getEpsilon()).getGatheredBits() & 0x7)
    {
        // Calculate distance from vert0 to ray origin
        LLVector4a tvec;
        tvec.setSub(orig, vert0);
        // Calculate U parameter and test bounds
        LLVector4a u;
        u.setAllDot3(tvec, pvec);
        if ((u.greaterEqual(LLVector4a::getZero()).getGatheredBits() & 0x7) &&
            (u.lessEqual(det).getGatheredBits() & 0x7))
        {
            // Prepare to test V parameter
            LLVector4a qvec;
            qvec.setCross3(tvec, edge1);
            // Calculate V parameter and test bounds
            LLVector4a v;
            v.setAllDot3(dir, qvec);
            LLVector4a sum_uv;
            sum_uv.setAdd(u, v);
            S32 v_gequal =
                v.greaterEqual(LLVector4a::getZero()).getGatheredBits() & 0x7;
            S32 sum_lequal = sum_uv.lessEqual(det).getGatheredBits() & 0x7;
            if (v_gequal && sum_lequal)
            {
                // Calculate t, scale parameters, ray intersects triangle
                LLVector4a t;
                t.setAllDot3(edge2, qvec);
                t.div(det);
                u.div(det);
                v.div(det);
                intersection_a = u[0];
                intersection_b = v[0];
                intersection_t = t[0];
                return true;
            }
        }
    }
    return false;
}
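
// Two-sided variant of the same Moller-Trumbore test: the reciprocal of the
// determinant is taken up front and only a near-zero determinant (ray nearly
// parallel to the triangle plane) is rejected, so hits on back-facing
// triangles are reported as well.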
bool LLTriangleRayIntersectTwoSided(const LLVector4a& vert0,
        const LLVector4a& vert1,
        const LLVector4a& vert2,
        const LLVector4a& orig,
        const LLVector4a& dir,
        F32& intersection_a,
        F32& intersection_b,
        F32& intersection_t)
{
    F32 u, v, t;
    // Find vectors for two edges sharing vert0
    LLVector4a edge1;
    edge1.setSub(vert1, vert0);
    LLVector4a edge2;
    edge2.setSub(vert2, vert0);
    // Begin calculating determinant - also used to calculate U parameter
    LLVector4a pvec;
    pvec.setCross3(dir, edge2);
    // If determinant is near zero, ray lies in plane of triangle
    F32 det = edge1.dot3(pvec).getF32();
    if (det > -F_APPROXIMATELY_ZERO && det < F_APPROXIMATELY_ZERO)
    {
        return false;
    }
    F32 inv_det = 1.f / det;
    // Calculate distance from vert0 to ray origin
    LLVector4a tvec;
    tvec.setSub(orig, vert0);
    // Calculate U parameter and test bounds
    u = tvec.dot3(pvec).getF32() * inv_det;
    if (u < 0.f || u > 1.f)
    {
        return false;
    }
    // Prepare to test V parameter
    tvec.sub(edge1);
    // Calculate V parameter and test bounds
    v = dir.dot3(tvec).getF32() * inv_det;
    if (v < 0.f || u + v > 1.f)
    {
        return false;
    }
    // Calculate t, ray intersects triangle
    t = edge2.dot3(tvec).getF32() * inv_det;
    intersection_a = u;
    intersection_b = v;
    intersection_t = t;
    return true;
}

// Helper for non-aligned vectors
bool LLTriangleRayIntersect(const LLVector3& vert0, const LLVector3& vert1,
        const LLVector3& vert2, const LLVector3& orig,
        const LLVector3& dir, F32& intersection_a,
        F32& intersection_b, F32& intersection_t,
        bool two_sided)
{
    LLVector4a vert0a, vert1a, vert2a, origa, dira;
    vert0a.load3(vert0.mV);
    vert1a.load3(vert1.mV);
    vert2a.load3(vert2.mV);
    origa.load3(orig.mV);
    dira.load3(dir.mV);
    if (two_sided)
    {
        return LLTriangleRayIntersectTwoSided(vert0a, vert1a, vert2a, origa,
                dira, intersection_a,
                intersection_b, intersection_t);
    }
    else
    {
        return LLTriangleRayIntersect(vert0a, vert1a, vert2a, origa, dira,
                intersection_a, intersection_b,
                intersection_t);
    }
}

// Finds the point on a triangle closest to a given target point. Algorithm
// derived from:
// http://www.geometrictools.com/Documentation/DistancePoint3Triangle3.pdf
// Returns the squared distance, and the barycentric coordinates of the
// closest point in closest_a and closest_b.
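// The squared distance Q(s, t) = |vert0 + s * edge0 + t * edge1 - target|^2
// is a quadratic in (s, t), and the closest point is its minimum over the
// domain s >= 0, t >= 0, s + t <= 1. The code first computes the
// unconstrained minimum, then classifies it into the interior region or one
// of the six exterior regions around the triangle and clamps it to the
// corresponding edge or vertex (the "region N" labels follow the paper
// referenced above).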
F32 LLTriangleClosestPoint(const LLVector3& vert0, const LLVector3& vert1,
        const LLVector3& vert2, const LLVector3& target,
        F32& closest_a, F32& closest_b)
{
    // Edges of triangle
    LLVector3 edge0 = vert1 - vert0;
    LLVector3 edge1 = vert2 - vert0;
    LLVector3 delta = vert0 - target;
    // Length of triangle edges
    F32 a00 = edge0.lengthSquared();
    F32 a01 = edge0 * edge1;
    F32 a11 = edge1.lengthSquared();
    F32 b0 = delta * edge0;
    F32 b1 = delta * edge1;
    F32 c = delta.lengthSquared();
    F32 det = fabs(a00 * a11 - a01 * a01);
    F32 s = a01 * b1 - a11 * b0;
    F32 t = a01 * b0 - a00 * b1;
    F32 dist_squared;

    if (s + t <= det)
    {
        if (s < 0.f)
        {
            if (t < 0.f) // region 4
            {
                if (b0 < 0.f)
                {
                    t = 0.f;
                    if (-b0 >= a00)
                    {
                        s = 1.f;
                        dist_squared = a00 + 2.f * b0 + c;
                    }
                    else
                    {
                        s = -b0 / a00;
                        dist_squared = b0 * s + c;
                    }
                }
                else
                {
                    s = 0.f;
                    if (b1 >= 0.f)
                    {
                        t = 0.f;
                        dist_squared = c;
                    }
                    else if (-b1 >= a11)
                    {
                        t = 1.f;
                        dist_squared = a11 + 2.f * b1 + c;
                    }
                    else
                    {
                        t = -b1 / a11;
                        dist_squared = b1 * t + c;
                    }
                }
            }
            else // region 3
            {
                s = 0.f;
                if (b1 >= 0.f)
                {
                    t = 0.f;
                    dist_squared = c;
                }
                else if (-b1 >= a11)
                {
                    t = 1.f;
                    dist_squared = a11 + 2.f * b1 + c;
                }
                else
                {
                    t = -b1 / a11;
                    dist_squared = b1 * t + c;
                }
            }
        }
        else if (t < 0.f) // region 5
        {
            t = 0.f;
            if (b0 >= 0.f)
            {
                s = 0.f;
                dist_squared = c;
            }
            else if (-b0 >= a00)
            {
                s = 1.f;
                dist_squared = a00 + 2.f * b0 + c;
            }
            else
            {
                s = -b0 / a00;
                dist_squared = b0 * s + c;
            }
        }
        else // region 0
        {
            // Minimum at interior point
            F32 det_inv = 1.f / det;
            s *= det_inv;
            t *= det_inv;
            dist_squared = s * (a00 * s + a01 * t + 2.f * b0) +
                           t * (a01 * s + a11 * t + 2.f * b1) + c;
        }
    }
    else
    {
        F32 tmp0, tmp1, numerator, denominator;
        if (s < 0.f) // region 2
        {
            tmp0 = a01 + b0;
            tmp1 = a11 + b1;
            if (tmp1 > tmp0)
            {
                numerator = tmp1 - tmp0;
                denominator = a00 - 2.f * a01 + a11;
                if (numerator >= denominator)
                {
                    s = 1.f;
                    t = 0.f;
                    dist_squared = a00 + 2.f * b0 + c;
                }
                else
                {
                    s = numerator / denominator;
                    t = 1.f - s;
                    dist_squared = s * (a00 * s + a01 * t + 2.f * b0) +
                                   t * (a01 * s + a11 * t + 2.f * b1) + c;
                }
            }
            else
            {
                s = 0.f;
                if (tmp1 <= 0.f)
                {
                    t = 1.f;
                    dist_squared = a11 + 2.f * b1 + c;
                }
                else if (b1 >= 0.f)
                {
                    t = 0.f;
                    dist_squared = c;
                }
                else
                {
                    t = -b1 / a11;
                    dist_squared = b1 * t + c;
                }
            }
        }
        else if (t < 0.f) // region 6
        {
            tmp0 = a01 + b1;
            tmp1 = a00 + b0;
            if (tmp1 > tmp0)
            {
                numerator = tmp1 - tmp0;
                denominator = a00 - 2.f * a01 + a11;
                if (numerator >= denominator)
                {
                    t = 1.f;
                    s = 0.f;
                    dist_squared = a11 + 2.f * b1 + c;
                }
                else
                {
                    t = numerator / denominator;
                    s = 1.f - t;
                    dist_squared = s * (a00 * s + a01 * t + 2.f * b0) +
                                   t * (a01 * s + a11 * t + 2.f * b1) + c;
                }
            }
            else
            {
                t = 0.f;
                if (tmp1 <= 0.f)
                {
                    s = 1.f;
                    dist_squared = a00 + 2.f * b0 + c;
                }
                else if (b0 >= 0.f)
                {
                    s = 0.f;
                    dist_squared = c;
                }
                else
                {
                    s = -b0 / a00;
                    dist_squared = b0 * s + c;
                }
            }
        }
        else // region 1
        {
            numerator = a11 + b1 - a01 - b0;
            if (numerator <= 0.f)
            {
                s = 0.f;
                t = 1.f;
                dist_squared = a11 + 2.f * b1 + c;
            }
            else
            {
                denominator = a00 - 2.f * a01 + a11;
                if (numerator >= denominator)
                {
                    s = 1.f;
                    t = 0.f;
                    dist_squared = a00 + 2.f * b0 + c;
                }
                else
                {
                    s = numerator / denominator;
                    t = 1.f - s;
                    dist_squared = s * (a00 * s + a01 * t + 2.f * b0) +
                                   t * (a01 * s + a11 * t + 2.f * b1) + c;
                }
            }
        }
    }

    closest_a = s;
    closest_b = t;
    return fabs(dist_squared);
}

LLProfile::Face* LLProfile::addCap(S16 face_id)
{
    size_t count = mFaces.size();
    mFaces.resize(count + 1);
    Face* facep = &(mFaces[count]);
    if (LL_UNLIKELY(!facep))
    {
        LLMemory::allocationFailed();
        llwarns << "Out of memory, face not added !" << llendl;
        return NULL;
    }
    facep->mIndex = 0;
    facep->mCount = mTotal;
    facep->mScaleU = 1.f;
    facep->mCap = true;
    facep->mFaceID = face_id;
    return facep;
}

LLProfile::Face* LLProfile::addFace(S32 i, S32 count, F32 u_scale, S16 face_id,
        bool flat)
{
    size_t faces = mFaces.size();
    mFaces.resize(faces + 1);
    Face* facep = &(mFaces[faces]);
    if (LL_UNLIKELY(!facep))
    {
        LLMemory::allocationFailed();
        llwarns << "Out of memory, face not added !" << llendl;
        return NULL;
    }
    facep->mIndex = i;
    facep->mCount = count;
    facep->mScaleU = u_scale;
    facep->mFlat = flat;
    facep->mCap = false;
    facep->mFaceID = face_id;
    return facep;
}

// This is basically LLProfile::genNGon stripped down to only the operations
// that influence the number of points
//static
S32 LLProfile::getNumNGonPoints(const LLProfileParams& params, S32 sides,
        F32 ang_scale, S32 split)
{
    // Generate an n-sided "circular" path. 0 is (1,0), and we go counter-
    // clockwise along a circular path from there.
    F32 begin = params.getBegin();
    F32 end = params.getEnd();
    F32 t_step = 1.f / (F32)sides;
    F32 t_first = floor(begin * sides) / (F32)sides;
    // t starts at the first (possibly fractional) face and is incremented to
    // the end of that face; t_fraction measures how much of that first face
    // lies before 'begin'.
    F32 t = t_first + t_step;
    F32 t_fraction = (begin - t_first) * sides;
    // Only use if it is not almost exactly on an edge.
    S32 np = 0;
    if (t_fraction < 0.9999f)
    {
        ++np;
    }
    // There's lots of potential here for floating point error to generate
    // unneeded extra points - DJS 04/05/02
    while (t < end)
    {
        // Iterate through all the integer steps of t.
        ++np;
        t += t_step;
    }
    // Find the fraction that we need to add to the end point.
    t_fraction = (end - t + t_step) * sides;
    if (t_fraction > 0.0001f)
    {
        ++np;
    }
    // If we are sliced, the profile is open.
    if ((end - begin) * ang_scale < 0.99f)
    {
        if (params.getHollow() <= 0)
        {
            // Put center point if not hollow.
            ++np;
        }
    }
    return np;
}
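
// Generates the profile vertices for an n-sided "circular" cross section:
// starting from the face boundary at or before 'begin', t is walked to 'end'
// in steps of 1 / sides, placing each point on a circle of radius 'scale'
// (see sTableScale above). Partial first and last faces are handled by
// lerping along the adjacent edge, and each edge is optionally subdivided
// into 'split' extra points.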
void LLProfile::genNGon(const LLProfileParams& params, S32 sides, F32 offset,
        F32 ang_scale, S32 split)
{
    // Generate an n-sided "circular" path. 0 is (1,0), and we go counter-
    // clockwise along a circular path from there
    F32 begin = params.getBegin();
    F32 end = params.getEnd();
    F32 t_step = 1.f / sides;
    F32 ang_step = 2.f * F_PI * t_step * ang_scale;
    // Scale the profile so that its size "matches" the scale; this
    // compensates so that the generated object generally fills its bounding
    // box.
    // Total number of sides all around:
    S32 total_sides = ll_roundp(sides / ang_scale);
    F32 scale = 0.5f;
    if (total_sides < 8)
    {
        scale = sTableScale[total_sides];
    }
    F32 t_first = floor(begin * sides) / (F32)sides;
    // pt1 is the first point on the fractional face.
    // Starting t and ang values for the first face
    F32 t = t_first;
    F32 ang = 2.f * F_PI * (t * ang_scale + offset);
    LLVector4a pt1;
    pt1.set(cosf(ang) * scale, sinf(ang) * scale, t);
    // Increment to the next point. pt2 is the end point on the fractional face
    t += t_step;
    ang += ang_step;
    LLVector4a pt2;
    pt2.set(cosf(ang) * scale, sinf(ang) * scale, t);
    F32 t_fraction = (begin - t_first) * sides;
    // Only use if it is not almost exactly on an edge.
    if (t_fraction < 0.9999f)
    {
        LLVector4a new_pt;
        new_pt.setLerp(pt1, pt2, t_fraction);
        mVertices.push_back(new_pt);
    }
    // There is lots of potential here for floating point error to generate
    // unneeded extra points - DJS 04/05/02
    while (t < end)
    {
        // Iterate through all the integer steps of t.
        pt1.set(cosf(ang) * scale, sinf(ang) * scale, t);
        if (mVertices.size() > 0)
        {
            LLVector4a p = mVertices[mVertices.size() - 1];
            LLVector4a new_pt;
            for (S32 i = 0; i < split && mVertices.size() > 0; ++i)
            {
                new_pt.setSub(pt1, p);
                new_pt.mul(1.f / (F32)(split + 1) * (F32)(i + 1));
                new_pt.add(p);
                mVertices.push_back(new_pt);
            }
        }
        mVertices.push_back(pt1);
        t += t_step;
        ang += ang_step;
    }
    // pt1 is the first point on the fractional face
    // pt2 is the end point on the fractional face
    pt2.set(cosf(ang) * scale, sinf(ang) * scale, t);
    // Find the fraction that we need to add to the end point.
    t_fraction = (end - t + t_step) * sides;
    if (t_fraction > 0.0001f)
    {
        LLVector4a new_pt;
        new_pt.setLerp(pt1, pt2, t_fraction);
        if (mVertices.size() > 0)
        {
            LLVector4a p = mVertices[mVertices.size() - 1];
            for (S32 i = 0; i < split && mVertices.size() > 0; ++i)
            {
                LLVector4a pt1;
                pt1.setSub(new_pt, p);
                pt1.mul(1.f / (F32)(split + 1) * (F32)(i + 1));
                pt1.add(p);
                mVertices.push_back(pt1);
            }
        }
        mVertices.push_back(new_pt);
    }
    // If we are sliced, the profile is open.
    if ((end - begin) * ang_scale < 0.99f)
    {
        if ((end - begin) * ang_scale > 0.5f)
        {
            mConcave = true;
        }
        else
        {
            mConcave = false;
        }
        mOpen = true;
        if (params.getHollow() <= 0)
        {
            // Put center point if not hollow.
            mVertices.push_back(LLVector4a(0.f, 0.f, 0.f));
        }
    }
    else
    {
        // The profile is not open.
        mOpen = false;
        mConcave = false;
    }
    mTotal = mVertices.size();
}

// Hollow is a percentage of the original bounding box, not of this particular
// profile's geometry. Thus, a swept triangle needs lower hollow values than a
// swept square. Note that addHole will NOT work for non-"circular" profiles,
// if we ever decide to use them.
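// addHole() generates the hole outline with genNGon() (appending its vertices
// after the outer profile), scales those new vertices toward the center by
// box_hollow, and reverses their order so that the inner ring winds the
// opposite way. Cap faces then get their vertex count doubled so that they
// cover both the outer and the inner ring.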
LLProfile::Face* LLProfile::addHole(const LLProfileParams& params, bool flat,
        F32 sides, F32 offset, F32 box_hollow,
        F32 ang_scale, S32 split)
{
    // Remember the number of vertices in the outer profile before the hole
    // vertices get appended.
    mTotalOut = mTotal;
    genNGon(params, llfloor(sides), offset, ang_scale, split);
    Face* face = addFace(mTotalOut, mTotal - mTotalOut, 0, LL_FACE_INNER_SIDE,
            flat);
    // thread_local and not just static, because this method can be indirectly
    // called (via the generate(...) method) by both the main thread and the
    // mesh repository thread. HB
    thread_local LLAlignedArray<LLVector4a, 64> pt;
    pt.resize(mTotal);
    for (S32 i = mTotalOut; i < mTotal; ++i)
    {
        pt[i] = mVertices[i];
        pt[i].mul(box_hollow);
    }
    S32 j = mTotal - 1;
    for (S32 i = mTotalOut; i < mTotal; ++i)
    {
        mVertices[i] = pt[j--];
    }
    for (S32 i = 0, count = mFaces.size(); i < count; ++i)
    {
        if (mFaces[i].mCap)
        {
            mFaces[i].mCount *= 2;
        }
    }
    return face;
}

// This is basically LLProfile::generate stripped down to only operations that
// influence the number of points
//static
S32 LLProfile::getNumPoints(const LLProfileParams& params, bool path_open,
        F32 detail, S32 split, bool is_sculpted,
        S32 sculpt_size)
{
    if (detail < 0.f)
    {
        detail = 0.f;
    }
    // Generate the face data
    F32 hollow = params.getHollow();
    S32 np = 0;
    switch (params.getCurveType() & LL_PCODE_PROFILE_MASK)
    {
        case LL_PCODE_PROFILE_SQUARE:
        {
            np = getNumNGonPoints(params, 4, 1.f, split);
            if (hollow)
            {
                np *= 2;
            }
            break;
        }

        case LL_PCODE_PROFILE_ISOTRI:
        case LL_PCODE_PROFILE_RIGHTTRI:
        case LL_PCODE_PROFILE_EQUALTRI:
        {
            np = getNumNGonPoints(params, 3, 1.f, split);
            if (hollow)
            {
                np *= 2;
            }
            break;
        }

        case LL_PCODE_PROFILE_CIRCLE:
        {
            // If this has a square hollow, we should adjust the number of
            // faces a bit so that the geometry lines up.
            U8 hole_type = 0;
            F32 circle_detail = MIN_DETAIL_FACES * detail;
            if (hollow)
            {
                hole_type = params.getCurveType() & LL_PCODE_HOLE_MASK;
                if (hole_type == LL_PCODE_HOLE_SQUARE)
                {
                    // Snap to the next multiple of four sides, so that corners
                    // line up
                    circle_detail = llceil(circle_detail * 0.25f) * 4.f;
                }
            }
            S32 sides = (S32)circle_detail;
            if (is_sculpted)
            {
                sides = sculpt_size;
            }
            np = getNumNGonPoints(params, sides);
            if (hollow)
            {
                np *= 2;
            }
            break;
        }

        case LL_PCODE_PROFILE_CIRCLE_HALF:
        {
            // If this has a square hollow, we should adjust the number of
            // faces a bit so that the geometry lines up.
            U8 hole_type = 0;
            // Number of faces is cut in half because it's only a half-circle.
            F32 circle_detail = MIN_DETAIL_FACES * detail * 0.5f;
            if (hollow)
            {
                hole_type = params.getCurveType() & LL_PCODE_HOLE_MASK;
                if (hole_type == LL_PCODE_HOLE_SQUARE)
                {
                    // Snap to the next multiple of four sides (div 2), so that
                    // corners line up.
                    circle_detail = llceil(circle_detail * 0.5f) * 2.f;
                }
            }
            np = getNumNGonPoints(params, llfloor(circle_detail), 0.5f);
            if (hollow)
            {
                np *= 2;
            }
            // Special case for openness of sphere
            if (params.getEnd() - params.getBegin() < 1.f)
            {
            }
            else if (!hollow)
            {
                np++;
            }
            break;
        }

        default:
            break;
    }
    return np;
}
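
// Builds the 2D profile (cross section) for the given parameters: generates
// the outline vertices with genNGon() according to the profile curve type,
// registers cap, outer and inner faces, and carves the hollow via addHole()
// when requested. Returns false when the profile did not need to be
// (re)generated or when the parameters are invalid.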
  855. bool LLProfile::generate(const LLProfileParams& params, bool path_open,
  856. F32 detail, S32 split, bool is_sculpted,
  857. S32 sculpt_size)
  858. {
  859. // We need a mutex here, because this code can be called both from the main
  860. // thread (via sculpt() which is called from LLVOVolume) and from the mesh
  861. // repository thread (via LLVolume() -> LLVolume::generate()), when a new
  862. // LOD is created (by LLMeshRepoThread::lodReceived())... HB
  863. mMutex.lock();
  864. if (!mDirty && !is_sculpted)
  865. {
  866. mMutex.unlock();
  867. return false;
  868. }
  869. mDirty = false;
  870. if (detail < 0.f)
  871. {
  872. llwarns << "Attempt to generate profile with negative LOD: " << detail
  873. << ". Clamping it to 0." << llendl;
  874. detail = 0.f;
  875. }
  876. mVertices.resize(0);
  877. mFaces.resize(0);
  878. // Generate the face data
  879. S32 i;
  880. F32 begin = params.getBegin();
  881. F32 end = params.getEnd();
  882. F32 hollow = params.getHollow();
  883. // Quick validation to eliminate some server crashes.
  884. if (begin > end - 0.01f)
  885. {
  886. mMutex.unlock();
  887. llwarns << "Assertion 'begin >= end' failed; aborting." << llendl;
  888. return false;
  889. }
  890. S32 face_num = 0;
  891. switch (params.getCurveType() & LL_PCODE_PROFILE_MASK)
  892. {
  893. case LL_PCODE_PROFILE_SQUARE:
  894. {
  895. genNGon(params, 4, -0.375f, 1.f, split);
  896. if (path_open)
  897. {
  898. addCap (LL_FACE_PATH_BEGIN);
  899. }
  900. for (i = llfloor(begin * 4.f); i < llfloor(end * 4.f + .999f); ++i)
  901. {
  902. addFace((face_num++) * (split + 1), split + 2, 1,
  903. LL_FACE_OUTER_SIDE_0 << i, true);
  904. }
  905. LLVector4a scale(1, 1, 4, 1);
  906. S32 count = mVertices.size();
  907. for (i = 0; i < count; ++i)
  908. {
  909. // Scale by 4 to generate proper tex coords.
  910. mVertices[i].mul(scale);
  911. llassert(mVertices[i].isFinite3());
  912. }
  913. if (hollow)
  914. {
  915. switch (params.getCurveType() & LL_PCODE_HOLE_MASK)
  916. {
  917. case LL_PCODE_HOLE_TRIANGLE:
  918. // This offset is not correct, but we cannot change it
  919. // now... DK 11/17/04
  920. addHole(params, true, 3, -0.375f, hollow, 1.f, split);
  921. break;
  922. case LL_PCODE_HOLE_CIRCLE:
  923. // *TODO: Compute actual detail levels for cubes
  924. addHole(params, false, MIN_DETAIL_FACES * detail,
  925. -0.375f, hollow, 1.f);
  926. break;
  927. default: // LL_PCODE_HOLE_SAME, LL_PCODE_HOLE_SQUARE
  928. addHole(params, true, 4, -0.375f, hollow, 1.f, split);
  929. }
  930. }
  931. if (path_open)
  932. {
  933. mFaces[0].mCount = mTotal;
  934. }
  935. break;
  936. }
  937. case LL_PCODE_PROFILE_ISOTRI:
  938. case LL_PCODE_PROFILE_RIGHTTRI:
  939. case LL_PCODE_PROFILE_EQUALTRI:
  940. {
  941. genNGon(params, 3, 0.f, 1.f, split);
  942. LLVector4a scale(1, 1, 3, 1);
  943. S32 count = mVertices.size();
  944. for (i = 0; i < count; ++i)
  945. {
  946. // Scale by 3 to generate proper tex coords.
  947. mVertices[i].mul(scale);
  948. llassert(mVertices[i].isFinite3());
  949. }
  950. if (path_open)
  951. {
  952. addCap(LL_FACE_PATH_BEGIN);
  953. }
  954. for (i = llfloor(begin * 3.f); i < llfloor(end * 3.f + .999f); ++i)
  955. {
  956. addFace((face_num++) * (split + 1), split + 2, 1,
  957. LL_FACE_OUTER_SIDE_0 << i, true);
  958. }
  959. if (hollow)
  960. {
  961. // Swept triangles need smaller hollowness values, because the
  962. // triangle doesn't fill the bounding box.
  963. F32 triangle_hollow = hollow * 0.5f;
  964. switch (params.getCurveType() & LL_PCODE_HOLE_MASK)
  965. {
  966. case LL_PCODE_HOLE_CIRCLE:
  967. // *TODO: Actually generate level of detail for
  968. // triangles
  969. addHole(params, false, MIN_DETAIL_FACES * detail, 0,
  970. triangle_hollow, 1.f);
  971. break;
  972. case LL_PCODE_HOLE_SQUARE:
  973. addHole(params, true, 4, 0, triangle_hollow, 1.f,
  974. split);
  975. break;
  976. default: // LL_PCODE_HOLE_SAME, LL_PCODE_HOLE_TRIANGLE
  977. addHole(params, true, 3, 0, triangle_hollow, 1.f,
  978. split);
  979. }
  980. }
  981. break;
  982. }
  983. case LL_PCODE_PROFILE_CIRCLE:
  984. {
  985. // If this has a square hollow, we should adjust the number of
  986. // faces a bit so that the geometry lines up.
  987. U8 hole_type = 0;
  988. F32 circle_detail = MIN_DETAIL_FACES * detail;
  989. if (hollow)
  990. {
  991. hole_type = params.getCurveType() & LL_PCODE_HOLE_MASK;
  992. if (hole_type == LL_PCODE_HOLE_SQUARE)
  993. {
  994. // Snap to the next multiple of four sides, so that corners
  995. // line up.
  996. circle_detail = llceil(circle_detail * 0.25f) * 4.f;
  997. }
  998. }
  999. S32 sides = (S32)circle_detail;
  1000. if (is_sculpted)
  1001. {
  1002. sides = sculpt_size;
  1003. }
  1004. if (sides > 0)
  1005. {
  1006. genNGon(params, sides);
  1007. }
  1008. if (path_open)
  1009. {
1010. addCap(LL_FACE_PATH_BEGIN);
  1011. }
  1012. if (mOpen && !hollow)
  1013. {
  1014. addFace(0, mTotal - 1, 0, LL_FACE_OUTER_SIDE_0, false);
  1015. }
  1016. else
  1017. {
  1018. addFace(0, mTotal, 0, LL_FACE_OUTER_SIDE_0, false);
  1019. }
  1020. if (hollow)
  1021. {
  1022. switch (hole_type)
  1023. {
  1024. case LL_PCODE_HOLE_SQUARE:
  1025. addHole(params, true, 4, 0, hollow, 1.f, split);
  1026. break;
  1027. case LL_PCODE_HOLE_TRIANGLE:
  1028. addHole(params, true, 3, 0, hollow, 1.f, split);
  1029. break;
  1030. default: // LL_PCODE_HOLE_SAME, LL_PCODE_HOLE_CIRCLE
  1031. addHole(params, false, circle_detail, 0, hollow, 1.f);
  1032. }
  1033. }
  1034. break;
  1035. }
  1036. case LL_PCODE_PROFILE_CIRCLE_HALF:
  1037. {
  1038. // If this has a square hollow, we should adjust the number of
  1039. // faces a bit so that the geometry lines up.
  1040. U8 hole_type = 0;
  1041. // Number of faces is cut in half because it's only a half-circle.
  1042. F32 circle_detail = MIN_DETAIL_FACES * detail * 0.5f;
  1043. if (hollow)
  1044. {
  1045. hole_type = params.getCurveType() & LL_PCODE_HOLE_MASK;
  1046. if (hole_type == LL_PCODE_HOLE_SQUARE)
  1047. {
  1048. // Snap to the next multiple of four sides (div 2),
  1049. // so that corners line up.
  1050. circle_detail = llceil(circle_detail * 0.5f) * 2.f;
  1051. }
  1052. }
  1053. genNGon(params, llfloor(circle_detail), 0.5f, 0.5f);
  1054. if (path_open)
  1055. {
  1056. addCap(LL_FACE_PATH_BEGIN);
  1057. }
  1058. if (mOpen && !params.getHollow())
  1059. {
  1060. addFace(0, mTotal - 1, 0, LL_FACE_OUTER_SIDE_0, false);
  1061. }
  1062. else
  1063. {
  1064. addFace(0, mTotal, 0, LL_FACE_OUTER_SIDE_0, false);
  1065. }
  1066. if (hollow)
  1067. {
  1068. switch (hole_type)
  1069. {
  1070. case LL_PCODE_HOLE_SQUARE:
  1071. addHole(params, true, 2, 0.5f, hollow, 0.5f, split);
  1072. break;
  1073. case LL_PCODE_HOLE_TRIANGLE:
  1074. addHole(params, true, 3, 0.5f, hollow, 0.5f, split);
  1075. break;
  1076. default: // LL_PCODE_HOLE_SAME, LL_PCODE_HOLE_CIRCLE
  1077. addHole(params, false, circle_detail, 0.5f, hollow,
  1078. 0.5f);
  1079. }
  1080. }
  1081. // Special case for openness of sphere
  1082. if (params.getEnd() - params.getBegin() < 1.f)
  1083. {
  1084. mOpen = true;
  1085. }
  1086. else if (!hollow)
  1087. {
  1088. mOpen = false;
  1089. mVertices.push_back(mVertices[0]);
  1090. llassert(mVertices[0].isFinite3());
  1091. ++mTotal;
  1092. }
  1093. break;
  1094. }
  1095. default:
  1096. llerrs << "Unknown profile: getCurveType() = "
  1097. << params.getCurveType() << llendl;
  1098. }
  1099. if (path_open)
  1100. {
  1101. addCap(LL_FACE_PATH_END); // bottom
  1102. }
  1103. if (mOpen) // Interior edge caps
  1104. {
  1105. addFace(mTotal - 1, 2, 0.5, LL_FACE_PROFILE_BEGIN, true);
  1106. if (hollow)
  1107. {
  1108. addFace(mTotalOut - 1, 2, 0.5, LL_FACE_PROFILE_END, true);
  1109. }
  1110. else
  1111. {
  1112. addFace(mTotal - 2, 2, 0.5, LL_FACE_PROFILE_END, true);
  1113. }
  1114. }
  1115. mMutex.unlock();
  1116. return true;
  1117. }
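// The importers below read the brace-delimited block written by the
// corresponding exporters; for a profile it looks something like:
//
//     profile 0
//     {
//         curve   0
//         begin   0
//         end     1
//         hollow  0
//     }
//
// (see LLProfileParams::exportFile() further down for the exact format).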
  1118. bool LLProfileParams::importFile(LLFILE* fp)
  1119. {
  1120. constexpr S32 BUFSIZE = 16384;
  1121. char buffer[BUFSIZE];
  1122. // *NOTE: changing the size or type of these buffers would require changing
  1123. // the sscanf below.
  1124. char keyword[256];
  1125. char valuestr[256];
  1126. keyword[0] = 0;
  1127. valuestr[0] = 0;
  1128. F32 tempF32;
  1129. U32 tempU32;
  1130. while (!feof(fp))
  1131. {
  1132. if (fgets(buffer, BUFSIZE, fp) == NULL)
  1133. {
  1134. buffer[0] = '\0';
  1135. }
  1136. sscanf(buffer, " %255s %255s", keyword, valuestr);
  1137. if (!strcmp("{", keyword))
  1138. {
  1139. continue;
  1140. }
  1141. if (!strcmp("}", keyword))
  1142. {
  1143. break;
  1144. }
  1145. else if (!strcmp("curve", keyword))
  1146. {
  1147. sscanf(valuestr, "%d", &tempU32);
  1148. setCurveType((U8)tempU32);
  1149. }
  1150. else if (!strcmp("begin", keyword))
  1151. {
  1152. sscanf(valuestr, "%g", &tempF32);
  1153. setBegin(tempF32);
  1154. }
  1155. else if (!strcmp("end", keyword))
  1156. {
  1157. sscanf(valuestr, "%g", &tempF32);
  1158. setEnd(tempF32);
  1159. }
  1160. else if (!strcmp("hollow", keyword))
  1161. {
  1162. sscanf(valuestr, "%g", &tempF32);
  1163. setHollow(tempF32);
  1164. }
  1165. else
  1166. {
  1167. llwarns << "Unknown keyword '" << keyword << "' in profile import."
  1168. << llendl;
  1169. }
  1170. }
  1171. return true;
  1172. }
  1173. bool LLProfileParams::exportFile(LLFILE* fp) const
  1174. {
  1175. fprintf(fp, "\t\tprofile 0\n");
  1176. fprintf(fp, "\t\t{\n");
  1177. fprintf(fp, "\t\t\tcurve\t%d\n", getCurveType());
  1178. fprintf(fp, "\t\t\tbegin\t%g\n", getBegin());
  1179. fprintf(fp, "\t\t\tend\t%g\n", getEnd());
  1180. fprintf(fp, "\t\t\thollow\t%g\n", getHollow());
  1181. fprintf(fp, "\t\t}\n");
  1182. return true;
  1183. }
  1184. bool LLProfileParams::importLegacyStream(std::istream& input_stream)
  1185. {
  1186. constexpr S32 BUFSIZE = 16384;
  1187. char buffer[BUFSIZE];
  1188. // *NOTE: changing the size or type of these buffers would require changing
  1189. // the sscanf below.
  1190. char keyword[256];
  1191. char valuestr[256];
  1192. keyword[0] = 0;
  1193. valuestr[0] = 0;
  1194. F32 tempF32;
  1195. U32 tempU32;
  1196. while (input_stream.good())
  1197. {
  1198. input_stream.getline(buffer, BUFSIZE);
  1199. sscanf(buffer, " %255s %255s", keyword, valuestr);
  1200. if (!strcmp("{", keyword))
  1201. {
  1202. continue;
  1203. }
  1204. if (!strcmp("}", keyword))
  1205. {
  1206. break;
  1207. }
  1208. else if (!strcmp("curve", keyword))
  1209. {
  1210. sscanf(valuestr, "%d", &tempU32);
  1211. setCurveType((U8)tempU32);
  1212. }
  1213. else if (!strcmp("begin", keyword))
  1214. {
  1215. sscanf(valuestr, "%g", &tempF32);
  1216. setBegin(tempF32);
  1217. }
  1218. else if (!strcmp("end", keyword))
  1219. {
  1220. sscanf(valuestr, "%g", &tempF32);
  1221. setEnd(tempF32);
  1222. }
  1223. else if (!strcmp("hollow", keyword))
  1224. {
  1225. sscanf(valuestr, "%g", &tempF32);
  1226. setHollow(tempF32);
  1227. }
  1228. else
  1229. {
  1230. llwarns << "Unknown keyword " << keyword << " in profile import"
  1231. << llendl;
  1232. }
  1233. }
  1234. return true;
  1235. }
  1236. bool LLProfileParams::exportLegacyStream(std::ostream& output_stream) const
  1237. {
1238. output_stream << "\t\tprofile 0\n";
1239. output_stream << "\t\t{\n";
1240. output_stream << "\t\t\tcurve\t" << (S32) getCurveType() << "\n";
1241. output_stream << "\t\t\tbegin\t" << getBegin() << "\n";
1242. output_stream << "\t\t\tend\t" << getEnd() << "\n";
1243. output_stream << "\t\t\thollow\t" << getHollow() << "\n";
  1244. output_stream << "\t\t}\n";
  1245. return true;
  1246. }
  1247. LLSD LLProfileParams::asLLSD() const
  1248. {
  1249. LLSD sd;
  1250. sd["curve"] = getCurveType();
  1251. sd["begin"] = getBegin();
  1252. sd["end"] = getEnd();
  1253. sd["hollow"] = getHollow();
  1254. return sd;
  1255. }
  1256. bool LLProfileParams::fromLLSD(LLSD& sd)
  1257. {
  1258. setCurveType(sd["curve"].asInteger());
  1259. setBegin((F32)sd["begin"].asReal());
  1260. setEnd((F32)sd["end"].asReal());
  1261. setHollow((F32)sd["hollow"].asReal());
  1262. return true;
  1263. }
  1264. void LLProfileParams::copyParams(const LLProfileParams& params)
  1265. {
  1266. setCurveType(params.getCurveType());
  1267. setBegin(params.getBegin());
  1268. setEnd(params.getEnd());
  1269. setHollow(params.getHollow());
  1270. }
  1271. // This is basically LLPath::genNGon stripped down to only operations that
  1272. // influence the number of points added
  1273. S32 LLPath::getNumNGonPoints(const LLPathParams& params, S32 sides)
  1274. {
  1275. F32 step = 1.f / sides;
  1276. F32 t = params.getBegin() + step;
  1277. // Snap to a quantized parameter, so that cut does not affect most sample
  1278. // points.
  1279. t = ((S32)(t * sides)) / (F32)sides;
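// E.g. with sides = 4, begin = 0.3 and end = 1: t starts at 0.55 and is
// snapped down to 0.5, the loop below then counts 0.5 and 0.75, and the
// final ++ret accounts for the end-cut point, giving 4 points in total
// (matching the 4 points genNGon() appends for the same parameters).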
  1280. S32 ret = 1;
  1281. // Run through the non-cut dependent points.
  1282. while (t < params.getEnd())
  1283. {
  1284. ++ret;
  1285. t += step;
  1286. }
  1287. return ++ret;
  1288. }
  1289. // Generates a circular path, starting at (1, 0, 0), counter-clockwise along
  1290. // the xz plane.
  1291. void LLPath::genNGon(const LLPathParams& params, S32 sides, F32 end_scale,
  1292. F32 twist_scale)
  1293. {
  1294. F32 revolutions = params.getRevolutions();
  1295. F32 skew = params.getSkew();
  1296. F32 skew_mag = fabsf(skew);
  1297. F32 hole_x = params.getScaleX() * (1.f - skew_mag);
  1298. F32 hole_y = params.getScaleY();
  1299. // Calculate taper begin/end for x,y (Negative means taper the beginning)
  1300. F32 taper_x_begin = 1.f;
  1301. F32 taper_x_end = 1.f - params.getTaperX();
  1302. F32 taper_y_begin = 1.f;
  1303. F32 taper_y_end = 1.f - params.getTaperY();
  1304. if (taper_x_end > 1.f)
  1305. {
  1306. // Flip tapering.
  1307. taper_x_begin = 2.f - taper_x_end;
  1308. taper_x_end = 1.f;
  1309. }
  1310. if (taper_y_end > 1.f)
  1311. {
  1312. // Flip tapering.
  1313. taper_y_begin = 2.f - taper_y_end;
  1314. taper_y_end = 1.f;
  1315. }
  1316. // For spheres, the radius is usually zero.
  1317. F32 radius_start = 0.5f;
  1318. if (sides < 8)
  1319. {
  1320. radius_start = sTableScale[sides];
  1321. }
  1322. // Scale the radius to take the hole size into account.
  1323. radius_start *= 1.f - hole_y;
  1324. // Now check the radius offset to calculate the start,end radius
  1325. // (negative means decrease the start radius instead).
  1326. F32 radius_end = radius_start;
  1327. F32 radius_offset = params.getRadiusOffset();
  1328. if (radius_offset < 0.f)
  1329. {
  1330. radius_start *= 1.f + radius_offset;
  1331. }
  1332. else
  1333. {
  1334. radius_end *= 1.f - radius_offset;
  1335. }
  1336. // Is the path NOT a closed loop ?
  1337. mOpen = params.getEnd() * end_scale - params.getBegin() < 1.f ||
  1338. skew_mag > 0.001f || fabsf(taper_x_end - taper_x_begin) > 0.001f ||
  1339. fabsf(taper_y_end - taper_y_begin) > 0.001f ||
  1340. fabsf(radius_end - radius_start) > 0.001f;
  1341. LLVector3 path_axis(1.f, 0.f, 0.f);
  1342. F32 twist_begin = params.getTwistBegin() * twist_scale;
  1343. F32 twist_end = params.getTwistEnd() * twist_scale;
  1344. // We run through this once before the main loop, to make sure the path
  1345. // begins at the correct cut.
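// The per-point computation below (position, scale, texture coordinate and
// twist rotation) is repeated three times: once here for the begin cut, once
// per quantized step in the loop, and once more for the end cut. A small
// local helper (e.g. a lambda capturing the taper, radius and twist values)
// could factor out the duplication.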
  1346. F32 step = 1.f / sides;
  1347. F32 t = params.getBegin();
  1348. PathPt* pt = mPath.append(1);
  1349. F32 ang = 2.f * F_PI * revolutions * t;
  1350. F32 s = sinf(ang) * lerp(radius_start, radius_end, t);
  1351. F32 c = cosf(ang) * lerp(radius_start, radius_end, t);
  1352. pt->mPos.set(lerp(0, params.getShear().mV[0], s) +
  1353. lerp(-skew, skew, t) * 0.5f,
  1354. c + lerp(0, params.getShear().mV[1], s), s);
  1355. pt->mScale.set(hole_x * lerp(taper_x_begin, taper_x_end, t),
  1356. hole_y * lerp(taper_y_begin, taper_y_end, t), 0, 1);
  1357. pt->mTexT = t;
  1358. // Twist rotates the path along the x,y plane (I think) - DJS 04/05/02
  1359. LLQuaternion twist;
  1360. twist.setAngleAxis(lerp(twist_begin, twist_end, t) * 2.f * F_PI - F_PI,
  1361. 0.f, 0.f, 1.f);
  1362. // Rotate the point around the circle's center.
  1363. LLQuaternion qang;
  1364. qang.setAngleAxis(ang, path_axis);
  1365. LLMatrix3 rot(twist * qang);
  1366. pt->mRot.loadu(rot);
  1367. t += step;
  1368. // Snap to a quantized parameter, so that cut does not affect most sample
  1369. // points.
  1370. t = ((S32)(t * sides)) / (F32)sides;
  1371. // Run through the non-cut dependent points.
  1372. while (t < params.getEnd())
  1373. {
  1374. pt = mPath.append(1);
  1375. ang = 2.f * F_PI * revolutions * t;
  1376. c = cosf(ang) * lerp(radius_start, radius_end, t);
  1377. s = sinf(ang) * lerp(radius_start, radius_end, t);
  1378. pt->mPos.set(lerp(0, params.getShear().mV[0], s) +
  1379. lerp(-skew, skew, t) * 0.5f,
  1380. c + lerp(0, params.getShear().mV[1], s), s);
  1381. pt->mScale.set(hole_x * lerp(taper_x_begin, taper_x_end, t),
  1382. hole_y * lerp(taper_y_begin, taper_y_end, t), 0, 1);
  1383. pt->mTexT = t;
  1384. // Twist rotates the path along the x,y plane (I think) - DJS 04/05/02
  1385. twist.setAngleAxis(lerp(twist_begin, twist_end, t) * 2.f * F_PI - F_PI,
  1386. 0.f, 0.f, 1.f);
  1387. // Rotate the point around the circle's center.
  1388. qang.setAngleAxis(ang, path_axis);
  1389. LLMatrix3 tmp(twist * qang);
  1390. pt->mRot.loadu(tmp);
  1391. t += step;
  1392. }
  1393. // Make one final pass for the end cut.
  1394. t = params.getEnd();
  1395. pt = mPath.append(1);
  1396. ang = 2.f * F_PI * revolutions * t;
  1397. c = cosf(ang) * lerp(radius_start, radius_end, t);
  1398. s = sinf(ang) * lerp(radius_start, radius_end, t);
  1399. pt->mPos.set(lerp(0, params.getShear().mV[0], s) +
  1400. lerp(-skew, skew, t) * 0.5f,
  1401. c + lerp(0, params.getShear().mV[1], s), s);
  1402. pt->mScale.set(hole_x * lerp(taper_x_begin, taper_x_end, t),
  1403. hole_y * lerp(taper_y_begin, taper_y_end, t), 0, 1);
  1404. pt->mTexT = t;
  1405. // Twist rotates the path along the x,y plane (I think) - DJS 04/05/02
  1406. twist.setAngleAxis(lerp(twist_begin, twist_end, t) * 2.f * F_PI - F_PI,
  1407. 0.f, 0.f, 1.f);
  1408. // Rotate the point around the circle's center.
  1409. qang.setAngleAxis(ang, path_axis);
  1410. LLMatrix3 tmp(twist * qang);
  1411. pt->mRot.loadu(tmp);
  1412. mTotal = mPath.size();
  1413. }
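// getScaleX()/getScaleY() appear to encode the path taper in [0, 2]: values
// above 1 shrink the profile at the path begin (begin_scale = 2 - scale),
// values below 1 shrink it at the path end (end_scale = scale). E.g. with
// scale_x = 1.3 the begin scale is 0.7 and the end scale stays at 1.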
  1414. LLVector2 LLPathParams::getBeginScale() const
  1415. {
  1416. LLVector2 begin_scale(1.f, 1.f);
  1417. if (getScaleX() > 1)
  1418. {
  1419. begin_scale.mV[0] = 2.f - getScaleX();
  1420. }
  1421. if (getScaleY() > 1)
  1422. {
  1423. begin_scale.mV[1] = 2.f - getScaleY();
  1424. }
  1425. return begin_scale;
  1426. }
  1427. LLVector2 LLPathParams::getEndScale() const
  1428. {
  1429. LLVector2 end_scale(1.f, 1.f);
  1430. if (getScaleX() < 1)
  1431. {
  1432. end_scale.mV[0] = getScaleX();
  1433. }
  1434. if (getScaleY() < 1)
  1435. {
  1436. end_scale.mV[1] = getScaleY();
  1437. }
  1438. return end_scale;
  1439. }
  1440. // This is basically LLPath::generate stripped down to only the operations
  1441. // that influence the number of points
  1442. S32 LLPath::getNumPoints(const LLPathParams& params, F32 detail)
  1443. {
  1444. if (detail < 0.f)
  1445. {
  1446. detail = 0.f;
  1447. }
  1448. S32 np = 2; // Hardcode for line
  1449. // Is this 0xf0 mask really necessary? DK 03/02/05
  1450. switch (params.getCurveType() & 0xf0)
  1451. {
  1452. case LL_PCODE_PATH_CIRCLE:
  1453. {
  1454. // Increase the detail as the revolutions and twist increase.
  1455. F32 twist_mag = fabsf(params.getTwistBegin() -
  1456. params.getTwistEnd());
  1457. S32 sides = (S32)llfloor(llfloor((MIN_DETAIL_FACES * detail +
  1458. twist_mag * 3.5f * (detail - 0.5f))) *
  1459. params.getRevolutions());
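// E.g. a twist difference of 1.0 at detail = 2.0 adds 1.0 * 3.5 * 1.5 = 5.25
// to the MIN_DETAIL_FACES * detail base before the floor, and the resulting
// side count is then multiplied by the number of revolutions.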
  1460. np = sides;
  1461. break;
  1462. }
  1463. case LL_PCODE_PATH_CIRCLE2:
  1464. {
  1465. np = getNumNGonPoints(params, llfloor(MIN_DETAIL_FACES * detail));
  1466. break;
  1467. }
  1468. case LL_PCODE_PATH_TEST:
  1469. {
  1470. np = 5;
  1471. break;
  1472. }
  1473. //case LL_PCODE_PATH_LINE:
  1474. default:
  1475. {
  1476. // Take the begin/end twist into account for detail.
  1477. np = llfloor(fabsf(params.getTwistBegin() - params.getTwistEnd()) *
  1478. 3.5f * (detail - 0.5f)) + 2;
  1479. }
  1480. }
  1481. return np;
  1482. }
  1483. bool LLPath::generate(const LLPathParams& params, F32 detail, S32 split,
  1484. bool is_sculpted, S32 sculpt_size)
  1485. {
  1486. if (!mDirty && !is_sculpted)
  1487. {
  1488. return false;
  1489. }
  1490. if (detail < 0.f)
  1491. {
  1492. llwarns << "Attempt to generating path with negative LOD: " << detail
  1493. << ". Clamping it to 0." << llendl;
  1494. detail = 0.f;
  1495. }
  1496. mDirty = false;
  1497. S32 np = 2; // hardcode for line
  1498. mPath.resize(0);
  1499. mOpen = true;
  1500. // Is this 0xf0 mask really necessary ? DK 03/02/05
  1501. switch (params.getCurveType() & 0xf0)
  1502. {
  1503. case LL_PCODE_PATH_CIRCLE:
  1504. {
  1505. // Increase the detail as the revolutions and twist increase.
  1506. F32 twist_mag = fabsf(params.getTwistBegin() -
  1507. params.getTwistEnd());
  1508. S32 sides = (S32)llfloor(llfloor((MIN_DETAIL_FACES * detail +
  1509. twist_mag * 3.5f *
  1510. (detail - 0.5f))) *
  1511. params.getRevolutions());
  1512. if (is_sculpted)
  1513. {
  1514. sides = llmax(sculpt_size, 1);
  1515. }
  1516. if (sides > 0)
  1517. {
  1518. genNGon(params, sides);
  1519. }
  1520. break;
  1521. }
  1522. case LL_PCODE_PATH_CIRCLE2:
  1523. {
  1524. if (params.getEnd() - params.getBegin() >= 0.99f &&
  1525. params.getScaleX() >= .99f)
  1526. {
  1527. mOpen = false;
  1528. }
  1529. genNGon(params, llfloor(MIN_DETAIL_FACES * detail));
  1530. F32 toggle = 0.5f;
  1531. for (S32 i = 0, count = (S32)mPath.size(); i < count; ++i)
  1532. {
  1533. mPath[i].mPos.getF32ptr()[0] = toggle;
  1534. if (toggle == 0.5f)
  1535. {
  1536. toggle = -0.5f;
  1537. }
  1538. else
  1539. {
  1540. toggle = 0.5f;
  1541. }
  1542. }
  1543. break;
  1544. }
  1545. case LL_PCODE_PATH_TEST:
  1546. {
  1547. np = 5;
  1548. mStep = 1.f / (F32)(np - 1);
  1549. mPath.resize(np);
  1550. LLQuaternion quat;
  1551. for (S32 i = 0; i < np; ++i)
  1552. {
  1553. F32 t = F32(i) * mStep;
  1554. F32 twist_angle = F_PI * params.getTwistEnd() * t;
  1555. mPath[i].mPos.set(0.f,
  1556. lerp(0.f, -sinf(twist_angle) * 0.5f, t),
  1557. lerp(-0.5, cosf(twist_angle) * 0.5f, t));
  1558. mPath[i].mScale.set(lerp(1.f, params.getScale().mV[0], t),
  1559. lerp(1.f, params.getScale().mV[1], t),
  1560. 0.f, 1.f);
  1561. mPath[i].mTexT = t;
  1562. quat.setAngleAxis(twist_angle, 1.f, 0.f, 0.f);
  1563. LLMatrix3 tmp(quat);
  1564. mPath[i].mRot.loadu(tmp);
  1565. }
  1566. break;
  1567. }
  1568. //case LL_PCODE_PATH_LINE:
  1569. default:
  1570. {
  1571. // Take the begin/end twist into account for detail.
  1572. np = llfloor(fabsf(params.getTwistBegin() - params.getTwistEnd()) *
  1573. 3.5f * (detail - 0.5f)) + 2;
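// E.g. a twist difference of 1.0 at detail = 2.0 gives
// llfloor(1.0 * 3.5 * 1.5) + 2 = 7 path points, raised to split + 2 below if
// that is larger.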
  1574. if (np < split + 2)
  1575. {
  1576. np = split + 2;
  1577. }
  1578. mStep = 1.f / (np - 1);
  1579. mPath.resize(np);
  1580. LLVector2 start_scale = params.getBeginScale();
  1581. LLVector2 end_scale = params.getEndScale();
  1582. for (S32 i = 0; i < np; ++i)
  1583. {
  1584. F32 t = lerp(params.getBegin(), params.getEnd(),
  1585. (F32)i * mStep);
  1586. mPath[i].mPos.set(lerp(0, params.getShear().mV[0], t),
  1587. lerp(0, params.getShear().mV[1], t),
  1588. t - 0.5f);
  1589. LLQuaternion quat;
  1590. quat.setAngleAxis(lerp(F_PI * params.getTwistBegin(),
  1591. F_PI * params.getTwistEnd(), t),
  1592. 0.f, 0.f, 1.f);
  1593. LLMatrix3 tmp(quat);
  1594. mPath[i].mRot.loadu(tmp);
  1595. mPath[i].mScale.set(lerp(start_scale.mV[0],
  1596. end_scale.mV[0], t),
  1597. lerp(start_scale.mV[1],
  1598. end_scale.mV[1], t),
  1599. 0.f, 1.f);
  1600. mPath[i].mTexT = t;
  1601. }
  1602. }
  1603. }
  1604. if (params.getTwistEnd() != params.getTwistBegin())
  1605. {
  1606. mOpen = true;
  1607. }
  1608. #if 0
  1609. if ((S32(fabsf(params.getTwistEnd() -
  1610. params.getTwistBegin()) * 100)) % 100 != 0)
  1611. {
  1612. mOpen = true;
  1613. }
  1614. #endif
  1615. return true;
  1616. }
  1617. bool LLDynamicPath::generate(const LLPathParams& params, F32 detail, S32 split,
  1618. bool is_sculpted, S32 sculpt_size)
  1619. {
  1620. mOpen = true; // Draw end caps
  1621. if (getPathLength() == 0)
  1622. {
  1623. // Path has not been generated yet. Some algorithms later assume at
  1624. // least TWO path points.
  1625. resizePath(2);
  1626. LLQuaternion quat;
  1627. quat.setEulerAngles(0.f, 0.f, 0.f);
  1628. LLMatrix3 tmp(quat);
  1629. for (U32 i = 0; i < 2; ++i)
  1630. {
  1631. mPath[i].mPos.set(0.f, 0.f, 0.f);
  1632. mPath[i].mRot.loadu(tmp);
  1633. mPath[i].mScale.set(1.f, 1.f, 0.f, 1.f);
  1634. mPath[i].mTexT = 0.f;
  1635. }
  1636. }
  1637. return true;
  1638. }
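// As with the profile importers, the path importers below read the
// brace-delimited block written by the exporters; it looks something like:
//
//     path 0
//     {
//         curve   16
//         begin   0
//         end     1
//         scale_x 1
//         scale_y 1
//         ...
//         skew    0
//     }
//
// (see LLPathParams::exportFile() below for the full key list).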
  1639. bool LLPathParams::importFile(LLFILE* fp)
  1640. {
  1641. constexpr S32 BUFSIZE = 16384;
  1642. char buffer[BUFSIZE];
  1643. // *NOTE: changing the size or type of these buffers would require changing
  1644. // the sscanf below.
  1645. char keyword[256];
  1646. char valuestr[256];
  1647. keyword[0] = 0;
  1648. valuestr[0] = 0;
  1649. F32 tempF32;
  1650. F32 x, y;
  1651. U32 tempU32;
  1652. while (!feof(fp))
  1653. {
  1654. if (fgets(buffer, BUFSIZE, fp) == NULL)
  1655. {
  1656. buffer[0] = '\0';
  1657. }
  1658. sscanf(buffer, " %255s %255s", keyword, valuestr);
  1659. if (!strcmp("{", keyword))
  1660. {
  1661. continue;
  1662. }
  1663. if (!strcmp("}", keyword))
  1664. {
  1665. break;
  1666. }
  1667. else if (!strcmp("curve", keyword))
  1668. {
  1669. sscanf(valuestr, "%d", &tempU32);
  1670. setCurveType((U8)tempU32);
  1671. }
  1672. else if (!strcmp("begin", keyword))
  1673. {
  1674. sscanf(valuestr, "%g", &tempF32);
  1675. setBegin(tempF32);
  1676. }
  1677. else if (!strcmp("end", keyword))
  1678. {
  1679. sscanf(valuestr, "%g", &tempF32);
  1680. setEnd(tempF32);
  1681. }
  1682. else if (!strcmp("scale", keyword))
  1683. {
  1684. // Legacy for one dimensional scale per path
  1685. sscanf(valuestr, "%g", &tempF32);
  1686. setScale(tempF32, tempF32);
  1687. }
  1688. else if (!strcmp("scale_x", keyword))
  1689. {
  1690. sscanf(valuestr, "%g", &x);
  1691. setScaleX(x);
  1692. }
  1693. else if (!strcmp("scale_y", keyword))
  1694. {
  1695. sscanf(valuestr, "%g", &y);
  1696. setScaleY(y);
  1697. }
  1698. else if (!strcmp("shear_x", keyword))
  1699. {
  1700. sscanf(valuestr, "%g", &x);
  1701. setShearX(x);
  1702. }
  1703. else if (!strcmp("shear_y", keyword))
  1704. {
  1705. sscanf(valuestr, "%g", &y);
  1706. setShearY(y);
  1707. }
  1708. else if (!strcmp("twist", keyword))
  1709. {
  1710. sscanf(valuestr, "%g", &tempF32);
  1711. setTwistEnd(tempF32);
  1712. }
  1713. else if (!strcmp("twist_begin", keyword))
  1714. {
  1715. sscanf(valuestr, "%g", &y);
  1716. setTwistBegin(y);
  1717. }
  1718. else if (!strcmp("radius_offset", keyword))
  1719. {
  1720. sscanf(valuestr, "%g", &y);
  1721. setRadiusOffset(y);
  1722. }
  1723. else if (!strcmp("taper_x", keyword))
  1724. {
  1725. sscanf(valuestr, "%g", &y);
  1726. setTaperX(y);
  1727. }
  1728. else if (!strcmp("taper_y", keyword))
  1729. {
  1730. sscanf(valuestr, "%g", &y);
  1731. setTaperY(y);
  1732. }
  1733. else if (!strcmp("revolutions", keyword))
  1734. {
  1735. sscanf(valuestr, "%g", &y);
  1736. setRevolutions(y);
  1737. }
  1738. else if (!strcmp("skew", keyword))
  1739. {
  1740. sscanf(valuestr, "%g", &y);
  1741. setSkew(y);
  1742. }
  1743. else
  1744. {
  1745. llwarns << "Unknown keyword '" << keyword << "' in path import."
  1746. << llendl;
  1747. }
  1748. }
  1749. return true;
  1750. }
  1751. bool LLPathParams::exportFile(LLFILE* fp) const
  1752. {
  1753. fprintf(fp, "\t\tpath 0\n");
  1754. fprintf(fp, "\t\t{\n");
  1755. fprintf(fp, "\t\t\tcurve\t%d\n", getCurveType());
  1756. fprintf(fp, "\t\t\tbegin\t%g\n", getBegin());
  1757. fprintf(fp, "\t\t\tend\t%g\n", getEnd());
  1758. fprintf(fp, "\t\t\tscale_x\t%g\n", getScaleX());
  1759. fprintf(fp, "\t\t\tscale_y\t%g\n", getScaleY());
  1760. fprintf(fp, "\t\t\tshear_x\t%g\n", getShearX());
  1761. fprintf(fp, "\t\t\tshear_y\t%g\n", getShearY());
  1762. fprintf(fp, "\t\t\ttwist\t%g\n", getTwistEnd());
  1763. fprintf(fp, "\t\t\ttwist_begin\t%g\n", getTwistBegin());
  1764. fprintf(fp, "\t\t\tradius_offset\t%g\n", getRadiusOffset());
  1765. fprintf(fp, "\t\t\ttaper_x\t%g\n", getTaperX());
  1766. fprintf(fp, "\t\t\ttaper_y\t%g\n", getTaperY());
  1767. fprintf(fp, "\t\t\trevolutions\t%g\n", getRevolutions());
  1768. fprintf(fp, "\t\t\tskew\t%g\n", getSkew());
  1769. fprintf(fp, "\t\t}\n");
  1770. return true;
  1771. }
  1772. bool LLPathParams::importLegacyStream(std::istream& input_stream)
  1773. {
  1774. constexpr S32 BUFSIZE = 16384;
  1775. char buffer[BUFSIZE];
  1776. // *NOTE: changing the size or type of these buffers would require changing
  1777. // the sscanf below.
  1778. char keyword[256];
  1779. char valuestr[256];
  1780. keyword[0] = 0;
  1781. valuestr[0] = 0;
  1782. F32 tempF32;
  1783. F32 x, y;
  1784. U32 tempU32;
  1785. while (input_stream.good())
  1786. {
  1787. input_stream.getline(buffer, BUFSIZE);
  1788. sscanf(buffer, " %255s %255s", keyword, valuestr);
  1789. if (!strcmp("{", keyword))
  1790. {
  1791. continue;
  1792. }
  1793. if (!strcmp("}", keyword))
  1794. {
  1795. break;
  1796. }
  1797. else if (!strcmp("curve", keyword))
  1798. {
  1799. sscanf(valuestr, "%d", &tempU32);
  1800. setCurveType((U8)tempU32);
  1801. }
  1802. else if (!strcmp("begin", keyword))
  1803. {
  1804. sscanf(valuestr, "%g", &tempF32);
  1805. setBegin(tempF32);
  1806. }
  1807. else if (!strcmp("end", keyword))
  1808. {
  1809. sscanf(valuestr, "%g", &tempF32);
  1810. setEnd(tempF32);
  1811. }
  1812. else if (!strcmp("scale", keyword))
  1813. {
  1814. // Legacy for one dimensional scale per path
  1815. sscanf(valuestr, "%g", &tempF32);
  1816. setScale(tempF32, tempF32);
  1817. }
  1818. else if (!strcmp("scale_x", keyword))
  1819. {
  1820. sscanf(valuestr, "%g", &x);
  1821. setScaleX(x);
  1822. }
  1823. else if (!strcmp("scale_y", keyword))
  1824. {
  1825. sscanf(valuestr, "%g", &y);
  1826. setScaleY(y);
  1827. }
  1828. else if (!strcmp("shear_x", keyword))
  1829. {
  1830. sscanf(valuestr, "%g", &x);
  1831. setShearX(x);
  1832. }
  1833. else if (!strcmp("shear_y", keyword))
  1834. {
  1835. sscanf(valuestr, "%g", &y);
  1836. setShearY(y);
  1837. }
  1838. else if (!strcmp("twist", keyword))
  1839. {
  1840. sscanf(valuestr, "%g", &tempF32);
  1841. setTwistEnd(tempF32);
  1842. }
  1843. else if (!strcmp("twist_begin", keyword))
  1844. {
  1845. sscanf(valuestr, "%g", &y);
  1846. setTwistBegin(y);
  1847. }
  1848. else if (!strcmp("radius_offset", keyword))
  1849. {
  1850. sscanf(valuestr, "%g", &y);
  1851. setRadiusOffset(y);
  1852. }
  1853. else if (!strcmp("taper_x", keyword))
  1854. {
  1855. sscanf(valuestr, "%g", &y);
  1856. setTaperX(y);
  1857. }
  1858. else if (!strcmp("taper_y", keyword))
  1859. {
  1860. sscanf(valuestr, "%g", &y);
  1861. setTaperY(y);
  1862. }
  1863. else if (!strcmp("revolutions", keyword))
  1864. {
  1865. sscanf(valuestr, "%g", &y);
  1866. setRevolutions(y);
  1867. }
  1868. else if (!strcmp("skew", keyword))
  1869. {
  1870. sscanf(valuestr, "%g", &y);
  1871. setSkew(y);
  1872. }
  1873. else
  1874. {
  1875. llwarns << "Unknown keyword '" << keyword << "' in path import."
  1876. << llendl;
  1877. }
  1878. }
  1879. return true;
  1880. }
  1881. bool LLPathParams::exportLegacyStream(std::ostream& output_stream) const
  1882. {
  1883. output_stream << "\t\tpath 0\n";
  1884. output_stream << "\t\t{\n";
  1885. output_stream << "\t\t\tcurve\t" << (S32) getCurveType() << "\n";
  1886. output_stream << "\t\t\tbegin\t" << getBegin() << "\n";
  1887. output_stream << "\t\t\tend\t" << getEnd() << "\n";
  1888. output_stream << "\t\t\tscale_x\t" << getScaleX() << "\n";
  1889. output_stream << "\t\t\tscale_y\t" << getScaleY() << "\n";
  1890. output_stream << "\t\t\tshear_x\t" << getShearX() << "\n";
  1891. output_stream << "\t\t\tshear_y\t" << getShearY() << "\n";
1892. output_stream << "\t\t\ttwist\t" << getTwistEnd() << "\n";
1893. output_stream << "\t\t\ttwist_begin\t" << getTwistBegin() << "\n";
1894. output_stream << "\t\t\tradius_offset\t" << getRadiusOffset() << "\n";
1895. output_stream << "\t\t\ttaper_x\t" << getTaperX() << "\n";
1896. output_stream << "\t\t\ttaper_y\t" << getTaperY() << "\n";
1897. output_stream << "\t\t\trevolutions\t" << getRevolutions() << "\n";
1898. output_stream << "\t\t\tskew\t" << getSkew() << "\n";
  1899. output_stream << "\t\t}\n";
  1900. return true;
  1901. }
  1902. LLSD LLPathParams::asLLSD() const
  1903. {
1904. LLSD sd;
  1905. sd["curve"] = getCurveType();
  1906. sd["begin"] = getBegin();
  1907. sd["end"] = getEnd();
  1908. sd["scale_x"] = getScaleX();
  1909. sd["scale_y"] = getScaleY();
  1910. sd["shear_x"] = getShearX();
  1911. sd["shear_y"] = getShearY();
  1912. sd["twist"] = getTwistEnd();
  1913. sd["twist_begin"] = getTwistBegin();
  1914. sd["radius_offset"] = getRadiusOffset();
  1915. sd["taper_x"] = getTaperX();
  1916. sd["taper_y"] = getTaperY();
  1917. sd["revolutions"] = getRevolutions();
  1918. sd["skew"] = getSkew();
  1919. return sd;
  1920. }
  1921. bool LLPathParams::fromLLSD(LLSD& sd)
  1922. {
  1923. setCurveType(sd["curve"].asInteger());
  1924. setBegin((F32)sd["begin"].asReal());
  1925. setEnd((F32)sd["end"].asReal());
  1926. setScaleX((F32)sd["scale_x"].asReal());
  1927. setScaleY((F32)sd["scale_y"].asReal());
  1928. setShearX((F32)sd["shear_x"].asReal());
  1929. setShearY((F32)sd["shear_y"].asReal());
  1930. setTwistEnd((F32)sd["twist"].asReal());
  1931. setTwistBegin((F32)sd["twist_begin"].asReal());
  1932. setRadiusOffset((F32)sd["radius_offset"].asReal());
  1933. setTaperX((F32)sd["taper_x"].asReal());
  1934. setTaperY((F32)sd["taper_y"].asReal());
  1935. setRevolutions((F32)sd["revolutions"].asReal());
  1936. setSkew((F32)sd["skew"].asReal());
  1937. return true;
  1938. }
  1939. void LLPathParams::copyParams(const LLPathParams& params)
  1940. {
  1941. setCurveType(params.getCurveType());
  1942. setBegin(params.getBegin());
  1943. setEnd(params.getEnd());
  1944. setScale(params.getScaleX(), params.getScaleY());
  1945. setShear(params.getShearX(), params.getShearY());
  1946. setTwistEnd(params.getTwistEnd());
  1947. setTwistBegin(params.getTwistBegin());
  1948. setRadiusOffset(params.getRadiusOffset());
  1949. setTaper(params.getTaperX(), params.getTaperY());
  1950. setRevolutions(params.getRevolutions());
  1951. setSkew(params.getSkew());
  1952. }
  1953. // Static member variables
  1954. U32 LLVolume::sLODCacheHit = 0;
  1955. U32 LLVolume::sLODCacheMiss = 0;
  1956. S32 LLVolume::sNumMeshPoints = 0;
  1957. bool LLVolume::sOptimizeCache = true;
  1958. LLVolume::LLVolume(const LLVolumeParams& params, F32 detail,
  1959. bool generate_single_face, bool is_unique)
  1960. : mParams(params),
  1961. mUnique(is_unique),
  1962. mGenerateSingleFace(generate_single_face),
  1963. mFaceMask(0x0),
  1964. mDetail(detail),
  1965. mSculptLevel(-2),
  1966. mSurfaceArea(1.f), // Only calculated for sculpts (1 for all other prims)
  1967. mIsMeshAssetLoaded(false),
  1968. mHullPoints(NULL),
  1969. mHullIndices(NULL),
  1970. mNumHullPoints(0),
  1971. mNumHullIndices(0),
  1972. mTrianglesCache(NULL)
  1973. {
  1974. mLODScaleBias.set(1.f, 1.f, 1.f);
  1975. // Set defaults
  1976. if (mParams.getPathParams().getCurveType() == LL_PCODE_PATH_FLEXIBLE)
  1977. {
  1978. mPathp = new LLDynamicPath();
  1979. }
  1980. else
  1981. {
  1982. mPathp = new LLPath();
  1983. }
  1984. generate();
  1985. if (mParams.getSculptID().isNull() &&
  1986. (mParams.getSculptType() == LL_SCULPT_TYPE_NONE ||
  1987. mParams.getSculptType() == LL_SCULPT_TYPE_MESH))
  1988. {
  1989. createVolumeFaces();
  1990. }
  1991. }
  1992. void LLVolume::resizePath(S32 length)
  1993. {
  1994. mPathp->resizePath(length);
  1995. mVolumeFaces.clear();
  1996. setDirty();
  1997. }
  1998. void LLVolume::regen()
  1999. {
  2000. generate();
  2001. createVolumeFaces();
  2002. }
  2003. void LLVolume::genTangents(S32 face)
  2004. {
  2005. mVolumeFaces[face].createTangents();
  2006. }
  2007. LLVolume::~LLVolume()
  2008. {
  2009. sNumMeshPoints -= mMesh.size();
  2010. delete mPathp;
  2011. mPathp = NULL;
  2012. if (mTrianglesCache)
  2013. {
  2014. delete mTrianglesCache;
  2015. mTrianglesCache = NULL;
  2016. }
  2017. mVolumeFaces.clear();
  2018. if (mHullPoints)
  2019. {
  2020. free_volume_mem(mHullPoints);
  2021. mHullPoints = NULL;
  2022. }
  2023. if (mHullIndices)
  2024. {
  2025. free_volume_mem(mHullIndices);
  2026. mHullIndices = NULL;
  2027. }
  2028. }
  2029. bool LLVolume::generate()
  2030. {
  2031. U8 path_type = mParams.getPathParams().getCurveType();
  2032. U8 profile_type = mParams.getProfileParams().getCurveType();
  2033. // Added 10.03.05 Dave Parks
2034. // Split is a parameter to LLProfile::generate that tessellates edges on
  2035. // the profile to prevent lighting and texture interpolation errors on
  2036. // triangles that are stretched due to twisting or scaling on the path.
  2037. S32 split = (S32)(mDetail * 0.66f);
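// E.g. mDetail = 1 gives split = 0 (no extra tessellation), mDetail = 2
// gives split = 1, and mDetail = 4 gives split = 2.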
  2038. if (path_type == LL_PCODE_PATH_LINE &&
  2039. (mParams.getPathParams().getScale().mV[0] != 1.f ||
  2040. mParams.getPathParams().getScale().mV[1] != 1.f) &&
  2041. (profile_type == LL_PCODE_PROFILE_SQUARE ||
  2042. profile_type == LL_PCODE_PROFILE_ISOTRI ||
  2043. profile_type == LL_PCODE_PROFILE_EQUALTRI ||
  2044. profile_type == LL_PCODE_PROFILE_RIGHTTRI))
  2045. {
  2046. split = 0;
  2047. }
  2048. if ((mParams.getSculptType() & LL_SCULPT_TYPE_MASK) != LL_SCULPT_TYPE_MESH)
  2049. {
  2050. if (path_type == LL_PCODE_PATH_LINE &&
  2051. profile_type == LL_PCODE_PROFILE_CIRCLE)
  2052. {
  2053. // Cylinders do not care about Z-Axis
  2054. mLODScaleBias.set(0.6f, 0.6f, 0.f);
  2055. }
  2056. else if (path_type == LL_PCODE_PATH_CIRCLE)
  2057. {
  2058. mLODScaleBias.set(0.6f, 0.6f, 0.6f);
  2059. }
  2060. }
  2061. else
  2062. {
  2063. mLODScaleBias.set(0.5f, 0.5f, 0.5f);
  2064. }
  2065. F32 profile_detail = mDetail;
  2066. F32 path_detail = mDetail;
  2067. bool regen_path = mPathp->generate(mParams.getPathParams(), path_detail,
  2068. split);
  2069. bool regen_prof = mProfile.generate(mParams.getProfileParams(),
  2070. mPathp->isOpen(), profile_detail,
  2071. split);
  2072. if (regen_path || regen_prof)
  2073. {
  2074. S32 s_size = mPathp->mPath.size();
  2075. S32 t_size = mProfile.mVertices.size();
  2076. sNumMeshPoints -= mMesh.size();
  2077. mMesh.resize(t_size * s_size);
  2078. sNumMeshPoints += mMesh.size();
  2079. // Generate vertex positions
  2080. // Run along the path.
  2081. LLMatrix4a rot_mat;
  2082. LLVector4a tmp;
  2083. LLVector4a* dst = mMesh.mArray;
  2084. for (S32 s = 0; s < s_size; ++s)
  2085. {
  2086. F32* scale = mPathp->mPath[s].mScale.getF32ptr();
  2087. F32 sc [] = { scale[0], 0, 0, 0,
  2088. 0, scale[1], 0, 0,
  2089. 0, 0, scale[2], 0,
  2090. 0, 0, 0, 1 };
  2091. LLMatrix4 rot(mPathp->mPath[s].mRot.getF32ptr());
  2092. LLMatrix4 scale_mat(sc);
  2093. scale_mat *= rot;
  2094. rot_mat.loadu(scale_mat);
  2095. LLVector4a* profile = mProfile.mVertices.mArray;
  2096. LLVector4a* end_profile = profile + t_size;
  2097. LLVector4a offset = mPathp->mPath[s].mPos;
  2098. if (!offset.isFinite3())
  2099. {
  2100. llwarns_sparse << "Path with non-finite points. Resetting offset to 0."
  2101. << llendl;
  2102. offset.clear();
  2103. }
  2104. // Run along the profile.
  2105. while (profile < end_profile)
  2106. {
  2107. rot_mat.rotate(*profile++, tmp);
  2108. dst->setAdd(tmp, offset);
  2109. llassert(dst->isFinite3());
  2110. ++dst;
  2111. }
  2112. }
  2113. for (std::vector<LLProfile::Face>::const_iterator
  2114. iter = mProfile.mFaces.begin(),
  2115. end = mProfile.mFaces.end();
  2116. iter != end; ++iter)
  2117. {
  2118. LLFaceID id = iter->mFaceID;
  2119. mFaceMask |= id;
  2120. }
  2121. return true;
  2122. }
  2123. return false;
  2124. }
  2125. #if LL_JEMALLOC
  2126. // Initialize with sane values, in case our allocators get called before the
  2127. // jemalloc arena for them is set.
  2128. U32 LLVolumeFace::sMallocxFlags16 = MALLOCX_ALIGN(16) | MALLOCX_TCACHE_NONE;
  2129. U32 LLVolumeFace::sMallocxFlags64 = MALLOCX_ALIGN(64) | MALLOCX_TCACHE_NONE;
  2130. #endif
  2131. //static
  2132. void LLVolumeFace::initClass()
  2133. {
  2134. #if LL_JEMALLOC
  2135. static unsigned int arena = 0;
  2136. if (!arena)
  2137. {
  2138. size_t sz = sizeof(arena);
  2139. if (mallctl("arenas.create", &arena, &sz, NULL, 0))
  2140. {
  2141. llwarns << "Failed to create a new jemalloc arena" << llendl;
  2142. }
  2143. }
  2144. llinfos << "Using jemalloc arena " << arena << " for volume faces memory"
  2145. << llendl;
  2146. sMallocxFlags16 = MALLOCX_ARENA(arena) | MALLOCX_ALIGN(16) |
  2147. MALLOCX_TCACHE_NONE;
  2148. sMallocxFlags64 = MALLOCX_ARENA(arena) | MALLOCX_ALIGN(64) |
  2149. MALLOCX_TCACHE_NONE;
  2150. #endif
  2151. }
  2152. void LLVolumeFace::VertexData::init()
  2153. {
  2154. if (!mData)
  2155. {
  2156. mData = (LLVector4a*)allocate_volume_mem(sizeof(LLVector4a) * 2);
  2157. }
  2158. }
  2159. const LLVolumeFace::VertexData& LLVolumeFace::VertexData::operator=(const LLVolumeFace::VertexData& rhs)
  2160. {
  2161. if (this != &rhs)
  2162. {
  2163. init();
  2164. LLVector4a::memcpyNonAliased16((F32*)mData, (F32*)rhs.mData,
  2165. 2 * sizeof(LLVector4a));
  2166. mTexCoord = rhs.mTexCoord;
  2167. }
  2168. return *this;
  2169. }
  2170. LLVolumeFace::VertexData::~VertexData()
  2171. {
  2172. if (mData)
  2173. {
  2174. free_volume_mem(mData);
  2175. mData = NULL;
  2176. }
  2177. }
  2178. bool LLVolumeFace::VertexData::operator<(const LLVolumeFace::VertexData& rhs)const
  2179. {
  2180. const F32* lp = this->getPosition().getF32ptr();
  2181. const F32* rp = rhs.getPosition().getF32ptr();
  2182. if (lp[0] != rp[0])
  2183. {
  2184. return lp[0] < rp[0];
  2185. }
  2186. if (rp[1] != lp[1])
  2187. {
  2188. return lp[1] < rp[1];
  2189. }
  2190. if (rp[2] != lp[2])
  2191. {
  2192. return lp[2] < rp[2];
  2193. }
  2194. lp = getNormal().getF32ptr();
  2195. rp = rhs.getNormal().getF32ptr();
  2196. if (lp[0] != rp[0])
  2197. {
  2198. return lp[0] < rp[0];
  2199. }
  2200. if (rp[1] != lp[1])
  2201. {
  2202. return lp[1] < rp[1];
  2203. }
  2204. if (rp[2] != lp[2])
  2205. {
  2206. return lp[2] < rp[2];
  2207. }
  2208. if (mTexCoord.mV[0] != rhs.mTexCoord.mV[0])
  2209. {
  2210. return mTexCoord.mV[0] < rhs.mTexCoord.mV[0];
  2211. }
  2212. return mTexCoord.mV[1] < rhs.mTexCoord.mV[1];
  2213. }
  2214. bool LLVolumeFace::VertexData::compareNormal(const LLVolumeFace::VertexData& rhs,
  2215. F32 angle_cutoff) const
  2216. {
  2217. bool retval = false;
  2218. constexpr F32 epsilon = 0.00001f;
  2219. if (rhs.mData[POSITION].equals3(mData[POSITION], epsilon) &&
  2220. fabs(rhs.mTexCoord[0]-mTexCoord[0]) < epsilon &&
  2221. fabs(rhs.mTexCoord[1]-mTexCoord[1]) < epsilon)
  2222. {
  2223. if (angle_cutoff > 1.f)
  2224. {
  2225. retval = (mData[NORMAL].equals3(rhs.mData[NORMAL], epsilon));
  2226. }
  2227. else
  2228. {
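// The dot product of the two (unit) normals is the cosine of the angle
// between them, so angle_cutoff is effectively a cosine threshold: the
// closer it is to 1, the stricter the test. Values above 1 (handled in the
// branch above) can never be met by a cosine, hence the equality test there.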
  2229. F32 cur_angle = rhs.mData[NORMAL].dot3(mData[NORMAL]).getF32();
  2230. retval = cur_angle > angle_cutoff;
  2231. }
  2232. }
  2233. return retval;
  2234. }
  2235. bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size)
  2236. {
  2237. // Input stream is now pointing at a zlib compressed block of LLSD.
  2238. // Decompress block.
  2239. LLSD mdl;
  2240. if (!unzip_llsd(mdl, is, size))
  2241. {
  2242. LL_DEBUGS("MeshVolume") << "Failed to unzip LLSD blob for LoD, will probably fetch from sim again."
  2243. << LL_ENDL;
  2244. return false;
  2245. }
  2246. return unpackVolumeFaces(mdl);
  2247. }
  2248. bool LLVolume::unpackVolumeFaces(const U8* in, S32 size)
  2249. {
  2250. // 'in' is now pointing at a zlib compressed block of LLSD.
  2251. // Decompress block.
  2252. LLSD mdl;
  2253. if (!unzip_llsd(mdl, in, size))
  2254. {
  2255. LL_DEBUGS("MeshVolume") << "Failed to unzip LLSD blob for LoD, will probably fetch from sim again."
  2256. << LL_ENDL;
  2257. return false;
  2258. }
  2259. return unpackVolumeFaces(mdl);
  2260. }
  2261. bool LLVolume::unpackVolumeFaces(const LLSD& mdl)
  2262. {
  2263. size_t face_count = mdl.size();
  2264. if (face_count == 0)
  2265. {
  2266. // No faces unpacked, treat as failed decode
  2267. llwarns << "No face found !" << llendl;
  2268. return false;
  2269. }
  2270. mVolumeFaces.resize(face_count);
  2271. LLVector3 minp, maxp;
  2272. LLVector2 min_tc, max_tc;
  2273. LLVector4a min_pos, max_pos, tc_range;
  2274. for (size_t i = 0; i < face_count; ++i)
  2275. {
  2276. LLVolumeFace& face = mVolumeFaces[i];
  2277. if (mdl[i].has("NoGeometry"))
  2278. {
  2279. // Face has no geometry, continue
  2280. face.resizeIndices(3);
  2281. face.resizeVertices(1);
  2282. memset((void*)face.mPositions, 0, sizeof(LLVector4a));
  2283. memset((void*)face.mNormals, 0, sizeof(LLVector4a));
  2284. memset((void*)face.mTexCoords, 0, sizeof(LLVector2));
  2285. memset((void*)face.mIndices, 0, sizeof(U16) * 3);
  2286. continue;
  2287. }
  2288. LLSD::Binary pos = mdl[i]["Position"];
  2289. LLSD::Binary norm = mdl[i]["Normal"];
  2290. LLSD::Binary tc = mdl[i]["TexCoord0"];
  2291. LLSD::Binary idx = mdl[i]["TriangleList"];
  2292. #if LL_USE_TANGENTS
  2293. LLSD::Binary tangent = mdl[i]["Tangent"];
  2294. #endif
  2295. // Copy out indices
  2296. U32 num_indices = idx.size() / 2;
  2297. const U32 indices_to_discard = num_indices % 3;
  2298. if (indices_to_discard)
  2299. {
  2300. llwarns << "Incomplete triangle discarded from face. Indices count: "
  2301. << num_indices << " was not divisible by 3 at face index: "
  2302. << i << "/" << face_count << llendl;
  2303. num_indices -= indices_to_discard;
  2304. }
  2305. if (!face.resizeIndices(num_indices))
  2306. {
  2307. llwarns << "Failed to allocate " << num_indices
  2308. << " indices for face index: " << i << " Total: "
  2309. << face_count << llendl;
  2310. continue;
  2311. }
  2312. if (idx.empty() || face.mNumIndices < 3)
  2313. {
  2314. // Why is there an empty index list ?
  2315. llwarns << "Empty face present. Face index: " << i
  2316. << " - Faces count: " << face_count << llendl;
  2317. continue;
  2318. }
  2319. U16* indices = (U16*)&(idx[0]);
  2320. for (U32 j = 0; j < num_indices; ++j)
  2321. {
  2322. face.mIndices[j] = indices[j];
  2323. }
  2324. // Copy out vertices
  2325. U32 num_verts = pos.size() / 6;
  2326. if (!face.resizeVertices(num_verts))
  2327. {
  2328. llwarns << "Failed to allocate " << num_verts
  2329. << " vertices for face index: " << i << " Total: "
  2330. << face_count << llendl;
  2331. face.resizeIndices(0);
  2332. continue;
  2333. }
  2334. minp.setValue(mdl[i]["PositionDomain"]["Min"]);
  2335. maxp.setValue(mdl[i]["PositionDomain"]["Max"]);
  2336. min_pos.load3(minp.mV);
  2337. max_pos.load3(maxp.mV);
  2338. min_tc.setValue(mdl[i]["TexCoord0Domain"]["Min"]);
  2339. max_tc.setValue(mdl[i]["TexCoord0Domain"]["Max"]);
  2340. // Unpack normalized scale/translation
  2341. if (mdl[i].has("NormalizedScale"))
  2342. {
  2343. face.mNormalizedScale.setValue(mdl[i]["NormalizedScale"]);
  2344. }
  2345. else
  2346. {
  2347. face.mNormalizedScale.set(1.f, 1.f, 1.f);
  2348. }
  2349. LLVector4a pos_range;
  2350. pos_range.setSub(max_pos, min_pos);
  2351. LLVector2 tc_range2 = max_tc - min_tc;
  2352. tc_range.set(tc_range2[0], tc_range2[1], tc_range2[0], tc_range2[1]);
  2353. LLVector4a min_tc4(min_tc[0], min_tc[1], min_tc[0], min_tc[1]);
  2354. LLVector4a* pos_out = face.mPositions;
  2355. LLVector4a* norm_out = face.mNormals;
  2356. LLVector4a* tc_out = (LLVector4a*)face.mTexCoords;
  2357. U16* v = (U16*)&(pos[0]);
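// Each vertex attribute is quantized to 16 bits per component: positions map
// value / 65535 into [min_pos, max_pos], normals map into [-1, 1] via
// value / 65535 * 2 - 1, and texture coordinates map into [min_tc, max_tc].
// E.g. a stored position component of 32768 lands almost exactly in the
// middle of the position domain.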
  2358. for (U32 j = 0; j < num_verts; ++j)
  2359. {
  2360. pos_out->set((F32)v[0], (F32)v[1], (F32)v[2]);
  2361. pos_out->div(65535.f);
  2362. pos_out->mul(pos_range);
  2363. pos_out->add(min_pos);
  2364. ++pos_out;
  2365. v += 3;
  2366. }
  2367. if (!norm.empty())
  2368. {
  2369. U16* n = (U16*)&(norm[0]);
  2370. for (U32 j = 0; j < num_verts; ++j)
  2371. {
  2372. norm_out->set((F32)n[0], (F32)n[1], (F32)n[2]);
  2373. norm_out->div(65535.f);
  2374. norm_out->mul(2.f);
  2375. norm_out->sub(1.f);
  2376. ++norm_out;
  2377. n += 3;
  2378. }
  2379. }
  2380. else
  2381. {
  2382. memset((void*)norm_out, 0, sizeof(LLVector4a) * num_verts);
  2383. }
  2384. if (!tc.empty())
  2385. {
  2386. U16* t = (U16*)&(tc[0]);
  2387. for (U32 j = 0; j < num_verts; j += 2)
  2388. {
  2389. if (j < num_verts - 1)
  2390. {
  2391. tc_out->set((F32)t[0], (F32)t[1], (F32)t[2], (F32)t[3]);
  2392. }
  2393. else
  2394. {
  2395. tc_out->set((F32)t[0], (F32)t[1], 0.f, 0.f);
  2396. }
  2397. t += 4;
  2398. tc_out->div(65535.f);
  2399. tc_out->mul(tc_range);
  2400. tc_out->add(min_tc4);
  2401. ++tc_out;
  2402. }
  2403. }
  2404. else
  2405. {
  2406. memset((void*)tc_out, 0, sizeof(LLVector2) * num_verts);
  2407. }
  2408. #if LL_USE_TANGENTS
  2409. if (!tangent.empty())
  2410. {
  2411. face.allocateTangents(face.mNumVertices);
  2412. U16* t = (U16*)&(tangent[0]);
2413. // Note: tangents coming from the asset may not be in MikkTSpace, but
2414. // they should always be used by the GLTF shaders to maintain
2415. // compliance with the GLTF spec.
  2416. LLVector4a* t_out = face.mTangents;
  2417. for (U32 j = 0; j < num_verts; ++j)
  2418. {
  2419. t_out->set((F32)t[0], (F32)t[1], (F32)t[2], (F32)t[3]);
  2420. t_out->div(65535.f);
  2421. t_out->mul(2.f);
  2422. t_out->sub(1.f);
  2423. F32* tp = t_out->getF32ptr();
  2424. tp[3] = tp[3] < 0.f ? -1.f : 1.f;
  2425. ++t_out;
  2426. t += 4;
  2427. }
  2428. }
  2429. #endif // LL_USE_TANGENTS
  2430. if (mdl[i].has("Weights"))
  2431. {
  2432. if (!face.allocateWeights(num_verts))
  2433. {
  2434. llwarns << "Failed to allocate " << num_verts
  2435. << " weights for face index: " << i << " Total: "
  2436. << face_count << llendl;
  2437. face.resizeIndices(0);
  2438. face.resizeVertices(0);
  2439. continue;
  2440. }
  2441. LLSD::Binary weights = mdl[i]["Weights"];
  2442. U32 idx = 0;
  2443. U32 cur_vertex = 0;
  2444. bool fp_prec_error = false;
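// The weights blob is a per-vertex run of (U8 joint index, U16 little-endian
// weight) pairs, ended by a 0xFF joint byte or after four influences. Each
// weight becomes influence / 65535 clamped to [0.001, 0.999], and joint and
// weight are then packed into one float: integer part = joint index,
// fractional part = weight (e.g. joint 7 with weight 0.25 is stored as 7.25).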
  2445. while (idx < weights.size() && cur_vertex < num_verts)
  2446. {
  2447. constexpr U8 END_INFLUENCES = 0xFF;
  2448. U8 joint = weights[idx++];
  2449. U32 cur_influence = 0;
  2450. LLVector4 wght(0, 0, 0, 0);
  2451. U32 joints[4] = { 0, 0, 0, 0 };
  2452. LLVector4 joints_with_weights(0, 0, 0, 0);
  2453. while (joint != END_INFLUENCES && idx < weights.size())
  2454. {
  2455. U16 influence = weights[idx++];
  2456. influence |= ((U16)weights[idx++] << 8);
  2457. F32 w = llclamp((F32)influence / 65535.f, 0.001f, 0.999f);
  2458. wght.mV[cur_influence] = w;
  2459. joints[cur_influence++] = joint;
  2460. if (cur_influence >= 4)
  2461. {
  2462. joint = END_INFLUENCES;
  2463. }
  2464. else
  2465. {
  2466. joint = weights[idx++];
  2467. }
  2468. }
  2469. F32 wsum = wght.mV[VX] + wght.mV[VY] + wght.mV[VZ] +
  2470. wght.mV[VW];
  2471. if (wsum <= 0.f)
  2472. {
  2473. wght = LLVector4(0.999f, 0.f, 0.f, 0.f);
  2474. }
  2475. for (U32 k = 0; k < 4; ++k)
  2476. {
  2477. F32 f_combined = (F32)joints[k] + wght[k];
  2478. joints_with_weights[k] = f_combined;
  2479. if (k < cur_influence &&
  2480. f_combined - (S32)f_combined <= 0.f)
  2481. {
  2482. // Any weights we added above should wind up non-zero
  2483. // and applied to a specific bone.
  2484. fp_prec_error = true;
  2485. }
  2486. }
  2487. face.mWeights[cur_vertex++].loadua(joints_with_weights.mV);
  2488. }
  2489. if (cur_vertex != num_verts || idx != weights.size())
  2490. {
  2491. llwarns << "Vertex weight count does not match vertex count !"
  2492. << llendl;
  2493. }
  2494. if (fp_prec_error)
  2495. {
  2496. LL_DEBUGS("MeshVolume") << "Floating point precision error detected."
  2497. << LL_ENDL;
  2498. }
  2499. }
  2500. // Translate modifier flags into actions:
  2501. bool do_reflect_x = false;
  2502. bool do_reverse_triangles = false;
  2503. bool do_invert_normals = false;
  2504. bool do_mirror = (mParams.getSculptType() & LL_SCULPT_FLAG_MIRROR);
  2505. if (do_mirror)
  2506. {
  2507. do_reflect_x = true;
  2508. do_reverse_triangles = !do_reverse_triangles;
  2509. }
  2510. bool do_invert = (mParams.getSculptType() & LL_SCULPT_FLAG_INVERT);
  2511. if (do_invert)
  2512. {
  2513. do_invert_normals = true;
  2514. do_reverse_triangles = !do_reverse_triangles;
  2515. }
  2516. // Now do the work
  2517. if (do_reflect_x)
  2518. {
  2519. LLVector4a* p = (LLVector4a*)face.mPositions;
  2520. LLVector4a* n = (LLVector4a*)face.mNormals;
  2521. for (S32 i = 0; i < face.mNumVertices; ++i)
  2522. {
  2523. p[i].mul(-1.f);
  2524. n[i].mul(-1.f);
  2525. }
  2526. }
  2527. if (do_invert_normals)
  2528. {
  2529. LLVector4a* n = (LLVector4a*)face.mNormals;
  2530. for (S32 i = 0; i < face.mNumVertices; ++i)
  2531. {
  2532. n[i].mul(-1.f);
  2533. }
  2534. }
  2535. if (do_reverse_triangles)
  2536. {
  2537. for (S32 j = 0; j < face.mNumIndices; j += 3)
  2538. {
  2539. // Swap the 2nd and 3rd index
  2540. S32 swap = face.mIndices[j + 1];
  2541. face.mIndices[j + 1] = face.mIndices[j + 2];
  2542. face.mIndices[j + 2] = swap;
  2543. }
  2544. }
  2545. // Calculate bounding box
  2546. LLVector4a& min = face.mExtents[0];
  2547. LLVector4a& max = face.mExtents[1];
  2548. if (face.mNumVertices < 3)
  2549. {
  2550. // Empty face, use a dummy 1cm (at 1m scale) bounding box
  2551. min.splat(-0.005f);
  2552. max.splat(0.005f);
  2553. }
  2554. else
  2555. {
  2556. min = max = face.mPositions[0];
  2557. for (S32 i = 1; i < face.mNumVertices; ++i)
  2558. {
  2559. min.setMin(min, face.mPositions[i]);
  2560. max.setMax(max, face.mPositions[i]);
  2561. }
  2562. if (face.mTexCoords)
  2563. {
  2564. LLVector2& min_tc = face.mTexCoordExtents[0];
  2565. LLVector2& max_tc = face.mTexCoordExtents[1];
  2566. min_tc = face.mTexCoords[0];
  2567. max_tc = face.mTexCoords[0];
  2568. for (S32 j = 1; j < face.mNumVertices; ++j)
  2569. {
  2570. update_min_max(min_tc, max_tc, face.mTexCoords[j]);
  2571. }
  2572. }
  2573. else
  2574. {
  2575. face.mTexCoordExtents[0].set(0, 0);
  2576. face.mTexCoordExtents[1].set(1, 1);
  2577. }
  2578. }
  2579. }
  2580. if (sOptimizeCache && !cacheOptimize(gUsePBRShaders))
  2581. {
  2582. llwarns << "Failed to optimize cache." << llendl;
  2583. mVolumeFaces.clear();
  2584. return false;
  2585. }
  2586. mSculptLevel = 0; // Success !
  2587. return true;
  2588. }
  2589. bool LLVolume::cacheOptimize(bool gen_tangents)
  2590. {
  2591. const S32 count = mVolumeFaces.size();
  2592. #if LL_OPENMP
  2593. // NOTE: we cannot use OpenMP when called from the mesh repository which is
  2594. // itself a (p)thread (pthread and OpenMP threads are incompatible)... HB
  2595. if (is_main_thread())
  2596. {
  2597. LLAtomicBool success(true);
  2598. # pragma omp parallel for
  2599. for (S32 i = 0; success && i < count; ++i)
  2600. {
  2601. if (!mVolumeFaces[i].cacheOptimize(gen_tangents))
  2602. {
  2603. success = false;
  2604. }
  2605. }
  2606. return success;
  2607. }
  2608. #endif
  2609. for (S32 i = 0; i < count; ++i)
  2610. {
2611. if (!mVolumeFaces[i].cacheOptimize(gen_tangents))
  2612. {
  2613. return false;
  2614. }
  2615. }
  2616. return true;
  2617. }
  2618. void LLVolume::createVolumeFaces()
  2619. {
  2620. if (mGenerateSingleFace)
  2621. {
  2622. // Do nothing
  2623. return;
  2624. }
  2625. S32 num_faces = getNumFaces();
  2626. bool partial_build = true;
  2627. if (num_faces != (S32)mVolumeFaces.size())
  2628. {
  2629. partial_build = false;
  2630. mVolumeFaces.resize(num_faces);
  2631. }
  2632. // Initialize volume faces with parameter data
  2633. for (S32 i = 0, count = (S32)mVolumeFaces.size(); i < count; ++i)
  2634. {
  2635. LLVolumeFace& vf = mVolumeFaces[i];
  2636. LLProfile::Face& face = mProfile.mFaces[i];
  2637. vf.mBeginS = face.mIndex;
  2638. vf.mNumS = face.mCount;
  2639. if (vf.mNumS < 0)
  2640. {
  2641. llerrs << "Volume face corruption detected." << llendl;
  2642. }
  2643. vf.mBeginT = 0;
  2644. vf.mNumT = getPath().mPath.size();
  2645. vf.mID = i;
  2646. // Set the type mask bits correctly
  2647. if (mParams.getProfileParams().getHollow() > 0)
  2648. {
  2649. vf.mTypeMask |= LLVolumeFace::HOLLOW_MASK;
  2650. }
  2651. if (mProfile.isOpen())
  2652. {
  2653. vf.mTypeMask |= LLVolumeFace::OPEN_MASK;
  2654. }
  2655. if (face.mCap)
  2656. {
  2657. vf.mTypeMask |= LLVolumeFace::CAP_MASK;
  2658. if (face.mFaceID == LL_FACE_PATH_BEGIN)
  2659. {
  2660. vf.mTypeMask |= LLVolumeFace::TOP_MASK;
  2661. }
  2662. else
  2663. {
  2664. llassert(face.mFaceID == LL_FACE_PATH_END);
  2665. vf.mTypeMask |= LLVolumeFace::BOTTOM_MASK;
  2666. }
  2667. }
  2668. else if (face.mFaceID & (LL_FACE_PROFILE_BEGIN | LL_FACE_PROFILE_END))
  2669. {
  2670. vf.mTypeMask |= LLVolumeFace::FLAT_MASK | LLVolumeFace::END_MASK;
  2671. }
  2672. else
  2673. {
  2674. vf.mTypeMask |= LLVolumeFace::SIDE_MASK;
  2675. if (face.mFlat)
  2676. {
  2677. vf.mTypeMask |= LLVolumeFace::FLAT_MASK;
  2678. }
  2679. if (face.mFaceID & LL_FACE_INNER_SIDE)
  2680. {
  2681. vf.mTypeMask |= LLVolumeFace::INNER_MASK;
  2682. if (face.mFlat && vf.mNumS > 2)
  2683. {
  2684. // Flat inner faces have to copy vert normals
  2685. vf.mNumS = vf.mNumS * 2;
  2686. if (vf.mNumS < 0)
  2687. {
  2688. llerrs << "Volume face corruption detected." << llendl;
  2689. }
  2690. }
  2691. }
  2692. else
  2693. {
  2694. vf.mTypeMask |= LLVolumeFace::OUTER_MASK;
  2695. }
  2696. }
  2697. }
  2698. for (face_list_t::iterator iter = mVolumeFaces.begin();
  2699. iter != mVolumeFaces.end(); ++iter)
  2700. {
  2701. iter->create(this, partial_build);
  2702. }
  2703. }
  2704. LL_INLINE LLVector4a sculpt_rgb_to_vector(U8 r, U8 g, U8 b)
  2705. {
  2706. // maps RGB values to vector values [0..255] -> [-0.5..0.5]
  2707. LLVector4a value;
  2708. LLVector4a sub(0.5f, 0.5f, 0.5f);
  2709. value.set(r, g, b);
  2710. value.mul(1.f / 255.f);
  2711. value.sub(sub);
  2712. return value;
  2713. }
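// Example: a texel of (255, 128, 0) maps to roughly (0.5, 0.002, -0.5):
// each channel is first scaled into [0..1], then recentered around zero.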
  2714. LL_INLINE U32 sculpt_xy_to_index(U32 x, U32 y, U16 sculpt_width,
  2715. U16 sculpt_height, S8 sculpt_components)
  2716. {
  2717. U32 index = (x + y * sculpt_width) * sculpt_components;
  2718. return index;
  2719. }
  2720. LL_INLINE U32 sculpt_st_to_index(S32 s, S32 t, S32 siz_s, S32 siz_t,
  2721. U16 sculpt_width, U16 sculpt_height,
  2722. S8 sculpt_components)
  2723. {
  2724. U32 x = (U32) ((F32)s / (siz_s) * (F32) sculpt_width);
  2725. U32 y = (U32) ((F32)t / (siz_t) * (F32) sculpt_height);
  2726. return sculpt_xy_to_index(x, y, sculpt_width, sculpt_height,
  2727. sculpt_components);
  2728. }
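// Example: with a 64x64 RGB map (sculpt_components == 3), the pixel at
// (x, y) = (10, 4) starts at byte (10 + 4 * 64) * 3 = 798 of sculpt_data;
// sculpt_st_to_index() simply rescales (s, t) into that pixel space first
// and then computes the same offset.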
  2729. LL_INLINE LLVector4a sculpt_index_to_vector(U32 index, const U8* sculpt_data)
  2730. {
  2731. LLVector4a v = sculpt_rgb_to_vector(sculpt_data[index],
  2732. sculpt_data[index + 1],
  2733. sculpt_data[index + 2]);
  2734. return v;
  2735. }
  2736. LL_INLINE LLVector4a sculpt_st_to_vector(S32 s, S32 t, S32 siz_s, S32 siz_t,
  2737. U16 sculpt_width, U16 sculpt_height,
  2738. S8 sculpt_components,
  2739. const U8* sculpt_data)
  2740. {
  2741. U32 index = sculpt_st_to_index(s, t, siz_s, siz_t, sculpt_width,
  2742. sculpt_height, sculpt_components);
  2743. return sculpt_index_to_vector(index, sculpt_data);
  2744. }
  2745. LL_INLINE LLVector4a sculpt_xy_to_vector(U32 x, U32 y, U16 sculpt_width,
  2746. U16 sculpt_height,
  2747. S8 sculpt_components,
  2748. const U8* sculpt_data)
  2749. {
  2750. U32 index = sculpt_xy_to_index(x, y, sculpt_width, sculpt_height,
  2751. sculpt_components);
  2752. return sculpt_index_to_vector(index, sculpt_data);
  2753. }
  2754. F32 LLVolume::sculptGetSurfaceArea()
  2755. {
  2756. // Test to see if image has enough variation to create non-degenerate
  2757. // geometry
  2758. F32 area = 0;
  2759. S32 s_size = mPathp->mPath.size();
  2760. S32 t_size = mProfile.mVertices.size();
  2761. LLVector4a v0, v1, v2, v3, cross1, cross2;
  2762. for (S32 s = 0; s < s_size - 1; ++s)
  2763. {
  2764. for (S32 t = 0; t < t_size - 1; ++t)
  2765. {
  2766. // Get four corners of quad
  2767. LLVector4a& p1 = mMesh[s * t_size + t];
  2768. LLVector4a& p2 = mMesh[(s + 1) * t_size + t];
  2769. LLVector4a& p3 = mMesh[s * t_size + t + 1];
  2770. LLVector4a& p4 = mMesh[(s + 1) * t_size + t + 1];
  2771. // Compute the area of the quad by taking the length of the cross
  2772. // product of the two triangles
  2773. v0.setSub(p1, p2);
  2774. v1.setSub(p1, p3);
  2775. v2.setSub(p4, p2);
  2776. v3.setSub(p4, p3);
  2777. cross1.setCross3(v0, v1);
  2778. cross2.setCross3(v2, v3);
  2779. area += (cross1.getLength3() +
  2780. cross2.getLength3()).getF32() * 0.5f;
  2781. }
  2782. }
  2783. return area;
  2784. }
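// Note: each quad above is split along its p2-p3 diagonal into two
// triangles, so the accumulated term is 0.5 * (|v0 x v1| + |v2 x v3|) per
// quad. This is only an estimate of the surface area, but it is accurate
// enough for the SCULPT_MIN_AREA / SCULPT_MAX_AREA sanity check performed
// in sculpt().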
  2785. // Create empty placeholder shape
  2786. void LLVolume::sculptGenerateEmptyPlaceholder()
  2787. {
  2788. S32 s_size = mPathp->mPath.size();
  2789. S32 t_size = mProfile.mVertices.size();
  2790. S32 line = 0;
  2791. for (S32 s = 0; s < s_size; ++s)
  2792. {
  2793. for (S32 t = 0; t < t_size; ++t)
  2794. {
  2795. S32 i = t + line;
  2796. LLVector4a& pt = mMesh[i];
  2797. F32* p = pt.getF32ptr();
  2798. p[0] = p[1] = p[2] = 0.f;
  2799. }
  2800. line += t_size;
  2801. }
  2802. }
  2803. void LLVolume::sculptGenerateSpherePlaceholder()
  2804. {
  2805. S32 s_size = mPathp->mPath.size();
  2806. S32 t_size = mProfile.mVertices.size();
  2807. S32 line = 0;
  2808. constexpr F32 RADIUS = 0.3f;
  2809. for (S32 s = 0; s < s_size; ++s)
  2810. {
  2811. for (S32 t = 0; t < t_size; ++t)
  2812. {
  2813. S32 i = t + line;
  2814. F32 u = (F32)s / (s_size - 1) * 2.f * F_PI;
  2815. F32 v = (F32)t / (t_size - 1) * F_PI;
  2816. LLVector4a& pt = mMesh[i];
  2817. F32* p = pt.getF32ptr();
  2818. p[0] = sinf(v) * cosf(u) * RADIUS;
  2819. p[1] = sinf(v) * sinf(u) * RADIUS;
  2820. p[2] = cosf(v) * RADIUS;
  2821. }
  2822. line += t_size;
  2823. }
  2824. }
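// The placeholder above is a plain spherical parameterization: with
// u = s / (s_size - 1) * 2 * PI (longitude) and v = t / (t_size - 1) * PI
// (latitude), each point is RADIUS * (sin(v)cos(u), sin(v)sin(u), cos(v)).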
  2825. // Creates the vertices from the map
  2826. void LLVolume::sculptGenerateMapVertices(U16 sculpt_width, U16 sculpt_height,
  2827. S8 sculpt_components,
  2828. const U8* sculpt_data, U8 sculpt_type)
  2829. {
  2830. U8 sculpt_stitching = sculpt_type & LL_SCULPT_TYPE_MASK;
  2831. bool sculpt_invert = (sculpt_type & LL_SCULPT_FLAG_INVERT) != 0;
  2832. bool sculpt_mirror = (sculpt_type & LL_SCULPT_FLAG_MIRROR) != 0;
  2833. bool reverse_horizontal = sculpt_invert ? !sculpt_mirror : sculpt_mirror;
  2834. S32 s_size = mPathp->mPath.size();
  2835. S32 t_size = mProfile.mVertices.size();
  2836. S32 line = 0;
  2837. for (S32 s = 0; s < s_size; ++s)
  2838. {
  2839. // Run along the profile.
  2840. for (S32 t = 0; t < t_size; ++t)
  2841. {
  2842. S32 i = t + line;
  2843. LLVector4a& pt = mMesh[i];
  2844. S32 reversed_t = t;
  2845. if (reverse_horizontal)
  2846. {
  2847. reversed_t = t_size - t - 1;
  2848. }
  2849. U32 x = (U32)((F32)reversed_t / (t_size - 1) * (F32)sculpt_width);
  2850. U32 y = (U32)((F32)s / (s_size - 1) * (F32)sculpt_height);
  2851. if (y == 0) // top row stitching
  2852. {
  2853. // Pinch ?
  2854. if (sculpt_stitching == LL_SCULPT_TYPE_SPHERE)
  2855. {
  2856. x = sculpt_width / 2;
  2857. }
  2858. }
  2859. if (y == sculpt_height) // bottom row stitching
  2860. {
  2861. // Wrap ?
  2862. if (sculpt_stitching == LL_SCULPT_TYPE_TORUS)
  2863. {
  2864. y = 0;
  2865. }
  2866. else
  2867. {
  2868. y = sculpt_height - 1;
  2869. }
  2870. // Pinch ?
  2871. if (sculpt_stitching == LL_SCULPT_TYPE_SPHERE)
  2872. {
  2873. x = sculpt_width / 2;
  2874. }
  2875. }
  2876. if (x == sculpt_width) // side stitching
  2877. {
  2878. // Wrap ?
  2879. if (sculpt_stitching == LL_SCULPT_TYPE_SPHERE ||
  2880. sculpt_stitching == LL_SCULPT_TYPE_TORUS ||
  2881. sculpt_stitching == LL_SCULPT_TYPE_CYLINDER)
  2882. {
  2883. x = 0;
  2884. }
  2885. else
  2886. {
  2887. x = sculpt_width - 1;
  2888. }
  2889. }
  2890. pt = sculpt_xy_to_vector(x, y, sculpt_width, sculpt_height,
  2891. sculpt_components, sculpt_data);
  2892. if (sculpt_mirror)
  2893. {
  2894. static const LLVector4a scale(-1.f, 1.f, 1.f, 1.f);
  2895. pt.mul(scale);
  2896. }
  2897. llassert(pt.isFinite3());
  2898. }
  2899. line += t_size;
  2900. }
  2901. }
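// Stitching summary for the loop above: spheres pinch the first and last
// rows to the middle column of the map and wrap the last column back to
// the first; toruses wrap both the last row and the last column; cylinders
// only wrap the last column; planes clamp to the last row/column instead
// of wrapping.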
  2902. // Changed from 4 to 6 - 6 looks round whereas 4 looks square:
  2903. constexpr S32 SCULPT_REZ_1 = 6;
  2904. constexpr S32 SCULPT_REZ_2 = 8;
  2905. constexpr S32 SCULPT_REZ_3 = 16;
  2906. constexpr S32 SCULPT_REZ_4 = 32;
  2907. S32 sculpt_sides(F32 detail)
  2908. {
  2909. // detail is usually one of: 1, 1.5, 2.5, 4.0.
  2910. if (detail <= 1.f)
  2911. {
  2912. return SCULPT_REZ_1;
  2913. }
  2914. if (detail <= 2.f)
  2915. {
  2916. return SCULPT_REZ_2;
  2917. }
  2918. if (detail <= 3.f)
  2919. {
  2920. return SCULPT_REZ_3;
  2921. }
  2922. else
  2923. {
  2924. return SCULPT_REZ_4;
  2925. }
  2926. }
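// With the usual detail values noted above, this maps 1.0 -> 6, 1.5 -> 8,
// 2.5 -> 16 and 4.0 -> 32 sides.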
  2927. // Determine the number of vertices in both s and t direction for this sculpt
  2928. void sculpt_calc_mesh_resolution(U16 width, U16 height, U8 type, F32 detail,
  2929. S32& s, S32& t)
  2930. {
  2931. // this code has the following properties:
  2932. // 1) the aspect ratio of the mesh is as close as possible to the ratio of
  2933. // the map while still using all available verts
  2934. // 2) the mesh cannot have more verts than is allowed by LOD
  2935. // 3) the mesh cannot have more verts than is allowed by the map
  2936. S32 max_vertices_lod = (S32)powf((F32)sculpt_sides(detail), 2.f);
  2937. S32 max_vertices_map = width * height / 4;
  2938. S32 vertices;
  2939. if (max_vertices_map > 0)
  2940. {
  2941. vertices = llmin(max_vertices_lod, max_vertices_map);
  2942. }
  2943. else
  2944. {
  2945. vertices = max_vertices_lod;
  2946. }
  2947. F32 ratio;
  2948. if (width == 0 || height == 0)
  2949. {
  2950. ratio = 1.f;
  2951. }
  2952. else
  2953. {
  2954. ratio = (F32) width / (F32) height;
  2955. }
  2956. s = (S32)sqrtf((F32)vertices / ratio);
  2957. s = llmax(s, 4); // No degenerate sizes, please
  2958. t = vertices / s;
  2959. t = llmax(t, 4); // No degenerate sizes, please
  2960. s = vertices / t;
  2961. }
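// Worked example: a 64x64 map at detail 4.0 yields max_vertices_lod =
// 32 * 32 = 1024 and max_vertices_map = 64 * 64 / 4 = 1024, with
// ratio = 1, so s = sqrt(1024) = 32 and t = 1024 / 32 = 32, i.e. a 32x32
// vertex grid.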
  2962. // This method replaces generate() for sculpted surfaces
  2963. void LLVolume::sculpt(U16 sculpt_width, U16 sculpt_height,
  2964. S8 sculpt_components, const U8* sculpt_data,
  2965. S32 sculpt_level, bool visible_placeholder)
  2966. {
  2967. U8 sculpt_type = mParams.getSculptType();
  2968. bool data_is_empty = false;
  2969. if (sculpt_width == 0 || sculpt_height == 0 || sculpt_components < 3 ||
  2970. !sculpt_data)
  2971. {
  2972. sculpt_level = -1;
  2973. data_is_empty = true;
  2974. }
  2975. S32 requested_s_size = 0;
  2976. S32 requested_t_size = 0;
  2977. // Always create oblong sculpties with high LOD
  2978. F32 sculpt_detail = mDetail;
  2979. if (sculpt_detail < 4.f && sculpt_width != sculpt_height)
  2980. {
  2981. sculpt_detail = 4.f;
  2982. }
  2983. sculpt_calc_mesh_resolution(sculpt_width, sculpt_height, sculpt_type,
  2984. sculpt_detail, requested_s_size,
  2985. requested_t_size);
  2986. mPathp->generate(mParams.getPathParams(), mDetail, 0, true,
  2987. requested_s_size);
  2988. mProfile.generate(mParams.getProfileParams(), mPathp->isOpen(), mDetail, 0,
  2989. true, requested_t_size);
2990. // We requested a specific size, now see what we really got
  2991. S32 s_size = mPathp->mPath.size();
  2992. S32 t_size = mProfile.mVertices.size();
  2993. // weird crash bug - DEV-11158 - trying to collect more data:
  2994. if (s_size == 0 || t_size == 0)
  2995. {
  2996. llwarns << "Sculpt bad mesh size " << s_size << " " << t_size
  2997. << llendl;
  2998. }
  2999. sNumMeshPoints -= mMesh.size();
  3000. mMesh.resize(s_size * t_size);
  3001. sNumMeshPoints += mMesh.size();
  3002. // Generate vertex positions
  3003. if (!data_is_empty)
  3004. {
  3005. sculptGenerateMapVertices(sculpt_width, sculpt_height,
  3006. sculpt_components, sculpt_data,
  3007. sculpt_type);
  3008. // Do not test lowest LOD to support legacy content DEV-33670
  3009. if (mDetail > SCULPT_MIN_AREA_DETAIL)
  3010. {
  3011. F32 area = sculptGetSurfaceArea();
  3012. mSurfaceArea = area;
  3013. constexpr F32 SCULPT_MAX_AREA = 384.f;
  3014. if (area < SCULPT_MIN_AREA || area > SCULPT_MAX_AREA)
  3015. {
  3016. data_is_empty = visible_placeholder = true;
  3017. }
  3018. }
  3019. }
  3020. if (data_is_empty)
  3021. {
  3022. if (visible_placeholder)
  3023. {
  3024. sculptGenerateSpherePlaceholder();
  3025. }
  3026. else
  3027. {
  3028. sculptGenerateEmptyPlaceholder();
  3029. }
  3030. }
  3031. for (S32 i = 0; i < (S32)mProfile.mFaces.size(); ++i)
  3032. {
  3033. mFaceMask |= mProfile.mFaces[i].mFaceID;
  3034. }
  3035. mSculptLevel = sculpt_level;
  3036. // Delete any existing faces so that they get regenerated
  3037. mVolumeFaces.clear();
  3038. createVolumeFaces();
  3039. }
  3040. bool LLVolumeParams::operator==(const LLVolumeParams& params) const
  3041. {
  3042. return getPathParams() == params.getPathParams() &&
  3043. getProfileParams() == params.getProfileParams() &&
  3044. mSculptID == params.mSculptID &&
  3045. mSculptType == params.mSculptType;
  3046. }
  3047. bool LLVolumeParams::operator!=(const LLVolumeParams& params) const
  3048. {
  3049. return getPathParams() != params.getPathParams() ||
  3050. getProfileParams() != params.getProfileParams() ||
  3051. mSculptID != params.mSculptID ||
  3052. mSculptType != params.mSculptType;
  3053. }
  3054. bool LLVolumeParams::operator<(const LLVolumeParams& params) const
  3055. {
  3056. if (getPathParams() != params.getPathParams())
  3057. {
  3058. return getPathParams() < params.getPathParams();
  3059. }
  3060. if (getProfileParams() != params.getProfileParams())
  3061. {
  3062. return getProfileParams() < params.getProfileParams();
  3063. }
  3064. if (mSculptID != params.mSculptID)
  3065. {
  3066. return mSculptID < params.mSculptID;
  3067. }
  3068. return mSculptType < params.mSculptType;
  3069. }
  3070. void LLVolumeParams::copyParams(const LLVolumeParams& params)
  3071. {
  3072. mProfileParams.copyParams(params.mProfileParams);
  3073. mPathParams.copyParams(params.mPathParams);
  3074. mSculptID = params.getSculptID();
  3075. mSculptType = params.getSculptType();
  3076. }
3077. // Less restrictive approx 0 for volumes
  3078. constexpr F32 APPROXIMATELY_ZERO = 0.001f;
  3079. LL_INLINE static bool approx_zero(F32 f, F32 tolerance)
  3080. {
  3081. return f >= -tolerance && f <= tolerance;
  3082. }
  3083. // Returns true if in range (or nearly so)
  3084. static bool limit_range(F32& v, F32 min, F32 max,
  3085. F32 tolerance = APPROXIMATELY_ZERO)
  3086. {
  3087. if (v < min)
  3088. {
  3089. LL_DEBUGS("VolumeMessage") << "Wrong value = " << v << " - min = "
  3090. << min << ". Clamping." << LL_ENDL;
3091. F32 delta = min - v;
3092. v = min;
3093. if (!approx_zero(delta, tolerance))
3094. {
3095. return false;
3096. }
  3096. }
  3097. if (v > max)
  3098. {
  3099. LL_DEBUGS("VolumeMessage") << "Wrong value = " << v << " - max = "
  3100. << max << ". Clamping." << LL_ENDL;
3101. F32 delta = v - max;
3102. v = max;
3103. if (!approx_zero(delta, tolerance))
3104. {
3105. return false;
3106. }
  3106. }
  3107. return true;
  3108. }
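// Example: limit_range(v, 0.f, 1.f) with v == 1.0005f clamps v to 1.f and
// still returns true (the overshoot is within APPROXIMATELY_ZERO), whereas
// v == 1.2f is clamped but reported as invalid (returns false).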
  3109. bool LLVolumeParams::setBeginAndEndS(F32 b, F32 e)
  3110. {
  3111. bool valid = true;
  3112. // First, clamp to valid ranges.
  3113. F32 begin = b;
  3114. valid &= limit_range(begin, 0.f, 1.f - OBJECT_MIN_CUT_INC);
  3115. F32 end = e;
  3116. if (end >= .0149f && end < OBJECT_MIN_CUT_INC)
  3117. {
  3118. // Eliminate warning for common rounding error
  3119. end = OBJECT_MIN_CUT_INC;
  3120. }
  3121. valid &= limit_range(end, OBJECT_MIN_CUT_INC, 1.f);
  3122. valid &= limit_range(begin, 0.f, end - OBJECT_MIN_CUT_INC, .01f);
  3123. // Now set them.
  3124. mProfileParams.setBegin(begin);
  3125. mProfileParams.setEnd(end);
  3126. return valid;
  3127. }
  3128. bool LLVolumeParams::setBeginAndEndT(F32 b, F32 e)
  3129. {
  3130. bool valid = true;
  3131. // First, clamp to valid ranges.
  3132. F32 begin = b;
  3133. valid &= limit_range(begin, 0.f, 1.f - OBJECT_MIN_CUT_INC);
  3134. F32 end = e;
  3135. valid &= limit_range(end, OBJECT_MIN_CUT_INC, 1.f);
  3136. valid &= limit_range(begin, 0.f, end - OBJECT_MIN_CUT_INC, .01f);
  3137. // Now set them.
  3138. mPathParams.setBegin(begin);
  3139. mPathParams.setEnd(end);
  3140. return valid;
  3141. }
  3142. bool LLVolumeParams::setHollow(F32 h)
  3143. {
  3144. // Validate the hollow based on path and profile.
  3145. U8 profile = mProfileParams.getCurveType() & LL_PCODE_PROFILE_MASK;
  3146. U8 hole_type = mProfileParams.getCurveType() & LL_PCODE_HOLE_MASK;
  3147. F32 max_hollow = OBJECT_HOLLOW_MAX;
  3148. // Only square holes have trouble.
  3149. if (hole_type == LL_PCODE_HOLE_SQUARE &&
  3150. (profile == LL_PCODE_PROFILE_CIRCLE ||
  3151. profile == LL_PCODE_PROFILE_CIRCLE_HALF ||
  3152. profile == LL_PCODE_PROFILE_EQUALTRI))
  3153. {
  3154. max_hollow = OBJECT_HOLLOW_MAX_SQUARE;
  3155. }
  3156. F32 hollow = h;
  3157. bool valid = limit_range(hollow, OBJECT_HOLLOW_MIN, max_hollow);
  3158. mProfileParams.setHollow(hollow);
  3159. return valid;
  3160. }
  3161. bool LLVolumeParams::setTwistBegin(F32 b)
  3162. {
  3163. F32 twist_begin = b;
  3164. bool valid = limit_range(twist_begin, OBJECT_TWIST_MIN, OBJECT_TWIST_MAX);
  3165. mPathParams.setTwistBegin(twist_begin);
  3166. return valid;
  3167. }
  3168. bool LLVolumeParams::setTwistEnd(F32 e)
  3169. {
  3170. F32 twist_end = e;
  3171. bool valid = limit_range(twist_end, OBJECT_TWIST_MIN, OBJECT_TWIST_MAX);
  3172. mPathParams.setTwistEnd(twist_end);
  3173. return valid;
  3174. }
  3175. bool LLVolumeParams::setRatio(F32 x, F32 y)
  3176. {
  3177. F32 min_x = RATIO_MIN;
  3178. F32 max_x = RATIO_MAX;
  3179. F32 min_y = RATIO_MIN;
  3180. F32 max_y = RATIO_MAX;
  3181. // If this is a circular path (and not a sphere) then 'ratio' is actually
  3182. // hole size.
  3183. U8 path_type = mPathParams.getCurveType();
  3184. U8 profile_type = mProfileParams.getCurveType() & LL_PCODE_PROFILE_MASK;
  3185. if (LL_PCODE_PATH_CIRCLE == path_type &&
  3186. LL_PCODE_PROFILE_CIRCLE_HALF != profile_type)
  3187. {
  3188. // Holes are more restricted...
  3189. min_x = OBJECT_MIN_HOLE_SIZE;
  3190. max_x = OBJECT_MAX_HOLE_SIZE_X;
  3191. min_y = OBJECT_MIN_HOLE_SIZE;
  3192. max_y = OBJECT_MAX_HOLE_SIZE_Y;
  3193. }
  3194. F32 ratio_x = x;
  3195. bool valid = limit_range(ratio_x, min_x, max_x);
  3196. F32 ratio_y = y;
  3197. valid &= limit_range(ratio_y, min_y, max_y);
  3198. mPathParams.setScale(ratio_x, ratio_y);
  3199. return valid;
  3200. }
  3201. bool LLVolumeParams::setShear(F32 x, F32 y)
  3202. {
  3203. F32 shear_x = x;
  3204. bool valid = limit_range(shear_x, SHEAR_MIN, SHEAR_MAX);
  3205. F32 shear_y = y;
  3206. valid &= limit_range(shear_y, SHEAR_MIN, SHEAR_MAX);
  3207. mPathParams.setShear(shear_x, shear_y);
  3208. return valid;
  3209. }
  3210. bool LLVolumeParams::setTaperX(F32 v)
  3211. {
  3212. F32 taper = v;
  3213. bool valid = limit_range(taper, TAPER_MIN, TAPER_MAX);
  3214. mPathParams.setTaperX(taper);
  3215. return valid;
  3216. }
  3217. bool LLVolumeParams::setTaperY(F32 v)
  3218. {
  3219. F32 taper = v;
  3220. bool valid = limit_range(taper, TAPER_MIN, TAPER_MAX);
  3221. mPathParams.setTaperY(taper);
  3222. return valid;
  3223. }
  3224. bool LLVolumeParams::setRevolutions(F32 r)
  3225. {
  3226. F32 revolutions = r;
  3227. bool valid = limit_range(revolutions, OBJECT_REV_MIN, OBJECT_REV_MAX);
  3228. mPathParams.setRevolutions(revolutions);
  3229. return valid;
  3230. }
  3231. bool LLVolumeParams::setRadiusOffset(F32 offset)
  3232. {
  3233. bool valid = true;
  3234. // If this is a sphere, just set it to 0 and get out.
  3235. U8 path_type = mPathParams.getCurveType();
  3236. U8 profile_type = mProfileParams.getCurveType() & LL_PCODE_PROFILE_MASK;
  3237. if (profile_type == LL_PCODE_PROFILE_CIRCLE_HALF ||
  3238. path_type != LL_PCODE_PATH_CIRCLE)
  3239. {
  3240. mPathParams.setRadiusOffset(0.f);
  3241. return true;
  3242. }
  3243. // Limit radius offset, based on taper and hole size y.
  3244. F32 radius_offset = offset;
  3245. F32 taper_y = getTaperY();
  3246. F32 radius_mag = fabs(radius_offset);
  3247. F32 hole_y_mag = fabs(getRatioY());
  3248. F32 taper_y_mag = fabs(taper_y);
3249. // Check to see if the taper affects us.
  3250. if ((radius_offset > 0.f && taper_y < 0.f) ||
  3251. (radius_offset < 0.f && taper_y > 0.f))
  3252. {
  3253. // The taper does not help increase the radius offset range.
  3254. taper_y_mag = 0.f;
  3255. }
  3256. F32 max_radius_mag = 1.f - hole_y_mag * (1.f - taper_y_mag) /
  3257. (1.f - hole_y_mag);
  3258. // Enforce the maximum magnitude.
  3259. F32 delta = max_radius_mag - radius_mag;
  3260. if (delta < 0.f)
  3261. {
  3262. // Check radius offset sign.
  3263. if (radius_offset < 0.f)
  3264. {
  3265. radius_offset = -max_radius_mag;
  3266. }
  3267. else
  3268. {
  3269. radius_offset = max_radius_mag;
  3270. }
  3271. valid = approx_zero(delta, .1f);
  3272. }
  3273. mPathParams.setRadiusOffset(radius_offset);
  3274. return valid;
  3275. }
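// Worked example for the formula above: with taper_y = 0 and a hole size
// of 0.25, max_radius_mag = 1 - 0.25 * (1 - 0) / (1 - 0.25) = 2/3, so any
// requested offset beyond roughly +/-0.667 gets clamped (and flagged as
// invalid when the excess is larger than 0.1).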
  3276. bool LLVolumeParams::setSkew(F32 skew_value)
  3277. {
  3278. bool valid = true;
  3279. // Check the skew value against the revolutions.
  3280. F32 skew = llclamp(skew_value, SKEW_MIN, SKEW_MAX);
  3281. F32 skew_mag = fabs(skew);
  3282. F32 revolutions = getRevolutions();
  3283. F32 scale_x = getRatioX();
  3284. F32 min_skew_mag = 1.f - 1.f / (revolutions * scale_x + 1.f);
  3285. // Discontinuity; A revolution of 1 allows skews below 0.5.
  3286. if (fabs(revolutions - 1.f) < 0.001)
  3287. {
  3288. min_skew_mag = 0.f;
  3289. }
  3290. // Clip skew.
  3291. F32 delta = skew_mag - min_skew_mag;
  3292. if (delta < 0.f)
  3293. {
  3294. // Check skew sign.
  3295. if (skew < 0.f)
  3296. {
  3297. skew = -min_skew_mag;
  3298. }
  3299. else
  3300. {
  3301. skew = min_skew_mag;
  3302. }
  3303. valid = approx_zero(delta, .01f);
  3304. }
  3305. mPathParams.setSkew(skew);
  3306. return valid;
  3307. }
  3308. bool LLVolumeParams::setSculptID(const LLUUID& sculpt_id, U8 sculpt_type)
  3309. {
  3310. mSculptID = sculpt_id;
  3311. mSculptType = sculpt_type;
  3312. return true;
  3313. }
  3314. bool LLVolumeParams::setType(U8 profile, U8 path)
  3315. {
  3316. bool result = true;
  3317. // First, check profile and path for validity.
  3318. U8 profile_type = profile & LL_PCODE_PROFILE_MASK;
  3319. U8 hole_type = (profile & LL_PCODE_HOLE_MASK) >> 4;
  3320. U8 path_type = path >> 4;
  3321. if (profile_type > LL_PCODE_PROFILE_MAX)
  3322. {
  3323. // Bad profile. Make it square.
  3324. profile = LL_PCODE_PROFILE_SQUARE;
  3325. result = false;
  3326. llwarns << "Changing bad profile type (" << (S32)profile_type
  3327. << ") to be LL_PCODE_PROFILE_SQUARE" << llendl;
  3328. }
  3329. else if (hole_type > LL_PCODE_HOLE_MAX)
  3330. {
  3331. // Bad hole. Make it the same.
  3332. profile = profile_type;
  3333. result = false;
  3334. llwarns << "Changing bad hole type (" << (S32)hole_type
  3335. << ") to be LL_PCODE_HOLE_SAME" << llendl;
  3336. }
  3337. if (path_type < LL_PCODE_PATH_MIN ||
  3338. path_type > LL_PCODE_PATH_MAX)
  3339. {
  3340. // Bad path. Make it linear.
  3341. result = false;
  3342. llwarns << "Changing bad path (" << (S32)path
  3343. << ") to be LL_PCODE_PATH_LINE" << llendl;
  3344. path = LL_PCODE_PATH_LINE;
  3345. }
  3346. mProfileParams.setCurveType(profile);
  3347. mPathParams.setCurveType(path);
  3348. return result;
  3349. }
  3350. // static
  3351. bool LLVolumeParams::validate(U8 prof_curve, F32 prof_begin, F32 prof_end,
  3352. F32 hollow, U8 path_curve, F32 path_begin,
  3353. F32 path_end, F32 scx, F32 scy, F32 shx, F32 shy,
  3354. F32 twistend, F32 twistbegin, F32 radiusoffset,
  3355. F32 tx, F32 ty, F32 revolutions, F32 skew)
  3356. {
  3357. LLVolumeParams test_params;
  3358. return test_params.setType(prof_curve, path_curve) &&
  3359. test_params.setBeginAndEndS(prof_begin, prof_end) &&
  3360. test_params.setBeginAndEndT(path_begin, path_end) &&
  3361. test_params.setHollow(hollow) &&
  3362. test_params.setTwistBegin(twistbegin) &&
  3363. test_params.setTwistEnd(twistend) &&
  3364. test_params.setRatio(scx, scy) &&
  3365. test_params.setShear(shx, shy) &&
  3366. test_params.setTaper(tx, ty) &&
  3367. test_params.setRevolutions(revolutions) &&
  3368. test_params.setRadiusOffset(radiusoffset) &&
  3369. test_params.setSkew(skew);
  3370. }
  3371. // Attempt to approximate the number of triangles that will result from
  3372. // generating a volume LoD set for the supplied LLVolumeParams: inaccurate, but
  3373. // a close enough approximation for determining streaming cost
  3374. void LLVolume::getLoDTriangleCounts(S32* counts)
  3375. {
  3376. const LLPathParams& path = mParams.getPathParams();
  3377. const LLProfileParams& prof = mParams.getProfileParams();
  3378. if (mTrianglesCache && mTrianglesCache->mPathParams == path &&
  3379. mTrianglesCache->mProfileParams == prof)
  3380. {
  3381. counts[0] = mTrianglesCache->mTriangles[0];
  3382. counts[1] = mTrianglesCache->mTriangles[1];
  3383. counts[2] = mTrianglesCache->mTriangles[2];
  3384. counts[3] = mTrianglesCache->mTriangles[3];
  3385. ++sLODCacheHit;
  3386. return;
  3387. }
  3388. ++sLODCacheMiss;
  3389. if (!mTrianglesCache)
  3390. {
  3391. mTrianglesCache = new TrianglesPerLODCache;
  3392. }
  3393. mTrianglesCache->mPathParams = path;
  3394. mTrianglesCache->mProfileParams = prof;
  3395. static const F32 details[] = { 1.f, 1.5f, 2.5f, 4.f };
  3396. #if LL_GNUC && GCC_VERSION >= 80000
  3397. # pragma GCC unroll 4
  3398. #elif LL_CLANG
  3399. # pragma clang loop unroll(full)
  3400. #endif
  3401. for (S32 i = 0; i < 4; ++i)
  3402. {
  3403. const F32& detail = details[i];
  3404. S32 path_points = LLPath::getNumPoints(path, detail);
  3405. S32 profile_points = LLProfile::getNumPoints(prof, false, detail);
  3406. S32 count = (profile_points - 1) * 2 * (path_points - 1);
  3407. count += profile_points * 2;
  3408. counts[i] = mTrianglesCache->mTriangles[i] = count;
  3409. }
  3410. }
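// Example of the estimate above: a surface with P profile points and Q
// path points is counted as (P - 1) * 2 * (Q - 1) side triangles plus
// P * 2 extra (roughly covering the end caps); e.g. P = 5, Q = 4 gives
// 24 + 10 = 34 triangles.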
  3411. S32 LLVolume::getNumTriangles(S32* vcount) const
  3412. {
  3413. U32 triangle_count = 0;
  3414. U32 vertex_count = 0;
  3415. for (S32 i = 0; i < getNumVolumeFaces(); ++i)
  3416. {
  3417. const LLVolumeFace& face = getVolumeFace(i);
  3418. triangle_count += face.mNumIndices / 3;
  3419. vertex_count += face.mNumVertices;
  3420. }
  3421. if (vcount)
  3422. {
  3423. *vcount = vertex_count;
  3424. }
  3425. return triangle_count;
  3426. }
  3427. void LLVolume::generateSilhouetteVertices(std::vector<LLVector3>& vertices,
  3428. std::vector<LLVector3>& normals,
  3429. const LLVector3& obj_cam_vec_in,
  3430. const LLMatrix4& mat_in,
  3431. const LLMatrix3& norm_mat_in,
  3432. S32 face_mask)
  3433. {
  3434. vertices.clear();
  3435. normals.clear();
  3436. if ((mParams.getSculptType() & LL_SCULPT_TYPE_MASK) == LL_SCULPT_TYPE_MESH)
  3437. {
  3438. return;
  3439. }
  3440. LLMatrix4a mat;
  3441. mat.loadu(mat_in);
  3442. LLMatrix4a norm_mat;
  3443. norm_mat.loadu(norm_mat_in);
  3444. LLVector4a obj_cam_vec;
  3445. obj_cam_vec.load3(obj_cam_vec_in.mV);
  3446. LLVector4a c1, c2, t, norm, view;
  3447. std::vector<U8> f_facing;
  3448. S32 cur_index = 0;
  3449. // For each face
  3450. for (face_list_t::iterator iter = mVolumeFaces.begin();
  3451. iter != mVolumeFaces.end(); ++iter)
  3452. {
  3453. LLVolumeFace& face = *iter;
  3454. if (!(face_mask & (0x1 << cur_index++)) || face.mNumIndices == 0 ||
  3455. face.mEdge.empty())
  3456. {
  3457. continue;
  3458. }
  3459. if ((face.mTypeMask & LLVolumeFace::CAP_MASK))
  3460. {
  3461. LLVector4a* v = (LLVector4a*)face.mPositions;
  3462. LLVector4a* n = (LLVector4a*)face.mNormals;
  3463. for (S32 j = 0, count = face.mNumIndices / 3; j < count; ++j)
  3464. {
  3465. for (S32 k = 0; k < 3; ++k)
  3466. {
  3467. S32 index = face.mEdge[j * 3 + k];
  3468. if (index == -1)
  3469. {
  3470. // Silhouette edge, currently only cubes, so no other
  3471. // conditions
  3472. S32 v1 = face.mIndices[j * 3 + k];
  3473. S32 v2 = face.mIndices[j * 3 + ((k + 1) % 3)];
  3474. mat.affineTransform(v[v1], t);
  3475. vertices.emplace_back(t[0], t[1], t[2]);
  3476. norm_mat.rotate(n[v1], t);
  3477. t.normalize3fast();
  3478. normals.emplace_back(t[0], t[1], t[2]);
  3479. mat.affineTransform(v[v2], t);
  3480. vertices.emplace_back(t[0], t[1], t[2]);
  3481. norm_mat.rotate(n[v2], t);
  3482. t.normalize3fast();
  3483. normals.emplace_back(t[0], t[1], t[2]);
  3484. }
  3485. }
  3486. }
  3487. }
  3488. else
  3489. {
  3490. constexpr U8 AWAY = 0x01;
  3491. constexpr U8 TOWARDS = 0x02;
  3492. // For each triangle
  3493. f_facing.clear();
  3494. f_facing.resize(face.mNumIndices / 3);
  3495. LLVector4a* v = (LLVector4a*)face.mPositions;
  3496. LLVector4a* n = (LLVector4a*)face.mNormals;
  3497. for (S32 j = 0, count = face.mNumIndices / 3; j < count; ++j)
  3498. {
  3499. // Approximate normal
  3500. S32 v1 = face.mIndices[j * 3];
  3501. S32 v2 = face.mIndices[j * 3 + 1];
  3502. S32 v3 = face.mIndices[j * 3 + 2];
  3503. c1.setSub(v[v1], v[v2]);
  3504. c2.setSub(v[v2], v[v3]);
  3505. norm.setCross3(c1, c2);
  3506. if (norm.dot3(norm) < 0.00000001f)
  3507. {
  3508. f_facing[j] = AWAY | TOWARDS;
  3509. }
  3510. else
  3511. {
  3512. // Get view vector
  3513. view.setSub(obj_cam_vec, v[v1]);
  3514. bool away = view.dot3(norm) > 0.f;
  3515. if (away)
  3516. {
  3517. f_facing[j] = AWAY;
  3518. }
  3519. else
  3520. {
  3521. f_facing[j] = TOWARDS;
  3522. }
  3523. }
  3524. }
  3525. // For each triangle
  3526. for (S32 j = 0, count = face.mNumIndices / 3; j < count; ++j)
  3527. {
  3528. if (f_facing[j] == (AWAY | TOWARDS))
  3529. {
  3530. // This is a degenerate triangle. Take neighbor facing
  3531. // (degenerate faces get facing of one of their neighbors)
  3532. // *FIX IF NEEDED: this does not deal with neighboring
  3533. // degenerate faces
  3534. for (S32 k = 0; k < 3; ++k)
  3535. {
  3536. S32 index = face.mEdge[j * 3 + k];
  3537. if (index != -1)
  3538. {
  3539. f_facing[j] = f_facing[index];
  3540. break;
  3541. }
  3542. }
  3543. continue; // Skip degenerate face
  3544. }
  3545. // For each edge
  3546. for (S32 k = 0; k < 3; ++k)
  3547. {
  3548. S32 index = face.mEdge[j * 3 + k];
  3549. if (index != -1 && f_facing[index] == (AWAY | TOWARDS))
  3550. {
  3551. // Our neighbor is degenerate, make him face our direction
  3552. f_facing[face.mEdge[j * 3 + k]] = f_facing[j];
  3553. continue;
  3554. }
  3555. // index == -1 ==> no neighbor, MUST be a silhouette edge
  3556. if (index == -1 || (f_facing[index] & f_facing[j]) == 0)
  3557. {
  3558. // We found a silhouette edge
  3559. S32 v1 = face.mIndices[j * 3 + k];
  3560. S32 v2 = face.mIndices[j * 3 + (k + 1) % 3];
  3561. mat.affineTransform(v[v1], t);
  3562. vertices.emplace_back(t[0], t[1], t[2]);
  3563. norm_mat.rotate(n[v1], t);
  3564. t.normalize3fast();
  3565. normals.emplace_back(t[0], t[1], t[2]);
  3566. mat.affineTransform(v[v2], t);
  3567. vertices.emplace_back(t[0], t[1], t[2]);
  3568. norm_mat.rotate(n[v2], t);
  3569. t.normalize3fast();
  3570. normals.emplace_back(t[0], t[1], t[2]);
  3571. }
  3572. }
  3573. }
  3574. }
  3575. }
  3576. }
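// In the non-cap branch above, an edge is emitted as part of the
// silhouette when it has no neighbor (index == -1) or when its two
// adjacent triangles do not share a facing bit, i.e. one faces the camera
// and the other faces away ((f_facing[index] & f_facing[j]) == 0).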
  3577. S32 LLVolume::lineSegmentIntersect(const LLVector4a& start,
  3578. const LLVector4a& end,
  3579. S32 face,
  3580. LLVector4a* intersection,
  3581. LLVector2* tex_coord,
  3582. LLVector4a* normal,
  3583. LLVector4a* tangent_out)
  3584. {
  3585. S32 hit_face = -1;
  3586. S32 start_face;
  3587. S32 end_face;
  3588. if (face == -1) // ALL_SIDES
  3589. {
  3590. start_face = 0;
  3591. end_face = getNumVolumeFaces() - 1;
  3592. }
  3593. else
  3594. {
  3595. start_face = face;
  3596. end_face = face;
  3597. }
  3598. LLVector4a dir;
  3599. dir.setSub(end, start);
  3600. F32 closest_t = 2.f; // must be larger than 1
  3601. end_face = llmin(end_face, getNumVolumeFaces() - 1);
  3602. LLVector4a box_center, box_size, intersect, n1, n2, n3, t1, t2, t3;
  3603. for (S32 i = start_face; i <= end_face; ++i)
  3604. {
  3605. LLVolumeFace& face = mVolumeFaces[i];
  3606. box_center.setAdd(face.mExtents[0], face.mExtents[1]);
  3607. box_center.mul(0.5f);
  3608. box_size.setSub(face.mExtents[1], face.mExtents[0]);
  3609. if (LLLineSegmentBoxIntersect(start, end, box_center, box_size))
  3610. {
  3611. // If the caller wants tangents, we may need to generate them
  3612. if (tangent_out != NULL)
  3613. {
  3614. genTangents(i);
  3615. }
  3616. if (isUnique())
  3617. {
  3618. // Do not bother with an octree for flexi volumes
  3619. S32 tri_count = face.mNumIndices / 3;
  3620. for (S32 j = 0; j < tri_count; ++j)
  3621. {
  3622. U16 idx0 = face.mIndices[j * 3];
  3623. U16 idx1 = face.mIndices[j * 3 + 1];
  3624. U16 idx2 = face.mIndices[j * 3 + 2];
  3625. const LLVector4a& v0 = face.mPositions[idx0];
  3626. const LLVector4a& v1 = face.mPositions[idx1];
  3627. const LLVector4a& v2 = face.mPositions[idx2];
  3628. F32 a, b, t;
  3629. if (LLTriangleRayIntersect(v0, v1, v2, start, dir,
  3630. a, b, t))
  3631. {
  3632. if (t >= 0.f && // if hit is after start
  3633. t <= 1.f && // and before end
  3634. t < closest_t) // and this hit is closer
  3635. {
  3636. closest_t = t;
  3637. hit_face = i;
  3638. if (intersection != NULL)
  3639. {
  3640. intersect = dir;
  3641. intersect.mul(closest_t);
  3642. intersect.add(start);
  3643. *intersection = intersect;
  3644. }
  3645. if (tex_coord != NULL)
  3646. {
  3647. LLVector2* tc = (LLVector2*) face.mTexCoords;
  3648. *tex_coord = (1.f - a - b) * tc[idx0] +
  3649. a * tc[idx1] + b * tc[idx2];
  3650. }
  3651. if (normal != NULL)
  3652. {
  3653. LLVector4a* norm = face.mNormals;
  3654. n1 = norm[idx0];
  3655. n1.mul(1.f - a - b);
  3656. n2 = norm[idx1];
  3657. n2.mul(a);
  3658. n3 = norm[idx2];
  3659. n3.mul(b);
  3660. n1.add(n2);
  3661. n1.add(n3);
  3662. *normal = n1;
  3663. }
  3664. if (tangent_out != NULL)
  3665. {
  3666. LLVector4a* tangents = face.mTangents;
  3667. t1 = tangents[idx0];
  3668. t1.mul(1.f - a - b);
  3669. t2 = tangents[idx1];
  3670. t2.mul(a);
  3671. t3 = tangents[idx2];
  3672. t3.mul(b);
  3673. t1.add(t2);
  3674. t1.add(t3);
  3675. *tangent_out = t1;
  3676. }
  3677. }
  3678. }
  3679. }
  3680. }
  3681. else
  3682. {
  3683. if (!face.mOctree)
  3684. {
  3685. face.createOctree();
  3686. }
  3687. LLOctreeTriangleRayIntersectNoOwnership intersect(start,
  3688. dir, &face,
  3689. &closest_t,
  3690. intersection,
  3691. tex_coord,
  3692. normal,
  3693. tangent_out);
  3694. intersect.traverse(face.mOctree);
  3695. if (intersect.mHitFace)
  3696. {
  3697. hit_face = i;
  3698. }
  3699. }
  3700. }
  3701. }
  3702. return hit_face;
  3703. }
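// Note: the hit parameter t is expressed along the start-to-end segment,
// so the hit point is reconstructed as start + t * (end - start) with t in
// [0, 1]; closest_t starts at 2.f so that any valid hit replaces it.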
  3704. class LLVertexIndexPair
  3705. {
  3706. public:
  3707. LL_INLINE LLVertexIndexPair(const LLVector3& vertex, S32 index)
  3708. : mVertex(vertex),
  3709. mIndex(index)
  3710. {
  3711. }
  3712. public:
  3713. LLVector3 mVertex;
  3714. S32 mIndex;
  3715. };
  3716. constexpr F32 VERTEX_SLOP = 0.00001f;
  3717. struct lessVertex
  3718. {
  3719. bool operator()(const LLVertexIndexPair* a, const LLVertexIndexPair* b)
  3720. {
  3721. constexpr F32 slop = VERTEX_SLOP;
  3722. if (a->mVertex.mV[0] + slop < b->mVertex.mV[0])
  3723. {
  3724. return true;
  3725. }
  3726. if (a->mVertex.mV[0] - slop > b->mVertex.mV[0])
  3727. {
  3728. return false;
  3729. }
  3730. if (a->mVertex.mV[1] + slop < b->mVertex.mV[1])
  3731. {
  3732. return true;
  3733. }
  3734. if (a->mVertex.mV[1] - slop > b->mVertex.mV[1])
  3735. {
  3736. return false;
  3737. }
  3738. return a->mVertex.mV[2] + slop < b->mVertex.mV[2];
  3739. }
  3740. };
  3741. struct lessTriangle
  3742. {
  3743. bool operator()(const S32* a, const S32* b)
  3744. {
  3745. if (*a < *b)
  3746. {
  3747. return true;
  3748. }
  3749. else if (*a > *b)
  3750. {
  3751. return false;
  3752. }
  3753. if (*(a + 1) < *(b + 1))
  3754. {
  3755. return true;
  3756. }
  3757. else if (*(a + 1) > *(b + 1))
  3758. {
  3759. return false;
  3760. }
  3761. return *(a + 2) < *(b + 2);
  3762. }
  3763. };
  3764. bool LLVolumeParams::importFile(LLFILE* fp)
  3765. {
  3766. constexpr S32 BUFSIZE = 16384;
  3767. char buffer[BUFSIZE];
  3768. // *NOTE: changing the size or type of this buffer would require changing
  3769. // the sscanf below.
  3770. char keyword[256];
  3771. keyword[0] = 0;
  3772. while (!feof(fp))
  3773. {
  3774. if (fgets(buffer, BUFSIZE, fp) == NULL)
  3775. {
  3776. buffer[0] = '\0';
  3777. }
  3778. sscanf(buffer, " %255s", keyword);
  3779. if (!strcmp("{", keyword))
  3780. {
  3781. continue;
  3782. }
  3783. if (!strcmp("}", keyword))
  3784. {
  3785. break;
  3786. }
  3787. else if (!strcmp("profile", keyword))
  3788. {
  3789. mProfileParams.importFile(fp);
  3790. }
  3791. else if (!strcmp("path", keyword))
  3792. {
  3793. mPathParams.importFile(fp);
  3794. }
  3795. else
  3796. {
  3797. llwarns << "Unknown keyword " << keyword << " in volume import."
  3798. << llendl;
  3799. }
  3800. }
  3801. return true;
  3802. }
  3803. bool LLVolumeParams::exportFile(LLFILE* fp) const
  3804. {
  3805. fprintf(fp, "\tshape 0\n");
  3806. fprintf(fp, "\t{\n");
  3807. mPathParams.exportFile(fp);
  3808. mProfileParams.exportFile(fp);
  3809. fprintf(fp, "\t}\n");
  3810. return true;
  3811. }
  3812. bool LLVolumeParams::importLegacyStream(std::istream& input_stream)
  3813. {
  3814. constexpr S32 BUFSIZE = 16384;
  3815. // *NOTE: changing the size or type of this buffer would require changing
  3816. // the sscanf below.
  3817. char buffer[BUFSIZE];
  3818. char keyword[256];
  3819. keyword[0] = 0;
  3820. while (input_stream.good())
  3821. {
  3822. input_stream.getline(buffer, BUFSIZE);
  3823. sscanf(buffer, " %255s", keyword);
  3824. if (!strcmp("{", keyword))
  3825. {
  3826. continue;
  3827. }
  3828. if (!strcmp("}", keyword))
  3829. {
  3830. break;
  3831. }
  3832. if (!strcmp("profile", keyword))
  3833. {
  3834. mProfileParams.importLegacyStream(input_stream);
  3835. }
  3836. else if (!strcmp("path", keyword))
  3837. {
  3838. mPathParams.importLegacyStream(input_stream);
  3839. }
  3840. else
  3841. {
  3842. llwarns << "Unknown keyword " << keyword << " in volume import."
  3843. << llendl;
  3844. }
  3845. }
  3846. return true;
  3847. }
  3848. bool LLVolumeParams::exportLegacyStream(std::ostream& output_stream) const
  3849. {
  3850. output_stream <<"\tshape 0\n";
  3851. output_stream <<"\t{\n";
  3852. mPathParams.exportLegacyStream(output_stream);
  3853. mProfileParams.exportLegacyStream(output_stream);
  3854. output_stream << "\t}\n";
  3855. return true;
  3856. }
  3857. LLSD LLVolumeParams::sculptAsLLSD() const
  3858. {
  3859. LLSD sd = LLSD();
  3860. sd["id"] = getSculptID();
  3861. sd["type"] = getSculptType();
  3862. return sd;
  3863. }
  3864. bool LLVolumeParams::sculptFromLLSD(LLSD& sd)
  3865. {
  3866. setSculptID(sd["id"].asUUID(), (U8)sd["type"].asInteger());
  3867. return true;
  3868. }
  3869. LLSD LLVolumeParams::asLLSD() const
  3870. {
  3871. LLSD sd = LLSD();
  3872. sd["path"] = mPathParams;
  3873. sd["profile"] = mProfileParams;
  3874. sd["sculpt"] = sculptAsLLSD();
  3875. return sd;
  3876. }
  3877. bool LLVolumeParams::fromLLSD(LLSD& sd)
  3878. {
  3879. mPathParams.fromLLSD(sd["path"]);
  3880. mProfileParams.fromLLSD(sd["profile"]);
  3881. sculptFromLLSD(sd["sculpt"]);
  3882. return true;
  3883. }
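// The LLSD produced/consumed above has the shape:
// { "path": <path params>, "profile": <profile params>,
//   "sculpt": { "id": <UUID>, "type": <integer> } }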
  3884. void LLVolumeParams::reduceS(F32 begin, F32 end)
  3885. {
  3886. begin = llclampf(begin);
  3887. end = llclampf(end);
  3888. if (begin > end)
  3889. {
  3890. F32 temp = begin;
  3891. begin = end;
  3892. end = temp;
  3893. }
  3894. F32 a = mProfileParams.getBegin();
  3895. F32 b = mProfileParams.getEnd();
  3896. mProfileParams.setBegin(a + begin * (b - a));
  3897. mProfileParams.setEnd(a + end * (b - a));
  3898. }
  3899. void LLVolumeParams::reduceT(F32 begin, F32 end)
  3900. {
  3901. begin = llclampf(begin);
  3902. end = llclampf(end);
  3903. if (begin > end)
  3904. {
  3905. F32 temp = begin;
  3906. begin = end;
  3907. end = temp;
  3908. }
  3909. F32 a = mPathParams.getBegin();
  3910. F32 b = mPathParams.getEnd();
  3911. mPathParams.setBegin(a + begin * (b - a));
  3912. mPathParams.setEnd(a + end * (b - a));
  3913. }
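// Both reducers above remap the [begin, end] sub-range linearly into the
// current cut: e.g. with a profile cut of [0.2, 0.8], reduceS(0.5, 1.0)
// narrows it to [0.5, 0.8].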
  3914. constexpr F32 MIN_CONCAVE_PROFILE_WEDGE = 0.125f; // 1/8 unity
  3915. constexpr F32 MIN_CONCAVE_PATH_WEDGE = 0.111111f; // 1/9 unity
  3916. // Returns true if the shape can be approximated with a convex shape for
3917. // collision purposes
  3918. bool LLVolumeParams::isConvex() const
  3919. {
  3920. if (!getSculptID().isNull())
  3921. {
  3922. // Cannot determine, be safe and say no:
  3923. return false;
  3924. }
  3925. F32 path_length = mPathParams.getEnd() - mPathParams.getBegin();
  3926. F32 hollow = mProfileParams.getHollow();
  3927. U8 path_type = mPathParams.getCurveType();
  3928. if (path_length > MIN_CONCAVE_PATH_WEDGE &&
  3929. (mPathParams.getTwistEnd() != mPathParams.getTwistBegin() ||
  3930. (hollow > 0.f && LL_PCODE_PATH_LINE != path_type)))
  3931. {
  3932. // Twist along a "not too short" path is concave
  3933. return false;
  3934. }
  3935. F32 profile_length = mProfileParams.getEnd() - mProfileParams.getBegin();
  3936. bool same_hole = hollow == 0.f ||
  3937. (mProfileParams.getCurveType() &
  3938. LL_PCODE_HOLE_MASK) == LL_PCODE_HOLE_SAME;
  3939. F32 min_profile_wedge = MIN_CONCAVE_PROFILE_WEDGE;
  3940. U8 profile_type = mProfileParams.getCurveType() & LL_PCODE_PROFILE_MASK;
  3941. if (profile_type == LL_PCODE_PROFILE_CIRCLE_HALF)
  3942. {
  3943. // It is a sphere and spheres get twice the minimum profile wedge
  3944. min_profile_wedge = 2.f * MIN_CONCAVE_PROFILE_WEDGE;
  3945. }
  3946. bool convex_profile = // trivially convex
  3947. ((profile_length == 1.f ||
  3948. profile_length <= 0.5f) && hollow == 0.f)
3949. // effectively convex (even when hollow)
  3950. || (profile_length <= min_profile_wedge &&
  3951. same_hole);
  3952. if (!convex_profile)
  3953. {
  3954. // Profile is concave
  3955. return false;
  3956. }
  3957. if (path_type == LL_PCODE_PATH_LINE)
  3958. {
  3959. // Straight paths with convex profile
  3960. return true;
  3961. }
  3962. if (path_length < 1.f && path_length > 0.5f)
  3963. {
3964. // Path is concave
  3965. return false;
  3966. }
  3967. // We are left with spheres, toroids and tubes
  3968. if (profile_type == LL_PCODE_PROFILE_CIRCLE_HALF)
  3969. {
  3970. // At this stage all spheres must be convex
  3971. return true;
  3972. }
  3973. // If it is a toroid or tube, effectively convex
  3974. return path_length <= MIN_CONCAVE_PATH_WEDGE;
  3975. }
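// Example: a plain box (linear path, full square profile, no hollow and no
// twist, as produced by setCube() below) passes the convex_profile test
// and returns true at the LL_PCODE_PATH_LINE check above, so its collision
// shape may be treated as convex.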
  3976. // Debug
  3977. void LLVolumeParams::setCube()
  3978. {
  3979. mProfileParams.setCurveType(LL_PCODE_PROFILE_SQUARE);
  3980. mProfileParams.setBegin(0.f);
  3981. mProfileParams.setEnd(1.f);
  3982. mProfileParams.setHollow(0.f);
  3983. mPathParams.setBegin(0.f);
  3984. mPathParams.setEnd(1.f);
  3985. mPathParams.setScale(1.f, 1.f);
  3986. mPathParams.setShear(0.f, 0.f);
  3987. mPathParams.setCurveType(LL_PCODE_PATH_LINE);
  3988. mPathParams.setTwistBegin(0.f);
  3989. mPathParams.setTwistEnd(0.f);
  3990. mPathParams.setRadiusOffset(0.f);
  3991. mPathParams.setTaper(0.f, 0.f);
  3992. mPathParams.setRevolutions(0.f);
  3993. mPathParams.setSkew(0.f);
  3994. }
  3995. LLFaceID LLVolume::generateFaceMask()
  3996. {
  3997. LLFaceID new_mask = 0x0000;
  3998. switch (mParams.getProfileParams().getCurveType() & LL_PCODE_PROFILE_MASK)
  3999. {
  4000. case LL_PCODE_PROFILE_CIRCLE:
  4001. case LL_PCODE_PROFILE_CIRCLE_HALF:
  4002. new_mask |= LL_FACE_OUTER_SIDE_0;
  4003. break;
  4004. case LL_PCODE_PROFILE_SQUARE:
  4005. {
  4006. for (S32 side = mParams.getProfileParams().getBegin() * 4.f,
  4007. count = llceil(mParams.getProfileParams().getEnd() * 4.f);
  4008. side < count; ++side)
  4009. {
  4010. new_mask |= LL_FACE_OUTER_SIDE_0 << side;
  4011. }
  4012. break;
  4013. }
  4014. case LL_PCODE_PROFILE_ISOTRI:
  4015. case LL_PCODE_PROFILE_EQUALTRI:
  4016. case LL_PCODE_PROFILE_RIGHTTRI:
  4017. {
  4018. for (S32 side = mParams.getProfileParams().getBegin() * 3.f,
  4019. count = llceil(mParams.getProfileParams().getEnd() * 3.f);
  4020. side < count; ++side)
  4021. {
  4022. new_mask |= LL_FACE_OUTER_SIDE_0 << side;
  4023. }
  4024. break;
  4025. }
  4026. default:
  4027. llerrs << "Unknown profile !" << llendl;
  4028. }
  4029. // Handle hollow objects
  4030. if (mParams.getProfileParams().getHollow() > 0)
  4031. {
  4032. new_mask |= LL_FACE_INNER_SIDE;
  4033. }
  4034. // Handle open profile curves
  4035. if (mProfile.isOpen())
  4036. {
  4037. new_mask |= LL_FACE_PROFILE_BEGIN | LL_FACE_PROFILE_END;
  4038. }
  4039. // Handle open path curves
  4040. if (mPathp->isOpen())
  4041. {
  4042. new_mask |= LL_FACE_PATH_BEGIN | LL_FACE_PATH_END;
  4043. }
  4044. return new_mask;
  4045. }
  4046. bool LLVolume::isFaceMaskValid(LLFaceID face_mask)
  4047. {
  4048. LLFaceID test_mask = 0;
  4049. for (S32 i = 0, count = getNumFaces(); i < count; ++i)
  4050. {
  4051. test_mask |= mProfile.mFaces[i].mFaceID;
  4052. }
  4053. return test_mask == face_mask;
  4054. }
  4055. std::ostream& operator<<(std::ostream& s, const LLProfileParams& prof_params)
  4056. {
  4057. s << "{type=" << (U32)prof_params.mCurveType;
  4058. s << ", begin=" << prof_params.mBegin;
  4059. s << ", end=" << prof_params.mEnd;
  4060. s << ", hollow=" << prof_params.mHollow;
  4061. s << "}";
  4062. return s;
  4063. }
  4064. std::ostream& operator<<(std::ostream& s, const LLPathParams& path_params)
  4065. {
  4066. s << "{type=" << (U32)path_params.mCurveType;
  4067. s << ", begin=" << path_params.mBegin;
  4068. s << ", end=" << path_params.mEnd;
  4069. s << ", twist=" << path_params.mTwistEnd;
  4070. s << ", scale=" << path_params.mScale;
  4071. s << ", shear=" << path_params.mShear;
  4072. s << ", twist_begin=" << path_params.mTwistBegin;
  4073. s << ", radius_offset=" << path_params.mRadiusOffset;
  4074. s << ", taper=" << path_params.mTaper;
  4075. s << ", revolutions=" << path_params.mRevolutions;
  4076. s << ", skew=" << path_params.mSkew;
  4077. s << "}";
  4078. return s;
  4079. }
  4080. std::ostream& operator<<(std::ostream& s, const LLVolumeParams& volume_params)
  4081. {
  4082. s << "{profileparams = " << volume_params.mProfileParams;
  4083. s << ", pathparams = " << volume_params.mPathParams;
  4084. s << "}";
  4085. return s;
  4086. }
  4087. std::ostream& operator<<(std::ostream& s, const LLProfile& profile)
  4088. {
  4089. s << " {open=" << (U32)profile.mOpen;
  4090. s << ", dirty=" << profile.mDirty;
  4091. s << ", totalout=" << profile.mTotalOut;
  4092. s << ", total=" << profile.mTotal;
  4093. s << "}";
  4094. return s;
  4095. }
  4096. std::ostream& operator<<(std::ostream& s, const LLPath& path)
  4097. {
  4098. s << "{open=" << (U32)path.mOpen;
  4099. s << ", dirty=" << path.mDirty;
  4100. s << ", step=" << path.mStep;
  4101. s << ", total=" << path.mTotal;
  4102. s << "}";
  4103. return s;
  4104. }
  4105. std::ostream& operator<<(std::ostream& s, const LLVolume& volume)
  4106. {
  4107. s << "{params = " << volume.getParams();
  4108. s << ", path = " << *volume.mPathp;
  4109. s << ", profile = " << volume.mProfile;
  4110. s << "}";
  4111. return s;
  4112. }
  4113. std::ostream& operator<<(std::ostream& s, const LLVolume* volumep)
  4114. {
  4115. s << "{params = " << volumep->getParams();
  4116. s << ", path = " << *(volumep->mPathp);
  4117. s << ", profile = " << volumep->mProfile;
  4118. s << "}";
  4119. return s;
  4120. }
  4121. LLVolumeFace::LLVolumeFace()
  4122. : mID(0),
  4123. mTypeMask(0),
  4124. mBeginS(0),
  4125. mBeginT(0),
  4126. mNumS(0),
  4127. mNumT(0),
  4128. mNumVertices(0),
  4129. mNumAllocatedVertices(0),
  4130. mNumIndices(0),
  4131. mPositions(NULL),
  4132. mNormals(NULL),
  4133. mTangents(NULL),
  4134. mTexCoords(NULL),
  4135. mIndices(NULL),
  4136. mWeights(NULL),
  4137. mNormalizedScale(1.f, 1.f, 1.f),
  4138. mOctree(NULL),
  4139. mOctreeTriangles(NULL),
  4140. mOptimized(false),
  4141. mWeightsScrubbed(false)
  4142. {
  4143. mExtents = (LLVector4a*)allocate_volume_mem(sizeof(LLVector4a) * 3);
  4144. if (mExtents)
  4145. {
  4146. mExtents[0].splat(-0.5f);
  4147. mExtents[1].splat(0.5f);
  4148. mCenter = mExtents + 2;
  4149. }
  4150. else
  4151. {
  4152. mCenter = NULL;
  4153. }
  4154. }
  4155. LLVolumeFace::LLVolumeFace(const LLVolumeFace& src)
  4156. : mID(0),
  4157. mTypeMask(0),
  4158. mBeginS(0),
  4159. mBeginT(0),
  4160. mNumS(0),
  4161. mNumT(0),
  4162. mNumVertices(0),
  4163. mNumAllocatedVertices(0),
  4164. mNumIndices(0),
  4165. mPositions(NULL),
  4166. mNormals(NULL),
  4167. mTangents(NULL),
  4168. mTexCoords(NULL),
  4169. mIndices(NULL),
  4170. mWeights(NULL),
  4171. mNormalizedScale(1.f, 1.f, 1.f),
  4172. mOctree(NULL),
  4173. mOctreeTriangles(NULL),
  4174. mOptimized(false),
  4175. mWeightsScrubbed(false)
  4176. {
  4177. mExtents = (LLVector4a*)allocate_volume_mem(sizeof(LLVector4a) * 3);
  4178. if (mExtents)
  4179. {
  4180. mCenter = mExtents + 2;
  4181. }
  4182. else
  4183. {
  4184. mCenter = NULL;
  4185. }
  4186. *this = src;
  4187. }
  4188. LLVolumeFace& LLVolumeFace::operator=(const LLVolumeFace& src)
  4189. {
  4190. if (&src == this)
  4191. {
  4192. // Self assignment, do nothing
  4193. return *this;
  4194. }
  4195. mID = src.mID;
  4196. mTypeMask = src.mTypeMask;
  4197. mBeginS = src.mBeginS;
  4198. mBeginT = src.mBeginT;
  4199. mNumS = src.mNumS;
  4200. mNumT = src.mNumT;
  4201. mExtents[0] = src.mExtents[0];
  4202. mExtents[1] = src.mExtents[1];
  4203. *mCenter = *src.mCenter;
  4204. mNumVertices = 0;
  4205. mNumIndices = 0;
  4206. freeData();
  4207. resizeVertices(src.mNumVertices);
  4208. resizeIndices(src.mNumIndices);
  4209. if (mNumVertices)
  4210. {
  4211. S32 vert_size = mNumVertices * sizeof(LLVector4a);
  4212. S32 tc_size = (mNumVertices * sizeof(LLVector2) + 0xF) & ~0xF;
  4213. LLVector4a::memcpyNonAliased16((F32*)mPositions, (F32*)src.mPositions,
  4214. vert_size);
  4215. if (src.mNormals)
  4216. {
  4217. LLVector4a::memcpyNonAliased16((F32*)mNormals, (F32*)src.mNormals,
  4218. vert_size);
  4219. }
  4220. if (src.mTexCoords)
  4221. {
  4222. LLVector4a::memcpyNonAliased16((F32*)mTexCoords,
  4223. (F32*)src.mTexCoords, tc_size);
  4224. }
  4225. if (src.mTangents)
  4226. {
  4227. if (allocateTangents(src.mNumVertices))
  4228. {
  4229. LLVector4a::memcpyNonAliased16((F32*)mTangents,
  4230. (F32*)src.mTangents, vert_size);
  4231. }
  4232. }
  4233. else if (mTangents)
  4234. {
  4235. free_volume_mem(mTangents);
  4236. mTangents = NULL;
  4237. }
  4238. if (src.mWeights)
  4239. {
  4240. if (allocateWeights(src.mNumVertices))
  4241. {
  4242. LLVector4a::memcpyNonAliased16((F32*)mWeights,
  4243. (F32*)src.mWeights, vert_size);
  4244. }
  4245. }
  4246. else if (mWeights)
  4247. {
  4248. free_volume_mem(mWeights);
  4249. mWeights = NULL;
  4250. }
  4251. mWeightsScrubbed = src.mWeightsScrubbed;
  4252. }
  4253. if (mNumIndices)
  4254. {
  4255. S32 idx_size = (mNumIndices * sizeof(U16) + 0xF) & ~0xF;
  4256. LLVector4a::memcpyNonAliased16((F32*)mIndices, (F32*)src.mIndices,
  4257. idx_size);
  4258. }
  4259. mOptimized = src.mOptimized;
  4260. mNormalizedScale = src.mNormalizedScale;
  4261. // delete
  4262. return *this;
  4263. }
  4264. LLVolumeFace::~LLVolumeFace()
  4265. {
  4266. if (mExtents)
  4267. {
  4268. free_volume_mem(mExtents);
  4269. mExtents = mCenter = NULL;
  4270. }
  4271. freeData();
  4272. }
  4273. void LLVolumeFace::freeData()
  4274. {
  4275. if (mPositions)
  4276. {
  4277. free_volume_mem_64(mPositions);
  4278. mPositions = NULL;
  4279. }
  4280. // Normals and texture coordinates are part of the same buffer as
  4281. // mPositions, do not free them separately
  4282. mNormals = NULL;
  4283. mTexCoords = NULL;
  4284. if (mIndices)
  4285. {
  4286. free_volume_mem(mIndices);
  4287. mIndices = NULL;
  4288. }
  4289. if (mTangents)
  4290. {
  4291. free_volume_mem(mTangents);
  4292. mTangents = NULL;
  4293. }
  4294. if (mWeights)
  4295. {
  4296. free_volume_mem(mWeights);
  4297. mWeights = NULL;
  4298. }
  4299. mJointRiggingInfoTab.clear();
  4300. destroyOctree();
  4301. }
  4302. bool LLVolumeFace::create(LLVolume* volume, bool partial_build)
  4303. {
  4304. // Tree for this face is no longer valid
  4305. destroyOctree();
  4306. bool ret = false;
  4307. if (mTypeMask & CAP_MASK)
  4308. {
  4309. ret = createCap(volume, partial_build);
  4310. }
  4311. else if ((mTypeMask & END_MASK) || (mTypeMask & SIDE_MASK))
  4312. {
  4313. ret = createSide(volume, partial_build);
  4314. }
  4315. else
  4316. {
  4317. llerrs << "Unknown/uninitialized face type !" << llendl;
  4318. }
  4319. return ret;
  4320. }
  4321. void LLVolumeFace::getVertexData(U16 index, LLVolumeFace::VertexData& cv)
  4322. {
  4323. cv.setPosition(mPositions[index]);
  4324. if (mNormals)
  4325. {
  4326. cv.setNormal(mNormals[index]);
  4327. }
  4328. else
  4329. {
  4330. cv.getNormal().clear();
  4331. }
  4332. if (mTexCoords)
  4333. {
  4334. cv.mTexCoord = mTexCoords[index];
  4335. }
  4336. else
  4337. {
  4338. cv.mTexCoord.clear();
  4339. }
  4340. }
  4341. bool LLVolumeFace::VertexMapData::operator==(const LLVolumeFace::VertexData& rhs) const
  4342. {
  4343. return getPosition().equals3(rhs.getPosition()) &&
  4344. mTexCoord == rhs.mTexCoord && getNormal().equals3(rhs.getNormal());
  4345. }
  4346. bool LLVolumeFace::VertexMapData::ComparePosition::operator()(const LLVector3& a,
  4347. const LLVector3& b) const
  4348. {
  4349. if (a.mV[0] != b.mV[0])
  4350. {
  4351. return a.mV[0] < b.mV[0];
  4352. }
  4353. if (a.mV[1] != b.mV[1])
  4354. {
  4355. return a.mV[1] < b.mV[1];
  4356. }
  4357. return a.mV[2] < b.mV[2];
  4358. }
  4359. void LLVolumeFace::remap()
  4360. {
  4361. // Generate a remap buffer
  4362. std::vector<U32> remap(mNumVertices);
  4363. // Remap with the U32 indices
  4364. S32 vert_count = LLMeshOptimizer::generateRemapMulti16(remap.data(),
  4365. mIndices,
  4366. mNumIndices,
  4367. mPositions,
  4368. mNormals,
  4369. mTexCoords,
  4370. mNumVertices);
  4371. if (vert_count < 3)
  4372. {
  4373. return; // Nothing to remap or remap failed.
  4374. }
  4375. // Allocate new buffers
  4376. S32 size = ((mNumIndices * sizeof(U16)) + 0xF) & ~0xF;
  4377. U16* remap_idx = (U16*)allocate_volume_mem(size);
  4378. if (!remap_idx)
  4379. {
  4380. LLMemory::allocationFailed();
  4381. llwarns << "Out of memory trying to remap vertices (2)" << llendl;
  4382. return;
  4383. }
  4384. size_t tc_bytes = (vert_count * sizeof(LLVector2) + 0xF) & ~0xF;
  4385. size_t pos_bytes = sizeof(LLVector4a) * 2 * vert_count + tc_bytes;
  4386. LLVector4a* remap_pos = (LLVector4a*)allocate_volume_mem_64(pos_bytes);
  4387. if (!remap_pos)
  4388. {
  4389. LLMemory::allocationFailed();
  4390. llwarns << "Out of memory trying to remap vertices (3)" << llendl;
  4391. free_volume_mem(remap_idx);
  4392. return;
  4393. }
  4394. // Zero all bytes to avoid non-zero padding bytes when vertex structure
  4395. // has gaps.
  4396. // See: https://github.com/zeux/meshoptimizer/blob/master/README.md in
  4397. // the "Indexing" chapter. HB
  4398. memset((void*)remap_pos, 0, pos_bytes);
  4399. LLVector4a* remap_norm = remap_pos + vert_count;
  4400. LLVector2* remap_tc = (LLVector2*)(remap_norm + vert_count);
  4401. // Fill the buffers
  4402. LLMeshOptimizer::remapIndexBuffer16(remap_idx, mIndices, mNumIndices,
  4403. remap.data());
  4404. LLMeshOptimizer::remapVertsBuffer(remap_pos, mPositions, mNumVertices,
  4405. remap.data());
  4406. LLMeshOptimizer::remapVertsBuffer(remap_norm, mNormals, mNumVertices,
  4407. remap.data());
  4408. LLMeshOptimizer::remapTexCoordsBuffer(remap_tc, mTexCoords, mNumVertices,
  4409. remap.data());
  4410. // Free old buffers
  4411. free_volume_mem(mIndices);
  4412. free_volume_mem_64(mPositions);
4413. // Tangents are now invalid
  4414. free_volume_mem(mTangents);
  4415. // Update volume face using new buffers
  4416. mNumVertices = mNumAllocatedVertices = vert_count;
  4417. mIndices = remap_idx;
  4418. mPositions = remap_pos;
  4419. mNormals = remap_norm;
  4420. mTexCoords = remap_tc;
  4421. mTangents = NULL;
  4422. }
  4423. void LLVolumeFace::optimize(F32 angle_cutoff)
  4424. {
  4425. LLVolumeFace new_face;
  4426. // Map of points to vector of vertices at that point
  4427. std::map<U64, std::vector<VertexMapData> > point_map;
  4428. LLVector4a range;
  4429. range.setSub(mExtents[1], mExtents[0]);
  4430. // Remove redundant vertices
  4431. std::map<U64, std::vector<VertexMapData> >::iterator point_iter;
  4432. LLVector4a pos;
  4433. for (S32 i = 0; i < mNumIndices; ++i)
  4434. {
  4435. U16 index = mIndices[i];
  4436. if (index >= mNumVertices)
  4437. {
  4438. // Invalid index: replace with a valid one to avoid a crash.
4439. llwarns_once << "Invalid vertex index in volume face "
  4440. << std::hex << (intptr_t)this << std::dec << llendl;
  4441. index = mNumVertices - 1;
  4442. mIndices[i] = index;
  4443. }
  4444. LLVolumeFace::VertexData cv;
  4445. getVertexData(index, cv);
  4446. bool found = false;
  4447. pos.setSub(mPositions[index], mExtents[0]);
  4448. pos.div(range);
  4449. U64 pos64 = (U16)(pos[0] * 65535);
  4450. pos64 = pos64 | (((U64)(pos[1] * 65535)) << 16);
  4451. pos64 = pos64 | (((U64)(pos[2] * 65535)) << 32);
  4452. point_iter = point_map.find(pos64);
  4453. if (point_iter != point_map.end())
  4454. {
  4455. // Duplicate point might exist
  4456. for (S32 j = 0, count = point_iter->second.size(); j < count; ++j)
  4457. {
  4458. LLVolumeFace::VertexData& tv = (point_iter->second)[j];
  4459. if (tv.compareNormal(cv, angle_cutoff))
  4460. {
  4461. found = true;
  4462. new_face.pushIndex((point_iter->second)[j].mIndex);
  4463. break;
  4464. }
  4465. }
  4466. }
  4467. if (!found)
  4468. {
  4469. new_face.pushVertex(cv, mNumIndices);
  4470. U16 index = (U16)new_face.mNumVertices - 1;
  4471. new_face.pushIndex(index);
  4472. VertexMapData d;
  4473. d.setPosition(cv.getPosition());
  4474. d.mTexCoord = cv.mTexCoord;
  4475. d.setNormal(cv.getNormal());
  4476. d.mIndex = index;
  4477. if (point_iter != point_map.end())
  4478. {
  4479. point_iter->second.emplace_back(d);
  4480. }
  4481. else
  4482. {
  4483. point_map[pos64].emplace_back(d);
  4484. }
  4485. }
  4486. }
  4487. if (angle_cutoff > 1.f && !mNormals && new_face.mNormals)
  4488. {
  4489. // NOTE: normals are part of the same buffer as mPositions, do not free
  4490. // them separately.
  4491. new_face.mNormals = NULL;
  4492. }
  4493. if (!mTexCoords && new_face.mTexCoords)
  4494. {
  4495. // NOTE: texture coordinates are part of the same buffer as mPositions,
  4496. // do not free them separately.
  4497. new_face.mTexCoords = NULL;
  4498. }
  4499. // Only swap data if we have actually optimized the mesh
  4500. if (new_face.mNumVertices < mNumVertices &&
  4501. new_face.mNumIndices == mNumIndices)
  4502. {
  4503. LL_DEBUGS("MeshVolume") << "Optimization reached for volume face "
  4504. << std::hex << (intptr_t)this << std::dec
  4505. << " = " << new_face.mNumVertices << "/"
  4506. << mNumVertices << " new/old vertices."
  4507. << LL_ENDL;
  4508. swapData(new_face);
  4509. }
  4510. else
  4511. {
  4512. LL_DEBUGS("MeshVolume") << "No optimization possible for volume face "
  4513. << std::hex << (intptr_t)this << std::dec
  4514. << LL_ENDL;
  4515. }
  4516. }
  4517. // Data structure for tangent generation
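// Gathers per-corner (i.e. per index, not per vertex) copies of the face
// attributes and exposes the accessors (GetNumFaces(), GetPosition(),
// GetTexCoord(), GetNormal(), SetTangentSpace()...) expected by the
// mikktspace library; corners are addressed as face_num * 3 + vert_num.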
  4518. class MikktData
  4519. {
  4520. protected:
  4521. LOG_CLASS(MikktData);
  4522. public:
  4523. MikktData(LLVolumeFace* f)
  4524. : face(f)
  4525. {
  4526. U32 count = face->mNumIndices;
  4527. p.resize(count);
  4528. n.resize(count);
  4529. tc.resize(count);
  4530. t.resize(count);
  4531. bool has_weights = face->mWeights != NULL;
  4532. if (has_weights)
  4533. {
  4534. w.resize(count);
  4535. }
  4536. LLVector3 inv_scale(1.f / face->mNormalizedScale.mV[0],
  4537. 1.f / face->mNormalizedScale.mV[1],
  4538. 1.f / face->mNormalizedScale.mV[2]);
  4539. for (S32 i = 0, count = face->mNumIndices; i < count; ++i)
  4540. {
S32 idx = face->mIndices[i];
if (idx >= face->mNumVertices)
{
// Invalid index: replace with a valid index to avoid crashes.
// Note: clamp it before it is used to read the attribute arrays below.
LL_DEBUGS("MeshVolume") << "Invalid index: " << idx << LL_ENDL;
idx = face->mNumVertices - 1;
face->mIndices[i] = idx;
}
p[i].set(face->mPositions[idx].getF32ptr());
// Put mesh in original coordinate frame when reconstructing
// tangents.
p[i].scaleVec(face->mNormalizedScale);
n[i].set(face->mNormals[idx].getF32ptr());
n[i].scaleVec(inv_scale);
n[i].normalize();
tc[i].set(face->mTexCoords[idx]);
  4557. if (has_weights)
  4558. {
  4559. w[i].set(face->mWeights[idx].getF32ptr());
  4560. }
  4561. }
  4562. }
  4563. LL_INLINE U32 GetNumFaces() const
  4564. {
  4565. return face->mNumIndices / 3;
  4566. }
  4567. LL_INLINE U32 GetNumVerticesOfFace(U32) const
  4568. {
  4569. return 3;
  4570. }
  4571. LL_INLINE mikk::float3 GetPosition(U32 face_num, U32 vert_num) const
  4572. {
  4573. const F32* v = p[face_num * 3 + vert_num].mV;
  4574. return mikk::float3(v);
  4575. }
  4576. LL_INLINE mikk::float3 GetTexCoord(U32 face_num, U32 vert_num) const
  4577. {
  4578. const F32* uv = tc[face_num * 3 + vert_num].mV;
  4579. return mikk::float3(uv[0], uv[1], 1.f);
  4580. }
  4581. LL_INLINE mikk::float3 GetNormal(U32 face_num, U32 vert_num) const
  4582. {
  4583. const F32* normal = n[face_num * 3 + vert_num].mV;
  4584. return mikk::float3(normal);
  4585. }
  4586. LL_INLINE void SetTangentSpace(U32 face_num, U32 vert_num, mikk::float3 f,
  4587. bool orientation)
  4588. {
  4589. U32 i = face_num * 3 + vert_num;
  4590. t[i].set(f.x, f.y, f.z, orientation ? 1.f : -1.f);
  4591. }
  4592. public:
  4593. LLVolumeFace* face;
  4594. std::vector<LLVector3> p;
  4595. std::vector<LLVector3> n;
  4596. std::vector<LLVector2> tc;
  4597. std::vector<LLVector4> w;
  4598. std::vector<LLVector4> t;
  4599. };
  4600. bool LLVolumeFace::cacheOptimize(bool gen_tangents)
  4601. {
  4602. if (mOptimized)
  4603. {
  4604. llwarns << "Already optimized, ignoring." << llendl;
  4605. llassert(false);
  4606. return true;
  4607. }
  4608. mOptimized = true;
  4609. if (!mIndices)
  4610. {
  4611. llwarns << "NULL mIndices, aborting." << llendl;
  4612. // Bad mesh data: report a failure.
  4613. return false;
  4614. }
  4615. // New PBR viewer code, used when gen_tangents (= gUsePBRShaders) is true.
  4616. if (gen_tangents && mNormals && mTexCoords)
  4617. {
  4618. // Generate mikkt space tangents before cache optimizing since the
  4619. // index buffer may change; a bit of a hack to do this here, but this
  4620. // method gets called exactly once for the lifetime of a mesh and is
  4621. // executed on a background thread.
  4622. MikktData data(this);
  4623. mikk::Mikktspace ctx(data);
  4624. ctx.genTangSpace();
  4625. // Re-weld
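// Each attribute is described as a meshopt_Stream so that
// meshopt_generateVertexRemapMulti() only merges corners whose position,
// normal, tangent, texture coordinates (and weights, when present) are
// all identical.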
  4626. meshopt_Stream mos[] =
  4627. {
  4628. { &data.p[0], sizeof(LLVector3), sizeof(LLVector3) },
  4629. { &data.n[0], sizeof(LLVector3), sizeof(LLVector3) },
  4630. { &data.t[0], sizeof(LLVector4), sizeof(LLVector4) },
  4631. { &data.tc[0], sizeof(LLVector2), sizeof(LLVector2) },
  4632. { data.w.empty() ? NULL : &data.w[0],
  4633. sizeof(LLVector4), sizeof(LLVector4) }
  4634. };
  4635. std::vector<U32> remap;
  4636. try
  4637. {
  4638. remap.resize(data.p.size());
  4639. }
  4640. catch (const std::bad_alloc&)
  4641. {
  4642. LLMemory::allocationFailed();
  4643. llwarns << "Out of memory trying to generate tangents" << llendl;
  4644. return false;
  4645. }
  4646. U32 stream_count = data.w.empty() ? 4 : 5;
  4647. U32 vert_count = 0;
  4648. if (!data.p.empty())
  4649. {
  4650. vert_count = meshopt_generateVertexRemapMulti(&remap[0], NULL,
  4651. data.p.size(),
  4652. data.p.size(), mos,
  4653. stream_count);
  4654. }
  4655. if (vert_count && vert_count < 65535)
  4656. {
bool success = resizeVertices(vert_count);
  4658. if (success && !data.w.empty())
  4659. {
  4660. success = allocateWeights(vert_count);
  4661. }
  4662. if (success)
  4663. {
  4664. success = allocateTangents(mNumVertices);
  4665. }
  4666. if (!success)
  4667. {
  4668. LLMemory::allocationFailed();
  4669. llwarns << "Out of memory trying to generate tangents"
  4670. << llendl;
  4671. return false;
  4672. }
  4673. for (S32 i = 0; i < mNumIndices; ++i)
  4674. {
  4675. U32 src_idx = i;
  4676. U32 dst_idx = remap[i];
if ((S32)dst_idx >= mNumVertices)
{
dst_idx = mNumVertices - 1;
llwarns_once << "Too large a destination index: using last index"
<< llendl;
}
  4683. mIndices[i] = dst_idx;
  4684. mPositions[dst_idx].load3(data.p[src_idx].mV);
  4685. mNormals[dst_idx].load3(data.n[src_idx].mV);
  4686. mTexCoords[dst_idx] = data.tc[src_idx];
  4687. mTangents[dst_idx].loadua(data.t[src_idx].mV);
  4688. if (mWeights)
  4689. {
  4690. mWeights[dst_idx].loadua(data.w[src_idx].mV);
  4691. }
  4692. }
  4693. // Put back in normalized coordinate frame
  4694. LLVector4a inv_scale(1.f / mNormalizedScale.mV[0],
  4695. 1.f / mNormalizedScale.mV[1],
  4696. 1.f / mNormalizedScale.mV[2]);
  4697. LLVector4a scale;
  4698. scale.load3(mNormalizedScale.mV);
  4699. scale.getF32ptr()[3] = 1.f;
  4700. for (S32 i = 0; i < mNumVertices; ++i)
  4701. {
  4702. mPositions[i].mul(inv_scale);
  4703. mNormals[i].mul(scale);
  4704. mNormals[i].normalize3();
  4705. F32 w = mTangents[i].getF32ptr()[3];
  4706. mTangents[i].mul(scale);
  4707. mTangents[i].normalize3();
  4708. mTangents[i].getF32ptr()[3] = w;
  4709. }
  4710. }
  4711. else
  4712. {
// Exceeded the maximum vertex count (or the remap yielded no vertex):
// fall back to the legacy tangent generation, which never adds verts.
  4715. createTangents();
  4716. }
  4717. // Cache-optimize index buffer; meshopt needs scratch space, do some
  4718. // pointer shuffling to avoid an extra index buffer copy.
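// Clearing mIndices before resizeIndices() prevents the latter from
// freeing the old buffer, which is still needed as the source for
// meshopt_optimizeVertexCache() below.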
  4719. U16* src_indices = mIndices;
  4720. mIndices = NULL;
  4721. resizeIndices(mNumIndices);
  4722. meshopt_optimizeVertexCache<U16>(mIndices, src_indices, mNumIndices,
  4723. mNumVertices);
  4724. free_volume_mem(src_indices);
  4725. return true;
  4726. }
  4727. // Pre-PBR code.
  4728. if (mNumVertices < 3 || mNumIndices < 3)
  4729. {
  4730. // Nothing to do
  4731. return true;
  4732. }
  4733. // Check indices validity and "fix" bogus ones if needed, since otherwise
  4734. // meshoptimizer would likely assert and thus crash in case of an issue
  4735. // with them... HB
  4736. for (S32 i = 0; i < mNumIndices; ++i)
  4737. {
  4738. if (mIndices[i] >= mNumVertices)
  4739. {
  4740. // Invalid index: replace with a valid one to avoid a crash.
  4741. llwarns_once << "Invalid vextex index in volume face "
  4742. << std::hex << (intptr_t)this << std::dec << llendl;
  4743. mIndices[i] = mNumVertices - 1;
  4744. }
  4745. }
  4746. struct buffer_data_t
  4747. {
  4748. // Double pointer to volume attribute data. Avoids fixup after
  4749. // reallocating buffers on resize.
  4750. void** dst;
  4751. // Scratch buffer. Allocated with vertices count from
  4752. // meshopt_generateVertexRemapMulti()
  4753. void* scratch;
  4754. // Stride between contiguous attributes
  4755. size_t stride;
  4756. };
  4757. // Contains data needed by meshopt_generateVertexRemapMulti()
  4758. std::vector<buffer_data_t> buffers;
  4759. // Contains data needed by meshopt_remapVertexBuffer()
  4760. std::vector<meshopt_Stream> streams;
  4761. static struct
  4762. {
  4763. size_t offs;
  4764. size_t size;
  4765. size_t stride;
  4766. } ref_streams[] =
  4767. {
  4768. {
  4769. offsetof(LLVolumeFace, mPositions),
  4770. sizeof(float) * 3,
  4771. sizeof(mPositions[0])
  4772. },
  4773. {
  4774. offsetof(LLVolumeFace, mNormals),
  4775. sizeof(float) * 3,
  4776. sizeof(mNormals[0])
  4777. },
  4778. {
  4779. offsetof(LLVolumeFace, mTexCoords),
  4780. sizeof(float) * 2,
  4781. sizeof(mTexCoords[0])
  4782. },
  4783. {
  4784. offsetof(LLVolumeFace, mWeights),
  4785. sizeof(float) * 3,
  4786. sizeof(mWeights[0])
  4787. },
  4788. {
  4789. offsetof(LLVolumeFace, mTangents),
  4790. sizeof(float) * 3,
  4791. sizeof(mTangents[0])
  4792. },
  4793. };
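// ref_streams stores the offset of each attribute pointer member within
// LLVolumeFace, so that the loop below can access them generically via
// (char*)this + offs and skip the attributes which are not allocated.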
  4794. constexpr size_t ref_streams_elements = LL_ARRAY_SIZE(ref_streams);
  4795. for (size_t i = 0; i < ref_streams_elements; ++i)
  4796. {
  4797. void** ptr =
  4798. reinterpret_cast<void**>((char*)this + ref_streams[i].offs);
  4799. if (*ptr)
  4800. {
  4801. streams.push_back({ *ptr, ref_streams[i].size,
  4802. ref_streams[i].stride });
  4803. buffers.push_back({ ptr, NULL, ref_streams[i].stride });
  4804. }
  4805. }
  4806. std::vector<unsigned int> remap;
  4807. try
  4808. {
remap.resize(mNumIndices);
  4810. }
  4811. catch (const std::bad_alloc&)
  4812. {
  4813. LLMemory::allocationFailed();
  4814. llwarns << "Out of memory trying to optimize vertices" << llendl;
  4815. return false;
  4816. }
  4817. size_t total_verts = meshopt_generateVertexRemapMulti(remap.data(),
  4818. mIndices,
  4819. mNumIndices,
  4820. mNumVertices,
  4821. streams.data(),
  4822. streams.size());
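// total_verts is the number of unique vertices left after welding; remap
// maps each old vertex index to its new (deduplicated) index.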
  4823. meshopt_remapIndexBuffer(mIndices, mIndices, mNumIndices, remap.data());
  4824. bool failed = false;
  4825. for (S32 i = 0, count = buffers.size(); i < count; ++i)
  4826. {
  4827. buffer_data_t& entry = buffers[i];
  4828. void* buf_tmp = allocate_volume_mem(entry.stride * total_verts);
  4829. if (!buf_tmp)
  4830. {
  4831. failed = true;
  4832. break;
  4833. }
  4834. entry.scratch = buf_tmp;
  4835. // Write to scratch buffer
  4836. meshopt_remapVertexBuffer(entry.scratch, *entry.dst, mNumVertices,
  4837. entry.stride, remap.data());
  4838. }
  4839. if (failed)
  4840. {
  4841. for (S32 i = 0, count = buffers.size(); i < count; ++i)
  4842. {
  4843. buffer_data_t& entry = buffers[i];
  4844. if (entry.scratch)
  4845. {
  4846. free_volume_mem(entry.scratch);
  4847. }
  4848. }
  4849. LLMemory::allocationFailed();
  4850. llwarns << "Out of memory trying to optimize vertices" << llendl;
  4851. }
  4852. else if (mNumAllocatedVertices != (S32)total_verts)
  4853. {
  4854. if (!resizeVertices(total_verts))
  4855. {
  4856. failed = true;
  4857. }
  4858. else if (mWeights && !allocateWeights(total_verts))
  4859. {
  4860. failed = true;
  4861. }
  4862. else if (mTangents && !allocateTangents(total_verts))
  4863. {
  4864. failed = true;
  4865. }
  4866. }
  4867. if (failed)
  4868. {
  4869. for (S32 i = 0, count = buffers.size(); i < count; ++i)
  4870. {
  4871. buffer_data_t& entry = buffers[i];
  4872. if (entry.scratch)
  4873. {
  4874. free_volume_mem(entry.scratch);
  4875. }
  4876. }
  4877. return false;
  4878. }
  4879. meshopt_optimizeVertexCache(mIndices, mIndices, mNumIndices, total_verts);
  4880. #if 0 // Do not do that: it causes rendering glitches with some meshes. HB
  4881. meshopt_optimizeOverdraw(mIndices, mIndices, mNumIndices,
  4882. (float*)buffers[0].scratch, total_verts,
  4883. buffers[0].stride, 1.05f);
  4884. #endif
  4885. meshopt_optimizeVertexFetchRemap(remap.data(), mIndices, mNumIndices,
  4886. total_verts);
  4887. meshopt_remapIndexBuffer(mIndices, mIndices, mNumIndices, remap.data());
  4888. for (S32 i = 0, count = buffers.size(); i < count; ++i)
  4889. {
  4890. buffer_data_t& entry = buffers[i];
  4891. // Write to LLVolume attribute buffer
  4892. meshopt_remapVertexBuffer(*entry.dst, entry.scratch, total_verts,
  4893. entry.stride, remap.data());
  4894. // Release scratch buffer
  4895. if (entry.scratch)
  4896. {
  4897. free_volume_mem(entry.scratch);
  4898. }
  4899. }
  4900. mNumVertices = total_verts;
  4901. return true;
  4902. }
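// Builds a static raycast acceleration structure: one LLVolumeTriangle per
// index triple, inserted into an octree keyed on the triangle center, with
// a bounding radius derived from the triangle AABB diagonal times 'scaler'.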
  4903. void LLVolumeFace::createOctree(F32 scaler, const LLVector4a& center0,
  4904. const LLVector4a& size0)
  4905. {
  4906. if (mOctree)
  4907. {
  4908. return;
  4909. }
  4910. mOctree = new LLVolumeOctree(center0, size0);
  4911. // Initialize all the triangles we need
  4912. const U32 num_triangles = mNumIndices / 3;
  4913. mOctreeTriangles = new LLVolumeTriangle[num_triangles];
  4914. LLVector4a min, max, center, size;
  4915. for (U32 tri_idx = 0; tri_idx < num_triangles; ++tri_idx)
  4916. {
  4917. // For each triangle
  4918. LLVolumeTriangle* tri = &mOctreeTriangles[tri_idx];
  4919. const U32 index = 3 * tri_idx;
  4920. const LLVector4a& v0 = mPositions[mIndices[index]];
  4921. const LLVector4a& v1 = mPositions[mIndices[index + 1]];
  4922. const LLVector4a& v2 = mPositions[mIndices[index + 2]];
  4923. // Store pointers to vertex data
  4924. tri->mV[0] = &v0;
  4925. tri->mV[1] = &v1;
  4926. tri->mV[2] = &v2;
  4927. // Store indices
  4928. tri->mIndex[0] = mIndices[index];
  4929. tri->mIndex[1] = mIndices[index + 1];
  4930. tri->mIndex[2] = mIndices[index + 2];
  4931. // Get minimum point
  4932. min = v0;
  4933. min.setMin(min, v1);
  4934. min.setMin(min, v2);
  4935. // Get maximum point
  4936. max = v0;
  4937. max.setMax(max, v1);
  4938. max.setMax(max, v2);
  4939. // Compute center
  4940. center.setAdd(min, max);
  4941. center.mul(0.5f);
  4942. tri->mPositionGroup = center;
  4943. // Compute "radius"
  4944. size.setSub(max, min);
  4945. tri->mRadius = size.getLength3().getF32() * scaler;
  4946. // Insert
  4947. mOctree->insert(tri);
  4948. }
  4949. // Remove unneeded octree layers
  4950. while (!mOctree->balance()) ;
  4951. // Calculate AABB for each node
  4952. LLVolumeOctreeRebound rebound;
  4953. rebound.traverse(mOctree);
  4954. if (gDebugGL)
  4955. {
  4956. LLVolumeOctreeValidateNoOwnership validate;
  4957. validate.traverse(mOctree);
  4958. }
  4959. }
  4960. void LLVolumeFace::destroyOctree()
  4961. {
  4962. if (mOctree)
  4963. {
  4964. delete mOctree;
  4965. mOctree = NULL;
  4966. }
  4967. if (mOctreeTriangles)
  4968. {
  4969. delete[] mOctreeTriangles;
  4970. mOctreeTriangles = NULL;
  4971. }
  4972. }
  4973. void LLVolumeFace::swapData(LLVolumeFace& rhs)
  4974. {
  4975. std::swap(rhs.mPositions, mPositions);
  4976. std::swap(rhs.mNormals, mNormals);
  4977. std::swap(rhs.mTangents, mTangents);
  4978. std::swap(rhs.mTexCoords, mTexCoords);
  4979. std::swap(rhs.mIndices, mIndices);
  4980. std::swap(rhs.mNumVertices, mNumVertices);
  4981. std::swap(rhs.mNumIndices, mNumIndices);
  4982. }
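// Planar interpolation used by createUnCutCubeCap():
// vout = v0 + coef01 * (v1 - v0) + coef02 * (v2 - v0) for both the
// position and the texture coordinates; the normal is simply copied
// from v0.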
  4983. void lerp_planar_vert(LLVolumeFace::VertexData& v0,
  4984. LLVolumeFace::VertexData& v1,
  4985. LLVolumeFace::VertexData& v2,
  4986. LLVolumeFace::VertexData& vout,
  4987. F32 coef01, F32 coef02)
  4988. {
  4989. LLVector4a lhs;
  4990. lhs.setSub(v1.getPosition(), v0.getPosition());
  4991. lhs.mul(coef01);
  4992. LLVector4a rhs;
  4993. rhs.setSub(v2.getPosition(), v0.getPosition());
  4994. rhs.mul(coef02);
  4995. rhs.add(lhs);
  4996. rhs.add(v0.getPosition());
  4997. vout.setPosition(rhs);
  4998. vout.mTexCoord = v0.mTexCoord + (v1.mTexCoord - v0.mTexCoord) * coef01 +
  4999. (v2.mTexCoord - v0.mTexCoord) * coef02;
  5000. vout.setNormal(v0.getNormal());
  5001. }
  5002. bool LLVolumeFace::createUnCutCubeCap(LLVolume* volume, bool partial_build)
  5003. {
  5004. const LLAlignedArray<LLVector4a, 64>& mesh = volume->getMesh();
  5005. const LLAlignedArray<LLVector4a, 64>& profile = volume->getProfile().mVertices;
  5006. S32 max_s = volume->getProfile().getTotal();
  5007. S32 max_t = volume->getPath().mPath.size();
  5008. S32 grid_size = (profile.size() - 1) / 4;
  5009. LLVector4a& min = mExtents[0];
  5010. LLVector4a& max = mExtents[1];
  5011. S32 offset = 0;
  5012. if (mTypeMask & TOP_MASK)
  5013. {
  5014. offset = (max_t - 1) * max_s;
  5015. }
  5016. else
  5017. {
  5018. offset = mBeginS;
  5019. }
  5020. {
  5021. VertexData corners[4];
  5022. VertexData base_vert;
  5023. for (S32 t = 0; t < 4; ++t)
  5024. {
  5025. corners[t].getPosition().load3(mesh[offset +
  5026. grid_size * t].getF32ptr());
  5027. corners[t].mTexCoord.mV[0] = profile[grid_size * t][0] + 0.5f;
  5028. corners[t].mTexCoord.mV[1] = 0.5f - profile[grid_size * t][1];
  5029. }
  5030. {
  5031. LLVector4a lhs;
  5032. lhs.setSub(corners[1].getPosition(), corners[0].getPosition());
  5033. LLVector4a rhs;
  5034. rhs.setSub(corners[2].getPosition(), corners[1].getPosition());
  5035. base_vert.getNormal().setCross3(lhs, rhs);
  5036. base_vert.getNormal().normalize3fast();
  5037. }
  5038. if (!(mTypeMask & TOP_MASK))
  5039. {
  5040. base_vert.getNormal().mul(-1.f);
  5041. }
  5042. else
  5043. {
  5044. // Swap the UVs on the U(X) axis for top face
  5045. LLVector2 swap;
  5046. swap = corners[0].mTexCoord;
  5047. corners[0].mTexCoord = corners[3].mTexCoord;
  5048. corners[3].mTexCoord = swap;
  5049. swap = corners[1].mTexCoord;
  5050. corners[1].mTexCoord = corners[2].mTexCoord;
  5051. corners[2].mTexCoord = swap;
  5052. }
  5053. S32 size = (grid_size + 1) * (grid_size + 1);
  5054. resizeVertices(size);
  5055. LLVector4a* pos = (LLVector4a*)mPositions;
  5056. LLVector4a* norm = (LLVector4a*)mNormals;
  5057. LLVector2* tc = (LLVector2*)mTexCoords;
  5058. VertexData new_vert;
  5059. for (S32 gx = 0; gx <= grid_size; ++gx)
  5060. {
  5061. for (S32 gy = 0; gy <= grid_size; ++gy)
  5062. {
  5063. lerp_planar_vert(corners[0], corners[1], corners[3], new_vert,
  5064. (F32)gx / (F32)grid_size,
  5065. (F32)gy / (F32)grid_size);
  5066. *pos++ = new_vert.getPosition();
  5067. *norm++ = base_vert.getNormal();
  5068. *tc++ = new_vert.mTexCoord;
  5069. if (gx == 0 && gy == 0)
  5070. {
  5071. min = new_vert.getPosition();
  5072. max = min;
  5073. }
  5074. else
  5075. {
  5076. min.setMin(min, new_vert.getPosition());
  5077. max.setMax(max, new_vert.getPosition());
  5078. }
  5079. }
  5080. }
  5081. mCenter->setAdd(min, max);
  5082. mCenter->mul(0.5f);
  5083. }
  5084. if (!partial_build)
  5085. {
  5086. size_t num_indices = grid_size * grid_size * 6;
  5087. resizeIndices(num_indices);
  5088. if (!volume->isMeshAssetLoaded() || mEdge.size() < num_indices)
  5089. {
  5090. mEdge.resize(num_indices);
  5091. }
  5092. U16* out = mIndices;
  5093. S32 cur_edge = 0;
  5094. S32 idxs[] = { 0, 1, grid_size + 2, grid_size + 2, grid_size + 1, 0 };
  5095. for (S32 gx = 0; gx < grid_size; ++gx)
  5096. {
  5097. for (S32 gy = 0; gy < grid_size; ++gy)
  5098. {
  5099. if (mTypeMask & TOP_MASK)
  5100. {
  5101. for (S32 i = 5; i >= 0; --i)
  5102. {
  5103. *out++ = gy * (grid_size + 1) + gx + idxs[i];
  5104. }
  5105. S32 edge_value = grid_size * 2 * gy + gx * 2;
  5106. if (gx > 0)
  5107. {
  5108. mEdge[cur_edge++] = edge_value;
  5109. }
  5110. else
  5111. {
mEdge[cur_edge++] = -1; // Mark face to highlight it
  5113. }
  5114. if (gy < grid_size - 1)
  5115. {
  5116. mEdge[cur_edge++] = edge_value;
  5117. }
  5118. else
  5119. {
  5120. mEdge[cur_edge++] = -1;
  5121. }
  5122. mEdge[cur_edge++] = edge_value;
  5123. if (gx < grid_size - 1)
  5124. {
  5125. mEdge[cur_edge++] = edge_value;
  5126. }
  5127. else
  5128. {
  5129. mEdge[cur_edge++] = -1;
  5130. }
  5131. if (gy > 0)
  5132. {
  5133. mEdge[cur_edge++] = edge_value;
  5134. }
  5135. else
  5136. {
  5137. mEdge[cur_edge++] = -1;
  5138. }
  5139. mEdge[cur_edge++] = edge_value;
  5140. }
  5141. else
  5142. {
  5143. for (S32 i = 0; i < 6; ++i)
  5144. {
  5145. *out++ = gy * (grid_size + 1) + gx + idxs[i];
  5146. }
  5147. S32 edge_value = grid_size * 2 * gy + gx * 2;
  5148. if (gy > 0)
  5149. {
  5150. mEdge[cur_edge++] = edge_value;
  5151. }
  5152. else
  5153. {
  5154. mEdge[cur_edge++] = -1;
  5155. }
  5156. if (gx < grid_size - 1)
  5157. {
  5158. mEdge[cur_edge++] = edge_value;
  5159. }
  5160. else
  5161. {
  5162. mEdge[cur_edge++] = -1;
  5163. }
  5164. mEdge[cur_edge++] = edge_value;
  5165. if (gy < grid_size - 1)
  5166. {
  5167. mEdge[cur_edge++] = edge_value;
  5168. }
  5169. else
  5170. {
  5171. mEdge[cur_edge++] = -1;
  5172. }
  5173. if (gx > 0)
  5174. {
  5175. mEdge[cur_edge++] = edge_value;
  5176. }
  5177. else
  5178. {
  5179. mEdge[cur_edge++] = -1;
  5180. }
  5181. mEdge[cur_edge++] = edge_value;
  5182. }
  5183. }
  5184. }
  5185. }
  5186. return true;
  5187. }
  5188. bool LLVolumeFace::createCap(LLVolume* volume, bool partial_build)
  5189. {
  5190. constexpr U32 HOLLOW_OR_OPEN_MASK = HOLLOW_MASK | OPEN_MASK;
  5191. const LLPathParams& params = volume->getParams().getPathParams();
  5192. if (!(mTypeMask & HOLLOW_OR_OPEN_MASK) &&
params.getBegin() == 0.f && params.getEnd() == 1.f &&
  5194. params.getCurveType() == LL_PCODE_PATH_LINE &&
  5195. volume->getParams().getProfileParams().getCurveType() ==
  5196. LL_PCODE_PROFILE_SQUARE)
  5197. {
  5198. return createUnCutCubeCap(volume, partial_build);
  5199. }
  5200. S32 num_vertices = 0, num_indices = 0;
  5201. const LLAlignedArray<LLVector4a, 64>& mesh = volume->getMesh();
  5202. const LLAlignedArray<LLVector4a, 64>& profile =
  5203. volume->getProfile().mVertices;
  5204. // All types of caps have the same number of vertices and indices
  5205. num_vertices = profile.size();
  5206. num_indices = (num_vertices - 2) * 3;
  5207. if (!(mTypeMask & HOLLOW_OR_OPEN_MASK))
  5208. {
  5209. resizeVertices(num_vertices + 1);
  5210. #if 0
  5211. if (!partial_build)
  5212. #endif
  5213. {
  5214. resizeIndices(num_indices + 3);
  5215. }
  5216. }
  5217. else
  5218. {
  5219. resizeVertices(num_vertices);
  5220. #if 0
  5221. if (!partial_build)
  5222. #endif
  5223. {
  5224. resizeIndices(num_indices);
  5225. }
  5226. }
  5227. S32 max_s = volume->getProfile().getTotal();
  5228. S32 max_t = volume->getPath().mPath.size();
  5229. mCenter->clear();
  5230. S32 offset = 0;
  5231. if (mTypeMask & TOP_MASK)
  5232. {
  5233. offset = (max_t - 1) * max_s;
  5234. }
  5235. else
  5236. {
  5237. offset = mBeginS;
  5238. }
  5239. // Figure out the normal, assume all caps are flat faces. Cross product to
  5240. // get normals.
  5241. LLVector2 cuv, min_uv, max_uv;
  5242. LLVector4a& min = mExtents[0];
  5243. LLVector4a& max = mExtents[1];
  5244. LLVector2* tc = (LLVector2*)mTexCoords;
  5245. LLVector4a* pos = (LLVector4a*)mPositions;
  5246. LLVector4a* norm = (LLVector4a*)mNormals;
  5247. // Copy the vertices into the array
  5248. const LLVector4a* src = mesh.mArray + offset;
  5249. const LLVector4a* end = src + num_vertices;
  5250. min = *src;
  5251. max = min;
  5252. const LLVector4a* p = profile.mArray;
  5253. if (mTypeMask & TOP_MASK)
  5254. {
  5255. min_uv.set((*p)[0] + 0.5f, (*p)[1] + 0.5f);
  5256. max_uv = min_uv;
  5257. while (src < end)
  5258. {
  5259. tc->mV[0] = (*p)[0] + 0.5f;
  5260. tc->mV[1] = (*p++)[1] + 0.5f;
  5261. llassert(src->isFinite3());
  5262. update_min_max(min, max, *src);
  5263. update_min_max(min_uv, max_uv, *tc++);
  5264. *pos++ = *src++;
  5265. }
  5266. }
  5267. else
  5268. {
  5269. min_uv.set((*p)[0] + 0.5f, 0.5f - (*p)[1]);
  5270. max_uv = min_uv;
  5271. while (src < end)
  5272. {
  5273. // Mirror for underside.
  5274. tc->mV[0] = (*p)[0] + 0.5f;
  5275. tc->mV[1] = 0.5f - (*p++)[1];
  5276. llassert(src->isFinite3());
  5277. update_min_max(min, max, *src);
  5278. update_min_max(min_uv, max_uv, *tc++);
  5279. *pos++ = *src++;
  5280. }
  5281. }
  5282. mCenter->setAdd(min, max);
  5283. mCenter->mul(0.5f);
  5284. cuv = (min_uv + max_uv) * 0.5f;
  5285. VertexData vd;
  5286. vd.setPosition(*mCenter);
  5287. vd.mTexCoord = cuv;
  5288. if (!(mTypeMask & HOLLOW_OR_OPEN_MASK))
  5289. {
  5290. *pos++ = *mCenter;
  5291. *tc++ = cuv;
  5292. ++num_vertices;
  5293. }
  5294. #if 0
  5295. if (partial_build)
  5296. {
  5297. return true;
  5298. }
  5299. #endif
  5300. if (mTypeMask & HOLLOW_MASK)
  5301. {
  5302. if (mTypeMask & TOP_MASK)
  5303. {
  5304. // HOLLOW TOP
  5305. // Does it matter if it's open or closed ? - djs
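// Triangulate the ring by walking pt1 forward and pt2 backward; at each
// step the signed 2D areas decide whether triangle (pt1, pt1+1, pt2) or
// (pt1, pt2-1, pt2) can be emitted without folding over, the candidate
// with the shorter new edge being preferred when both are usable.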
  5306. S32 pt1 = 0, pt2 = num_vertices - 1;
  5307. S32 i = 0;
  5308. while (pt2 - pt1 > 1)
  5309. {
  5310. // Use the profile points instead of the mesh, since you want
  5311. // the un-transformed profile distances.
  5312. const LLVector4a& p1 = profile[pt1];
  5313. const LLVector4a& p2 = profile[pt2];
  5314. const LLVector4a& pa = profile[pt1 + 1];
  5315. const LLVector4a& pb = profile[pt2 - 1];
  5316. const F32* p1V = p1.getF32ptr();
  5317. const F32* p2V = p2.getF32ptr();
  5318. const F32* paV = pa.getF32ptr();
  5319. const F32* pbV = pb.getF32ptr();
  5320. // Use area of triangle to determine backfacing
  5321. F32 area_1a2, area_1ba, area_21b, area_2ab;
  5322. area_1a2 = p1V[0] * paV[1] - paV[0] * p1V[1] +
  5323. paV[0] * p2V[1] - p2V[0] * paV[1] +
  5324. p2V[0] * p1V[1] - p1V[0] * p2V[1];
  5325. area_1ba = p1V[0] * pbV[1] - pbV[0] * p1V[1] +
  5326. pbV[0] * paV[1] - paV[0] * pbV[1] +
  5327. paV[0] * p1V[1] - p1V[0] * paV[1];
  5328. area_21b = p2V[0] * p1V[1] - p1V[0] * p2V[1] +
  5329. p1V[0] * pbV[1] - pbV[0] * p1V[1] +
  5330. pbV[0] * p2V[1] - p2V[0] * pbV[1];
  5331. area_2ab = p2V[0] * paV[1] - paV[0] * p2V[1] +
  5332. paV[0] * pbV[1] - pbV[0] * paV[1] +
  5333. pbV[0] * p2V[1] - p2V[0] * pbV[1];
  5334. bool use_tri1a2 = true;
  5335. bool tri_1a2 = true;
  5336. bool tri_21b = true;
  5337. if (area_1a2 < 0)
  5338. {
  5339. tri_1a2 = false;
  5340. }
  5341. if (area_2ab < 0)
  5342. {
  5343. // Cannot use, because it contains point b
  5344. tri_1a2 = false;
  5345. }
  5346. if (area_21b < 0)
  5347. {
  5348. tri_21b = false;
  5349. }
  5350. if (area_1ba < 0)
  5351. {
  5352. // Cannot use, because it contains point b
  5353. tri_21b = false;
  5354. }
  5355. if (!tri_1a2)
  5356. {
  5357. use_tri1a2 = false;
  5358. }
  5359. else if (!tri_21b)
  5360. {
  5361. use_tri1a2 = true;
  5362. }
  5363. else
  5364. {
  5365. LLVector4a d1;
  5366. d1.setSub(p1, pa);
  5367. LLVector4a d2;
  5368. d2.setSub(p2, pb);
  5369. use_tri1a2 = d1.dot3(d1) < d2.dot3(d2);
  5370. }
  5371. if (use_tri1a2)
  5372. {
  5373. mIndices[i++] = pt1;
  5374. mIndices[i++] = ++pt1;
  5375. mIndices[i++] = pt2;
  5376. }
  5377. else
  5378. {
  5379. mIndices[i++] = pt1;
  5380. mIndices[i++] = pt2 - 1;
  5381. mIndices[i++] = pt2--;
  5382. }
  5383. }
  5384. }
  5385. else
  5386. {
  5387. // HOLLOW BOTTOM
  5388. // Does it matter if it's open or closed? - djs
  5389. llassert(mTypeMask & BOTTOM_MASK);
  5390. S32 pt1 = 0, pt2 = num_vertices - 1;
  5391. S32 i = 0;
  5392. while (pt2 - pt1 > 1)
  5393. {
  5394. // Use the profile points instead of the mesh, since you want
  5395. // the un-transformed profile distances.
  5396. const LLVector4a& p1 = profile[pt1];
  5397. const LLVector4a& p2 = profile[pt2];
  5398. const LLVector4a& pa = profile[pt1 + 1];
  5399. const LLVector4a& pb = profile[pt2 - 1];
  5400. const F32* p1V = p1.getF32ptr();
  5401. const F32* p2V = p2.getF32ptr();
  5402. const F32* paV = pa.getF32ptr();
  5403. const F32* pbV = pb.getF32ptr();
  5404. // Use area of triangle to determine backfacing
  5405. F32 area_1a2, area_1ba, area_21b, area_2ab;
  5406. area_1a2 = p1V[0] * paV[1] - paV[0] * p1V[1] +
  5407. paV[0] * p2V[1] - p2V[0] * paV[1] +
  5408. p2V[0] * p1V[1] - p1V[0] * p2V[1];
  5409. area_1ba = p1V[0] * pbV[1] - pbV[0] * p1V[1] +
  5410. pbV[0] * paV[1] - paV[0] * pbV[1] +
  5411. paV[0] * p1V[1] - p1V[0] * paV[1];
  5412. area_21b = p2V[0] * p1V[1] - p1V[0] * p2V[1] +
  5413. p1V[0] * pbV[1] - pbV[0] * p1V[1] +
  5414. pbV[0] * p2V[1] - p2V[0] * pbV[1];
  5415. area_2ab = p2V[0] * paV[1] - paV[0] * p2V[1] +
  5416. paV[0] * pbV[1] - pbV[0] * paV[1] +
  5417. pbV[0] * p2V[1] - p2V[0] * pbV[1];
  5418. bool use_tri1a2 = true;
  5419. bool tri_1a2 = true;
  5420. bool tri_21b = true;
  5421. if (area_1a2 < 0)
  5422. {
  5423. tri_1a2 = false;
  5424. }
  5425. if (area_2ab < 0)
  5426. {
  5427. // Cannot use, because it contains point b
  5428. tri_1a2 = false;
  5429. }
  5430. if (area_21b < 0)
  5431. {
  5432. tri_21b = false;
  5433. }
  5434. if (area_1ba < 0)
  5435. {
  5436. // Cannot use, because it contains point b
  5437. tri_21b = false;
  5438. }
  5439. if (!tri_1a2)
  5440. {
  5441. use_tri1a2 = false;
  5442. }
  5443. else if (!tri_21b)
  5444. {
  5445. use_tri1a2 = true;
  5446. }
  5447. else
  5448. {
  5449. LLVector4a d1;
  5450. d1.setSub(p1, pa);
  5451. LLVector4a d2;
  5452. d2.setSub(p2, pb);
  5453. use_tri1a2 = d1.dot3(d1) < d2.dot3(d2);
  5454. }
  5455. // Flipped backfacing from top
  5456. if (use_tri1a2)
  5457. {
  5458. mIndices[i++] = pt1;
  5459. mIndices[i++] = pt2;
  5460. mIndices[i++] = ++pt1;
  5461. }
  5462. else
  5463. {
  5464. mIndices[i++] = pt1;
  5465. mIndices[i++] = pt2;
  5466. mIndices[i++] = --pt2;
  5467. }
  5468. }
  5469. }
  5470. }
  5471. else
  5472. {
  5473. // Not hollow, generate the triangle fan.
  5474. U16 v1 = 2;
  5475. U16 v2 = 1;
  5476. if (mTypeMask & TOP_MASK)
  5477. {
  5478. v1 = 1;
  5479. v2 = 2;
  5480. }
  5481. for (S32 i = 0; i < num_vertices - 2; ++i)
  5482. {
  5483. mIndices[3 * i] = num_vertices - 1;
  5484. mIndices[3 * i + v1] = i;
  5485. mIndices[3 * i + v2] = i + 1;
  5486. }
  5487. }
  5488. LLVector4a d0, d1;
  5489. d0.setSub(mPositions[mIndices[1]], mPositions[mIndices[0]]);
  5490. d1.setSub(mPositions[mIndices[2]], mPositions[mIndices[0]]);
  5491. LLVector4a normal;
  5492. normal.setCross3(d0, d1);
  5493. if (normal.dot3(normal).getF32() > F_APPROXIMATELY_ZERO)
  5494. {
  5495. normal.normalize3fast();
  5496. }
  5497. // Degenerate, make up a value
  5498. else if (normal.getF32ptr()[2] >= 0)
  5499. {
  5500. normal.set(0.f, 0.f, 1.f);
  5501. }
  5502. else
  5503. {
  5504. normal.set(0.f, 0.f, -1.f);
  5505. }
  5506. llassert(llfinite(normal.getF32ptr()[0]));
  5507. llassert(llfinite(normal.getF32ptr()[1]));
  5508. llassert(llfinite(normal.getF32ptr()[2]));
  5509. llassert(!llisnan(normal.getF32ptr()[0]));
  5510. llassert(!llisnan(normal.getF32ptr()[1]));
  5511. llassert(!llisnan(normal.getF32ptr()[2]));
  5512. for (S32 i = 0; i < num_vertices; ++i)
  5513. {
  5514. norm[i].load4a(normal.getF32ptr());
  5515. }
  5516. return true;
  5517. }
  5518. // Adapted from Lengyel, Eric. "Computing Tangent Space Basis Vectors for an
  5519. // Arbitrary Mesh". Terathon Software 3D Graphics Library, 2001.
  5520. // http://www.terathon.com/code/tangent.html
  5524. bool calculate_tangent_array(const U32 vertex_count, const LLVector4a* vertp,
  5525. const LLVector4a* normp, const LLVector2* tcoordp,
  5526. U32 triangle_count, const U16* indexp,
  5527. LLVector4a* tangentp)
  5528. {
  5529. const size_t size = vertex_count * 2 * sizeof(LLVector4a);
  5530. LLVector4a* tan1 = (LLVector4a*)allocate_volume_mem(size);
  5531. if (!tan1) return false;
  5532. LLVector4a* tan2 = tan1 + vertex_count;
  5533. if (size > 0)
  5534. {
  5535. memset((void*)tan1, 0, size);
  5536. }
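// tan1 accumulates the per-vertex tangent directions and tan2 (which
// shares the same allocation) the bitangent directions; both are summed
// over every triangle using a vertex, then orthogonalized against the
// normal below.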
  5537. for (U32 a = 0; a < triangle_count; ++a)
  5538. {
  5539. U32 i1 = *indexp++;
  5540. U32 i2 = *indexp++;
  5541. U32 i3 = *indexp++;
  5542. const LLVector4a& v1 = vertp[i1];
  5543. const LLVector4a& v2 = vertp[i2];
  5544. const LLVector4a& v3 = vertp[i3];
  5545. const LLVector2& w1 = tcoordp[i1];
  5546. const LLVector2& w2 = tcoordp[i2];
  5547. const LLVector2& w3 = tcoordp[i3];
  5548. const F32* v1ptr = v1.getF32ptr();
  5549. const F32* v2ptr = v2.getF32ptr();
  5550. const F32* v3ptr = v3.getF32ptr();
  5551. F32 x1 = v2ptr[0] - v1ptr[0];
  5552. F32 x2 = v3ptr[0] - v1ptr[0];
  5553. F32 y1 = v2ptr[1] - v1ptr[1];
  5554. F32 y2 = v3ptr[1] - v1ptr[1];
  5555. F32 z1 = v2ptr[2] - v1ptr[2];
  5556. F32 z2 = v3ptr[2] - v1ptr[2];
  5557. F32 s1 = w2.mV[0] - w1.mV[0];
  5558. F32 s2 = w3.mV[0] - w1.mV[0];
  5559. F32 t1 = w2.mV[1] - w1.mV[1];
  5560. F32 t2 = w3.mV[1] - w1.mV[1];
  5561. F32 rd = s1 * t2 - s2 * t1;
  5562. F32 r = rd * rd > FLT_EPSILON ? 1.f / rd
  5563. : (rd > 0.f ? 1024.f : -1024.f);
  5564. llassert(llfinite(r) && !llisnan(r));
  5565. LLVector4a sdir((t2 * x1 - t1 * x2) * r, (t2 * y1 - t1 * y2) * r,
  5566. (t2 * z1 - t1 * z2) * r);
  5567. LLVector4a tdir((s1 * x2 - s2 * x1) * r, (s1 * y2 - s2 * y1) * r,
  5568. (s1 * z2 - s2 * z1) * r);
  5569. tan1[i1].add(sdir);
  5570. tan1[i2].add(sdir);
  5571. tan1[i3].add(sdir);
  5572. tan2[i1].add(tdir);
  5573. tan2[i2].add(tdir);
  5574. tan2[i3].add(tdir);
  5575. }
  5576. LLVector4a n, ncrosst, tsubn;
  5577. for (U32 a = 0; a < vertex_count; ++a)
  5578. {
  5579. n = normp[a];
  5580. const LLVector4a& t = tan1[a];
  5581. ncrosst.setCross3(n, t);
  5582. // Gram-Schmidt orthogonalize
  5583. n.mul(n.dot3(t).getF32());
  5584. tsubn.setSub(t, n);
  5585. if (tsubn.dot3(tsubn).getF32() > F_APPROXIMATELY_ZERO)
  5586. {
  5587. tsubn.normalize3fast();
  5588. // Calculate handedness
  5589. F32 handedness = ncrosst.dot3(tan2[a]).getF32() < 0.f ? -1.f : 1.f;
  5590. tsubn.getF32ptr()[3] = handedness;
  5591. tangentp[a] = tsubn;
  5592. }
  5593. else
  5594. {
  5595. // Degenerate, make up a value
  5596. tangentp[a].set(0.f, 0.f, 1.f, 1.f);
  5597. }
  5598. }
  5599. free_volume_mem(tan1);
  5600. return true;
  5601. }
  5602. void LLVolumeFace::createTangents()
  5603. {
  5604. if (!mTangents)
  5605. {
  5606. if (!allocateTangents(mNumVertices))
  5607. {
  5608. LLMemory::allocationFailed();
  5609. llwarns << "Out of memory error while calculating tangents !"
  5610. << llendl;
  5611. return;
  5612. }
  5613. // Generate tangents
  5614. LLVector4a* ptr = (LLVector4a*)mTangents;
  5615. LLVector4a* end = mTangents + mNumVertices;
  5616. while (ptr < end)
  5617. {
  5618. (*ptr++).clear();
  5619. }
  5620. if (!calculate_tangent_array(mNumVertices, mPositions, mNormals,
  5621. mTexCoords, mNumIndices / 3, mIndices,
  5622. mTangents))
  5623. {
  5624. LLMemory::allocationFailed();
  5625. llwarns << "Out of memory error while calculating tangents !"
  5626. << llendl;
  5627. return;
  5628. }
  5629. // Normalize normals
  5630. for (S32 i = 0; i < mNumVertices; ++i)
  5631. {
  5632. // Bump map/planar projection code requires normals to be
  5633. // normalized
  5634. mNormals[i].normalize3fast();
  5635. }
  5636. }
  5637. }
  5638. bool LLVolumeFace::resizeVertices(S32 num_verts)
  5639. {
  5640. if (mPositions)
  5641. {
  5642. free_volume_mem_64(mPositions);
  5643. mPositions = NULL;
  5644. }
  5645. // NOTE: mNormals and mTexCoords are part of mPositions: do not free them !
  5646. mNormals = NULL;
  5647. mTexCoords = NULL;
  5648. if (mTangents)
  5649. {
  5650. free_volume_mem(mTangents);
  5651. mTangents = NULL;
  5652. }
  5653. mNumVertices = num_verts > 0 ? num_verts : 0;
  5654. mNumAllocatedVertices = mNumVertices;
  5655. if (mNumVertices)
  5656. {
  5657. // Pad texture coordinate block end to allow for QWORD reads
  5658. size_t size = (num_verts * sizeof(LLVector2) + 0xF) & ~0xF;
  5659. S32 bytes = sizeof(LLVector4a) * 2 * num_verts + size;
  5660. mPositions = (LLVector4a*)allocate_volume_mem_64(bytes);
  5661. if (!mPositions)
  5662. {
  5663. LLMemory::allocationFailed(bytes);
  5664. llwarns << "Out of memory while resizing vertex positions !"
  5665. << llendl;
  5666. mNumVertices = mNumAllocatedVertices = 0;
  5667. return false;
  5668. }
  5669. ll_assert_aligned(mPositions, 64);
  5670. // Zero all bytes to avoid non-zero padding bytes when vertex structure
  5671. // has gaps.
  5672. // See: https://github.com/zeux/meshoptimizer/blob/master/README.md in
  5673. // the "Indexing" chapter. HB
  5674. memset((void*)mPositions, 0, bytes);
  5675. mNormals = mPositions + num_verts;
  5676. mTexCoords = (LLVector2*)(mNormals + num_verts);
  5677. }
  5678. // Force update
  5679. mJointRiggingInfoTab.clear();
  5680. return true;
  5681. }
  5682. void LLVolumeFace::pushVertex(const LLVolumeFace::VertexData& cv,
  5683. S32 max_indice)
  5684. {
  5685. pushVertex(cv.getPosition(), cv.getNormal(), cv.mTexCoord, max_indice);
  5686. }
  5687. void LLVolumeFace::pushVertex(const LLVector4a& pos, const LLVector4a& norm,
  5688. const LLVector2& tc, S32 max_indice)
  5689. {
  5690. S32 new_verts = mNumVertices + 1;
  5691. if (new_verts > mNumAllocatedVertices)
  5692. {
  5693. if (new_verts < max_indice)
  5694. {
  5695. if (new_verts < max_indice / 2)
  5696. {
  5697. // It is very unlikely that we will manage to optimize beyond
  5698. // the point of halving the number of vertices...
  5699. new_verts = max_indice / 2;
  5700. }
  5701. else
  5702. {
  5703. S32 delta = llmin((max_indice - new_verts) / 2, 2);
  5704. new_verts = new_verts + delta < max_indice ? new_verts + delta
  5705. : max_indice;
  5706. }
  5707. }
  5708. S32 new_tc_size = ((new_verts * 8) + 0xF) & ~0xF;
  5709. S32 old_tc_size = ((mNumVertices * 8) + 0xF) & ~0xF;
  5710. S32 old_vsize = mNumVertices * 16;
  5711. S32 new_size = new_verts * 16 * 2 + new_tc_size;
  5712. LLVector4a* old_buf = mPositions;
  5713. mPositions = (LLVector4a*)allocate_volume_mem_64(new_size);
  5714. if (!mPositions && new_verts != mNumVertices + 1)
  5715. {
  5716. LLMemory::allocationFailed(new_size);
  5717. // out of memory: try to allocate the exact required amount instead
  5718. new_verts = mNumVertices + 1;
  5719. new_tc_size = ((new_verts * 8) + 0xF) & ~0xF;
  5720. old_tc_size = ((mNumVertices * 8) + 0xF) & ~0xF;
  5721. old_vsize = mNumVertices * 16;
  5722. new_size = new_verts * 16 * 2 + new_tc_size;
  5723. mPositions = (LLVector4a*)allocate_volume_mem_64(new_size);
  5724. }
  5725. if (!mPositions)
  5726. {
  5727. LLMemory::allocationFailed();
  5728. mPositions = old_buf;
  5729. llwarns << "Out of memory while reallocating vertex data !"
  5730. << llendl;
  5731. return;
  5732. }
  5733. // Zero all bytes to avoid non-zero padding bytes when vertex structure
  5734. // has gaps.
  5735. // See: https://github.com/zeux/meshoptimizer/blob/master/README.md in
  5736. // the "Indexing" chapter. HB
  5737. memset((void*)mPositions, 0, new_size);
  5738. mNormals = mPositions + new_verts;
  5739. mTexCoords = (LLVector2*)(mNormals + new_verts);
  5740. if (mNumVertices && old_buf)
  5741. {
  5742. LLVector4a::memcpyNonAliased16((F32*)mPositions, (F32*)old_buf,
  5743. old_vsize);
  5744. LLVector4a::memcpyNonAliased16((F32*)mNormals,
  5745. (F32*)(old_buf + mNumVertices),
  5746. old_vsize);
  5747. LLVector4a::memcpyNonAliased16((F32*)mTexCoords,
  5748. (F32*)(old_buf + mNumVertices * 2),
  5749. old_tc_size);
  5750. }
  5751. // Just clear tangents
  5752. if (mTangents)
  5753. {
  5754. free_volume_mem(mTangents);
  5755. mTangents = NULL;
  5756. }
  5757. mNumAllocatedVertices = new_verts;
  5758. }
  5759. mPositions[mNumVertices] = pos;
  5760. mNormals[mNumVertices] = norm;
  5761. mTexCoords[mNumVertices++] = tc;
  5762. }
  5763. bool LLVolumeFace::allocateTangents(S32 num_verts)
  5764. {
  5765. if (mTangents)
  5766. {
  5767. free_volume_mem(mTangents);
  5768. }
  5769. mTangents = (LLVector4a*)allocate_volume_mem(sizeof(LLVector4a) *
  5770. num_verts);
  5771. if (mTangents)
  5772. {
  5773. return true;
  5774. }
  5775. LLMemory::allocationFailed();
  5776. llwarns << "Out of memory trying to allocate " << num_verts << " tangents"
  5777. << llendl;
  5778. return false;
  5779. }
  5780. bool LLVolumeFace::allocateWeights(S32 num_verts)
  5781. {
  5782. if (mWeights)
  5783. {
  5784. free_volume_mem(mWeights);
  5785. }
  5786. mWeights = (LLVector4a*)allocate_volume_mem(sizeof(LLVector4a) *
  5787. num_verts);
  5788. if (mWeights)
  5789. {
  5790. return true;
  5791. }
  5792. LLMemory::allocationFailed();
  5793. llwarns << "Out of memory trying to allocate " << num_verts << " weigths"
  5794. << llendl;
  5795. return false;
  5796. }
  5797. bool LLVolumeFace::resizeIndices(S32 num_indices)
  5798. {
// Note: also check mIndices, since cacheOptimize() clears it before
// calling us with an unchanged count to get a fresh buffer.
if (mIndices && mNumIndices == num_indices)
{
return true;
}
  5803. if (mIndices)
  5804. {
  5805. free_volume_mem(mIndices);
  5806. }
if (num_indices < 0)
{
llwarns << "Negative number of indices passed (" << num_indices
<< "). Zeroed." << llendl;
mIndices = NULL;
mNumIndices = 0;
return false;
}
  5813. if (num_indices == 0)
  5814. {
  5815. mIndices = NULL;
  5816. mNumIndices = 0;
  5817. return true;
  5818. }
  5819. // Pad index block end to allow for QWORD reads
  5820. S32 size = ((num_indices * sizeof(U16)) + 0xF) & ~0xF;
  5821. mIndices = (U16*)allocate_volume_mem(size);
  5822. if (mIndices)
  5823. {
  5824. mNumIndices = num_indices;
  5825. return true;
  5826. }
  5827. mNumIndices = 0;
  5828. LLMemory::allocationFailed();
  5829. llwarns << "Out of memory trying to allocate " << num_indices << " indices"
  5830. << llendl;
  5831. return false;
  5832. }
  5833. void LLVolumeFace::pushIndex(const U16& idx)
  5834. {
  5835. S32 new_count = mNumIndices + 1;
  5836. S32 new_size = (new_count * 2 + 0xF) & ~0xF;
  5837. S32 old_size = (mNumIndices * 2 + 0xF) & ~0xF;
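// Indices are stored in 16-byte padded blocks, so a reallocation is only
// needed when the padded size grows; e.g. going from 7 to 8 indices stays
// within the same 16 bytes block (14 and 16 bytes both pad to 16).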
  5838. if (new_size != old_size)
  5839. {
  5840. mIndices = (U16*)realloc_volume_mem(mIndices, new_size, old_size);
  5841. ll_assert_aligned(mIndices, 16);
  5842. }
  5843. mIndices[mNumIndices++] = idx;
  5844. }
  5845. void LLVolumeFace::fillFromLegacyData(std::vector<LLVolumeFace::VertexData>& v,
  5846. std::vector<U16>& idx)
  5847. {
  5848. resizeVertices(v.size());
  5849. resizeIndices(idx.size());
  5850. for (S32 i = 0, count = v.size(); i < count; ++i)
  5851. {
  5852. mPositions[i] = v[i].getPosition();
  5853. mNormals[i] = v[i].getNormal();
  5854. mTexCoords[i] = v[i].mTexCoord;
  5855. }
  5856. for (S32 i = 0, count = idx.size(); i < count; ++i)
  5857. {
  5858. mIndices[i] = idx[i];
  5859. }
  5860. }
  5861. bool LLVolumeFace::createSide(LLVolume* volume, bool partial_build)
  5862. {
  5863. bool flat = (mTypeMask & FLAT_MASK) != 0;
  5864. U8 sculpt_type = volume->getParams().getSculptType();
  5865. U8 sculpt_stitching = sculpt_type & LL_SCULPT_TYPE_MASK;
  5866. bool sculpt_invert = (sculpt_type & LL_SCULPT_FLAG_INVERT) != 0;
  5867. bool sculpt_mirror = (sculpt_type & LL_SCULPT_FLAG_MIRROR) != 0;
  5868. bool sculpt_reverse_horizontal = sculpt_invert ? !sculpt_mirror
  5869. : sculpt_mirror;
  5870. S32 num_vertices, num_indices;
  5871. const LLAlignedArray<LLVector4a, 64>& mesh = volume->getMesh();
  5872. const LLAlignedArray<LLVector4a, 64>& profile =
  5873. volume->getProfile().mVertices;
  5874. const LLAlignedArray<LLPath::PathPt, 64>& path_data =
  5875. volume->getPath().mPath;
  5876. S32 max_s = volume->getProfile().getTotal();
  5877. S32 s, t, i;
  5878. F32 ss, tt;
  5879. num_vertices = mNumS * mNumT;
  5880. num_indices = (mNumS - 1) * (mNumT - 1) * 6;
  5881. if (num_vertices > mNumVertices || num_indices > mNumIndices)
  5882. {
  5883. partial_build = false;
  5884. }
  5885. if (!partial_build)
  5886. {
  5887. resizeVertices(num_vertices);
  5888. resizeIndices(num_indices);
  5889. if (!volume->isMeshAssetLoaded())
  5890. {
  5891. mEdge.resize(num_indices);
  5892. }
  5893. }
  5894. LLVector4a* pos = (LLVector4a*)mPositions;
  5895. LLVector2* tc = (LLVector2*)mTexCoords;
  5896. F32 begin_stex = llfloor(profile[mBeginS][2]);
  5897. S32 num_s = ((mTypeMask & INNER_MASK) &&
  5898. (mTypeMask & FLAT_MASK) && mNumS > 2) ? mNumS / 2 : mNumS;
  5899. S32 cur_vertex = 0;
  5900. S32 end_t = mBeginT + mNumT;
  5901. bool test = (mTypeMask & INNER_MASK) && (mTypeMask & FLAT_MASK) &&
  5902. mNumS > 2;
  5903. // Copy the vertices into the array
  5904. for (t = mBeginT; t < end_t; ++t)
  5905. {
  5906. tt = path_data[t].mTexT;
  5907. for (s = 0; s < num_s; ++s)
  5908. {
  5909. if (mTypeMask & END_MASK)
  5910. {
  5911. if (s)
  5912. {
  5913. ss = 1.f;
  5914. }
  5915. else
  5916. {
  5917. ss = 0.f;
  5918. }
  5919. }
  5920. // Get s value for tex-coord.
  5921. else
  5922. {
  5923. S32 index = mBeginS + s;
  5924. if (index >= (S32)profile.size())
  5925. {
  5926. ss = flat ? 1.f - begin_stex : 1.f;
  5927. }
  5928. else if (flat)
  5929. {
  5930. ss = profile[index][2] - begin_stex;
  5931. }
  5932. else
  5933. {
  5934. ss = profile[index][2];
  5935. }
  5936. }
  5937. if (sculpt_reverse_horizontal)
  5938. {
  5939. ss = 1.f - ss;
  5940. }
  5941. // Check to see if this triangle wraps around the array.
  5942. if (mBeginS + s >= max_s)
  5943. {
  5944. // We are wrapping
  5945. i = mBeginS + s + max_s * (t - 1);
  5946. }
  5947. else
  5948. {
  5949. i = mBeginS + s + max_s * t;
  5950. }
  5951. mesh[i].store4a((F32*)(pos + cur_vertex));
  5952. tc[cur_vertex++].set(ss, tt);
  5953. if (test && s > 0)
  5954. {
  5955. mesh[i].store4a((F32*)(pos + cur_vertex));
  5956. tc[cur_vertex++].set(ss, tt);
  5957. }
  5958. }
  5959. if (test)
  5960. {
  5961. if (mTypeMask & OPEN_MASK)
  5962. {
  5963. s = num_s - 1;
  5964. }
  5965. else
  5966. {
  5967. s = 0;
  5968. }
  5969. i = mBeginS + s + max_s * t;
  5970. ss = profile[mBeginS + s][2] - begin_stex;
  5971. mesh[i].store4a((F32*)(pos + cur_vertex));
  5972. tc[cur_vertex++].set(ss, tt);
  5973. }
  5974. }
  5975. mCenter->clear();
  5976. LLVector4a* cur_pos = pos;
  5977. LLVector4a* end_pos = pos + mNumVertices;
  5978. // Get bounding box for this side
  5979. LLVector4a face_min;
  5980. LLVector4a face_max;
  5981. face_min = face_max = *cur_pos++;
  5982. while (cur_pos < end_pos)
  5983. {
  5984. update_min_max(face_min, face_max, *cur_pos++);
  5985. }
  5986. mExtents[0] = face_min;
  5987. mExtents[1] = face_max;
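// Texture coordinates are scanned two UV pairs at a time as LLVector4a
// loads (hence the duplication of the last UV for odd counts below); the
// per-component min/max therefore holds two interleaved UV pairs, folded
// together afterwards with llmin()/llmax().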
  5988. U32 tc_count = mNumVertices;
  5989. if (tc_count % 2 == 1)
  5990. {
  5991. // Odd number of texture coordinates, duplicate last entry to padded
  5992. // end of array
  5993. ++tc_count;
  5994. mTexCoords[mNumVertices] = mTexCoords[mNumVertices - 1];
  5995. }
  5996. LLVector4a* cur_tc = (LLVector4a*)mTexCoords;
  5997. LLVector4a* end_tc = (LLVector4a*)(mTexCoords + tc_count);
  5998. LLVector4a tc_min;
  5999. LLVector4a tc_max;
  6000. tc_min = tc_max = *cur_tc++;
  6001. while (cur_tc < end_tc)
  6002. {
  6003. update_min_max(tc_min, tc_max, *cur_tc++);
  6004. }
  6005. F32* minp = tc_min.getF32ptr();
  6006. F32* maxp = tc_max.getF32ptr();
  6007. mTexCoordExtents[0].mV[0] = llmin(minp[0], minp[2]);
  6008. mTexCoordExtents[0].mV[1] = llmin(minp[1], minp[3]);
  6009. mTexCoordExtents[1].mV[0] = llmax(maxp[0], maxp[2]);
  6010. mTexCoordExtents[1].mV[1] = llmax(maxp[1], maxp[3]);
  6011. mCenter->setAdd(face_min, face_max);
  6012. mCenter->mul(0.5f);
  6013. S32 cur_index = 0;
  6014. S32 cur_edge = 0;
  6015. bool flat_face = (mTypeMask & FLAT_MASK) != 0;
  6016. if (!partial_build)
  6017. {
  6018. // Now we generate the indices.
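// Two triangles per grid quad, plus one mEdge entry per triangle edge
// giving the index of the neighboring triangle, or -1 when the edge is
// open (does not wrap) on that side.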
  6019. for (t = 0; t < mNumT - 1; ++t)
  6020. {
  6021. for (s = 0; s < mNumS - 1; ++s)
  6022. {
  6023. S32 bottom_left = s + mNumS * t;
  6024. mIndices[cur_index++] = bottom_left;
  6025. S32 top_right = s + 1 + mNumS * (t + 1);
  6026. mIndices[cur_index++] = top_right;
  6027. mIndices[cur_index++] = s + mNumS * (t + 1); // top left
  6028. mIndices[cur_index++] = bottom_left;
  6029. mIndices[cur_index++] = s + 1 + mNumS * t; // bottom right
  6030. mIndices[cur_index++] = top_right;
  6031. // Bottom left/top right neighbor face
  6032. mEdge[cur_edge++] = (mNumS - 1) * 2 * t + s * 2 + 1;
  6033. if (t < mNumT - 2)
  6034. {
  6035. // Top right/top left neighbor face
  6036. mEdge[cur_edge++] = (mNumS - 1) * 2 * (t + 1) + s * 2 + 1;
  6037. }
  6038. else if (mNumT <= 3 || volume->getPath().isOpen())
  6039. {
  6040. // No neighbor
  6041. mEdge[cur_edge++] = -1;
  6042. }
  6043. else
  6044. {
  6045. // Wrap on T
  6046. mEdge[cur_edge++] = s * 2 + 1;
  6047. }
  6048. if (s > 0)
  6049. {
  6050. // Top left/bottom left neighbor face
  6051. mEdge[cur_edge++] = (mNumS - 1) * 2 * t + s * 2 - 1;
  6052. }
  6053. else if (flat_face || volume->getProfile().isOpen())
  6054. {
  6055. // No neighbor
  6056. mEdge[cur_edge++] = -1;
  6057. }
  6058. else
  6059. {
  6060. // Wrap on S
  6061. mEdge[cur_edge++] = (mNumS - 1) * 2 * t + (mNumS - 2) * 2 +
  6062. 1;
  6063. }
  6064. if (t > 0)
  6065. {
  6066. // bottom left/bottom right neighbor face
  6067. mEdge[cur_edge++] = (mNumS - 1) * 2 * (t - 1) + s * 2;
  6068. }
  6069. else if (mNumT <= 3 || volume->getPath().isOpen())
  6070. {
  6071. // No neighbor
  6072. mEdge[cur_edge++] = -1;
  6073. }
  6074. else
  6075. {
  6076. // Wrap on T
  6077. mEdge[cur_edge++] = (mNumS - 1) * 2 * (mNumT - 2) + s * 2;
  6078. }
  6079. if (s < mNumS - 2)
  6080. {
  6081. // Bottom right/top right neighbor face
  6082. mEdge[cur_edge++] = (mNumS - 1) * 2 * t + (s + 1) * 2;
  6083. }
  6084. else if (flat_face || volume->getProfile().isOpen())
  6085. {
  6086. // No neighbor
  6087. mEdge[cur_edge++] = -1;
  6088. }
  6089. else
  6090. {
  6091. // Wrap on S
  6092. mEdge[cur_edge++] = (mNumS - 1) * 2 * t;
  6093. }
  6094. // Top right/bottom left neighbor face
  6095. mEdge[cur_edge++] = (mNumS - 1) * 2 * t + s * 2;
  6096. }
  6097. }
  6098. }
  6099. // Clear normals
  6100. F32* dst = (F32*)mNormals;
  6101. F32* end = (F32*)(mNormals + mNumVertices);
  6102. LLVector4a zero = LLVector4a::getZero();
  6103. while (dst < end)
  6104. {
  6105. zero.store4a(dst);
  6106. dst += 4;
  6107. }
  6108. // Generate normals
  6109. U32 count = mNumIndices / 3;
  6110. LLVector4a* norm = mNormals;
  6111. // thread_local instead of static, in case this method would be called by
  6112. // another thread than the main thread in the future (I do not think it is
  6113. // the case for now, but what happened with LLProfile::addHole() makes me
  6114. // wary). HB
  6115. thread_local LLAlignedArray<LLVector4a, 64> triangle_normals;
  6116. triangle_normals.resize(count);
  6117. LLVector4a* output = triangle_normals.mArray;
  6118. LLVector4a* end_output = output + count;
  6119. U16* idx = mIndices;
  6120. LLVector4a b, v1, v2;
  6121. while (output < end_output)
  6122. {
  6123. b.load4a((F32*)(pos + idx[0]));
  6124. v1.load4a((F32*)(pos + idx[1]));
  6125. v2.load4a((F32*)(pos + idx[2]));
  6126. // Calculate triangle normal
  6127. LLVector4a a;
  6128. a.setSub(b, v1);
  6129. b.sub(v2);
  6130. LLQuad& vector1 = *((LLQuad*)&v1);
  6131. LLQuad& vector2 = *((LLQuad*)&v2);
  6132. LLQuad& amQ = *((LLQuad*)&a);
  6133. LLQuad& bmQ = *((LLQuad*)&b);
  6134. // Vectors are stored in memory in w, z, y, x order from high to low
  6135. // Set vector1 = { a[W], a[X], a[Z], a[Y] }
  6136. vector1 = _mm_shuffle_ps(amQ, amQ, _MM_SHUFFLE(3, 0, 2, 1));
  6137. // Set vector2 = { b[W], b[Y], b[X], b[Z] }
  6138. vector2 = _mm_shuffle_ps(bmQ, bmQ, _MM_SHUFFLE(3, 1, 0, 2));
// vector2 = { a[W]*b[W], a[X]*b[Y], a[Z]*b[X], a[Y]*b[Z] }
vector2 = _mm_mul_ps(vector1, vector2);
// amQ = { a[W], a[Y], a[X], a[Z] }
amQ = _mm_shuffle_ps(amQ, amQ, _MM_SHUFFLE(3, 1, 0, 2));
// bmQ = { b[W], b[X], b[Z], b[Y] }
bmQ = _mm_shuffle_ps(bmQ, bmQ, _MM_SHUFFLE(3, 0, 2, 1));
// vector1 = { 0, a[X]*b[Y] - a[Y]*b[X], a[Z]*b[X] - a[X]*b[Z],
// a[Y]*b[Z] - a[Z]*b[Y] }, i.e. the cross product of a and b.
vector1 = _mm_sub_ps(vector2, _mm_mul_ps(amQ, bmQ));
  6148. llassert(v1.isFinite3());
  6149. v1.store4a((F32*)output++);
  6150. idx += 3;
  6151. }
  6152. idx = mIndices;
  6153. LLVector4a* src = triangle_normals.mArray;
  6154. LLVector4a c, n0, n1, n2;
  6155. for (U32 i = 0; i < count; ++i) // For each triangle
  6156. {
  6157. c.load4a((F32*)src++);
  6158. LLVector4a* n0p = norm + idx[0];
  6159. LLVector4a* n1p = norm + idx[1];
  6160. LLVector4a* n2p = norm + idx[2];
  6161. idx += 3;
  6162. n0.load4a((F32*)n0p);
  6163. n1.load4a((F32*)n1p);
  6164. n2.load4a((F32*)n2p);
  6165. n0.add(c);
  6166. n1.add(c);
  6167. n2.add(c);
  6168. llassert(c.isFinite3());
  6169. // Even out quad contributions
  6170. switch (i % 2 + 1)
  6171. {
  6172. case 0: n0.add(c); break;
  6173. case 1: n1.add(c); break;
  6174. case 2: n2.add(c); break;
  6175. }
  6176. n0.store4a((F32*)n0p);
  6177. n1.store4a((F32*)n1p);
  6178. n2.store4a((F32*)n2p);
  6179. }
  6180. // Adjust normals based on wrapping and stitching
  6181. LLVector4a top;
  6182. top.setSub(pos[0], pos[mNumS * (mNumT - 2)]);
  6183. bool s_bottom_converges = top.dot3(top) < 0.000001f;
  6184. top.setSub(pos[mNumS - 1], pos[mNumS * (mNumT - 2) + mNumS - 1]);
  6185. bool s_top_converges = top.dot3(top) < 0.000001f;
  6186. // Logic for non-sculpt volumes:
  6187. if (sculpt_stitching == LL_SCULPT_TYPE_NONE)
  6188. {
  6189. if (!volume->getPath().isOpen())
  6190. {
  6191. // Wrap normals on T
  6192. LLVector4a n;
  6193. for (S32 i = 0; i < mNumS; ++i)
  6194. {
  6195. n.setAdd(norm[i], norm[mNumS * (mNumT - 1) + i]);
  6196. norm[i] = n;
  6197. norm[mNumS * (mNumT - 1) + i] = n;
  6198. }
  6199. }
  6200. if (!s_bottom_converges && !volume->getProfile().isOpen())
  6201. {
  6202. // Wrap normals on S
  6203. LLVector4a n;
  6204. for (S32 i = 0; i < mNumT; ++i)
  6205. {
  6206. n.setAdd(norm[mNumS * i], norm[mNumS * i + mNumS - 1]);
  6207. norm[mNumS * i] = n;
  6208. norm[mNumS * i + mNumS - 1] = n;
  6209. }
  6210. }
  6211. if (volume->getPathType() == LL_PCODE_PATH_CIRCLE &&
  6212. (volume->getProfileType() &
  6213. LL_PCODE_PROFILE_MASK) == LL_PCODE_PROFILE_CIRCLE_HALF)
  6214. {
  6215. if (s_bottom_converges)
  6216. {
  6217. // All lower S have same normal
  6218. for (S32 i = 0; i < mNumT; ++i)
  6219. {
  6220. norm[mNumS * i].set(1, 0, 0);
  6221. }
  6222. }
  6223. if (s_top_converges)
  6224. {
  6225. // All upper S have same normal
  6226. for (S32 i = 0; i < mNumT; ++i)
  6227. {
  6228. norm[mNumS * i + mNumS - 1].set(-1, 0, 0);
  6229. }
  6230. }
  6231. }
  6232. }
    else    // Logic for sculpt volumes
    {
        bool average_poles = false;
        bool wrap_s = false;
        bool wrap_t = false;

        if (sculpt_stitching == LL_SCULPT_TYPE_SPHERE)
        {
            average_poles = true;
        }

        if (sculpt_stitching == LL_SCULPT_TYPE_SPHERE ||
            sculpt_stitching == LL_SCULPT_TYPE_TORUS ||
            sculpt_stitching == LL_SCULPT_TYPE_CYLINDER)
        {
            wrap_s = true;
        }

        if (sculpt_stitching == LL_SCULPT_TYPE_TORUS)
        {
            wrap_t = true;
        }

        if (average_poles)
        {
            // Average normals for north pole
            LLVector4a average;
            average.clear();
            for (S32 i = 0; i < mNumS; ++i)
            {
                average.add(norm[i]);
            }
            // Set average
            for (S32 i = 0; i < mNumS; ++i)
            {
                norm[i] = average;
            }

            // Average normals for south pole
            average.clear();
            for (S32 i = 0; i < mNumS; ++i)
            {
                average.add(norm[i + mNumS * (mNumT - 1)]);
            }
            // Set average
            for (S32 i = 0; i < mNumS; ++i)
            {
                norm[i + mNumS * (mNumT - 1)] = average;
            }
        }

        if (wrap_s)
        {
            LLVector4a n;
            for (S32 i = 0; i < mNumT; ++i)
            {
                n.setAdd(norm[mNumS * i], norm[mNumS * i + mNumS - 1]);
                norm[mNumS * i] = n;
                norm[mNumS * i + mNumS - 1] = n;
            }
        }

        if (wrap_t)
        {
            LLVector4a n;
            for (S32 i = 0; i < mNumS; ++i)
            {
                n.setAdd(norm[i], norm[mNumS * (mNumT - 1) + i]);
                norm[i] = n;
                norm[mNumS * (mNumT - 1) + i] = n;
            }
        }
    }

    return true;
}
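// Illustrative sketch (not part of the original source): the seam-wrapping
// idea used above, applied to a standalone grid of normals. The helper name,
// 'normals', 'num_s' and 'num_t' are hypothetical; each pair of seam vertices
// ends up sharing the same summed normal, exactly as in the wrap_s case above.
#if 0
static void wrap_normals_on_s(LLVector4a* normals, S32 num_s, S32 num_t)
{
    LLVector4a n;
    for (S32 t = 0; t < num_t; ++t)
    {
        LLVector4a& first = normals[num_s * t];
        LLVector4a& last = normals[num_s * t + num_s - 1];
        n.setAdd(first, last);
        first = n;
        last = n;
    }
}
#endif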
// Used to be a validate_face(const LLVolumeFace& face) global function in
// llmodel.cpp. HB
bool LLVolumeFace::validate(bool check_nans) const
{
    // Note: this check does not exist in LL's viewer, but it makes it
    // possible to prevent crashes when attempting to load an invalid model
    // from a file. It may however cause the mesh upload floater to abort
    // during a LOD optimization process, so I added check_nans to make this
    // check non-fatal (warning only about NaNs) when it is false. HB
    for (S32 v = 0; v < mNumVertices; ++v)
    {
        if (mPositions && !mPositions[v].isFinite3())
        {
            llwarns << "NaN position data in face found !" << llendl;
            if (check_nans)
            {
                return false;
            }
            break;
        }
        if (mNormals && !mNormals[v].isFinite3())
        {
            llwarns << "NaN normal data in face found !" << llendl;
            if (check_nans)
            {
                return false;
            }
            break;
        }
    }

    for (S32 i = 0; i < mNumIndices; ++i)
    {
        if (mIndices[i] >= mNumVertices)
        {
            llwarns << "Face has invalid index." << llendl;
            return false;
        }
    }

    if (mNumIndices % 3 != 0 || mNumIndices == 0)
    {
        llwarns << "Face has invalid number of indices." << llendl;
        return false;
    }

#if 0
    const LLVector4a scale(0.5f);
    for (S32 i = 0; i < mNumIndices; i += 3)
    {
        U16 idx1 = mIndices[i];
        U16 idx2 = mIndices[i + 1];
        U16 idx3 = mIndices[i + 2];

        LLVector4a v1; v1.setMul(mPositions[idx1], scale);
        LLVector4a v2; v2.setMul(mPositions[idx2], scale);
        LLVector4a v3; v3.setMul(mPositions[idx3], scale);

        if (isDegenerate(v1, v2, v3))
        {
            llwarns << "Degenerate face found !" << llendl;
            return false;
        }
    }
#endif

    return true;
}
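// Illustrative usage sketch (not part of the original source): how a caller
// could reject a bad face right after loading it. The 'face' variable and the
// surrounding context are hypothetical; only LLVolumeFace::validate() is real.
#if 0
    LLVolumeFace face;
    // ... fill the face from a mesh asset ...
    if (!face.validate(true))   // Fatal on NaNs as well as on bad indices
    {
        llwarns << "Rejecting invalid volume face" << llendl;
        return false;
    }
#endif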
// Scalar (non-SSE) 3-component dot product, used for the point-triangle
// distance check in isDegenerate() below.
LL_INLINE static F32 dot3fpu(const LLVector4a& a, const LLVector4a& b)
{
    F32 p0 = a[0] * b[0];
    F32 p1 = a[1] * b[1];
    F32 p2 = a[2] * b[2];
    return p0 + p1 + p2;
}
// Used to be a ll_is_degenerate() global function in llmodel.h. HB

#define LL_DEGENERACY_TOLERANCE 1e-7f

//static
bool LLVolumeFace::isDegenerate(const LLVector4a& a, const LLVector4a& b,
                                const LLVector4a& c)
{
    // Small area check
    LLVector4a edge1;
    edge1.setSub(a, b);
    LLVector4a edge2;
    edge2.setSub(a, c);

    //////////////////////////////////////////////////////////////////////////
    /// Linden modified
    // If no one edge is more than 10x longer than any other edge, we weaken
    // the tolerance by a factor of 1e-4f.
    F32 tolerance = LL_DEGENERACY_TOLERANCE;
    LLVector4a edge3; edge3.setSub(c, b);
    F32 len1sq = edge1.dot3(edge1).getF32();
    F32 len2sq = edge2.dot3(edge2).getF32();
    F32 len3sq = edge3.dot3(edge3).getF32();
    bool ab_ok = len1sq <= 100.f * len2sq && len1sq <= 100.f * len3sq;
    bool ac_ok = len2sq <= 100.f * len1sq && len2sq <= 100.f * len3sq;
    bool cb_ok = len3sq <= 100.f * len1sq && len3sq <= 100.f * len2sq;
    if (ab_ok && ac_ok && cb_ok)
    {
        tolerance *= 1e-4f;
    }
    /// End of modifications
    //////////////////////////////////////////////////////////////////////////

    LLVector4a cross;
    cross.setCross3(edge1, edge2);

    LLVector4a edge1b;
    edge1b.setSub(b, a);
    LLVector4a edge2b;
    edge2b.setSub(b, c);
    LLVector4a crossb;
    crossb.setCross3(edge1b, edge2b);

    if (cross.dot3(cross).getF32() < tolerance ||
        crossb.dot3(crossb).getF32() < tolerance)
    {
        return true;
    }

    // Point-triangle distance check: by Cauchy-Schwarz, (q.q)*(r.r) equals
    // (q.r)^2 exactly when q and r are parallel, i.e. when a, b and c are
    // colinear.
    LLVector4a q;
    q.setSub(a, b);
    LLVector4a r;
    r.setSub(c, b);

    F32 qq = dot3fpu(q, q);
    F32 rr = dot3fpu(r, r);
    F32 qr = dot3fpu(r, q);

    F32 qqrr = qq * rr;
    F32 qrqr = qr * qr;

    return qqrr == qrqr;
}
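// Illustrative usage sketch (not part of the original source): counting the
// degenerate triangles of a face with isDegenerate(). The helper name and the
// 'face' variable are hypothetical.
#if 0
static S32 count_degenerate_triangles(const LLVolumeFace& face)
{
    S32 count = 0;
    for (S32 i = 0; i + 2 < face.mNumIndices; i += 3)
    {
        const LLVector4a& v1 = face.mPositions[face.mIndices[i]];
        const LLVector4a& v2 = face.mPositions[face.mIndices[i + 1]];
        const LLVector4a& v3 = face.mPositions[face.mIndices[i + 2]];
        if (LLVolumeFace::isDegenerate(v1, v2, v3))
        {
            ++count;
        }
    }
    return count;
}
#endif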
// ----------------------------------------------------------------------------
// LLJointRiggingInfo class.
// ----------------------------------------------------------------------------

LLJointRiggingInfo::LLJointRiggingInfo()
:   mIsRiggedTo(false)
{
    mRiggedExtents[0].clear();
    mRiggedExtents[1].clear();
}

void LLJointRiggingInfo::merge(const LLJointRiggingInfo& other)
{
    if (other.mIsRiggedTo)
    {
        if (mIsRiggedTo)
        {
            // Combine existing boxes
            update_min_max(mRiggedExtents[0], mRiggedExtents[1],
                           other.mRiggedExtents[0]);
            update_min_max(mRiggedExtents[0], mRiggedExtents[1],
                           other.mRiggedExtents[1]);
        }
        else
        {
            // Initialize box
            mIsRiggedTo = true;
            mRiggedExtents[0] = other.mRiggedExtents[0];
            mRiggedExtents[1] = other.mRiggedExtents[1];
        }
    }
}
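// Illustrative usage sketch (not part of the original source): merging the
// bounding boxes of two per-joint infos. The variable names are hypothetical,
// and setIsRiggedTo() is assumed to be the setter matching mIsRiggedTo.
#if 0
    LLJointRiggingInfo info_a, info_b;
    info_a.setIsRiggedTo(true);     // Assumed setter for mIsRiggedTo
    info_a.getRiggedExtents()[0].set(-1.f, -1.f, -1.f);
    info_a.getRiggedExtents()[1].set(1.f, 1.f, 1.f);
    // info_b is not rigged yet, so merge() simply copies info_a's box into it
    info_b.merge(info_a);
#endif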
// ----------------------------------------------------------------------------
// LLJointRiggingInfoTab class.
// ----------------------------------------------------------------------------

LLJointRiggingInfoTab::LLJointRiggingInfoTab()
:   mRigInfoPtr(NULL),
    mSize(0),
    mNeedsUpdate(true)
{
}

LLJointRiggingInfoTab::~LLJointRiggingInfoTab()
{
    clear();
}

void LLJointRiggingInfoTab::clear()
{
    if (mRigInfoPtr)
    {
        delete[] mRigInfoPtr;
        mRigInfoPtr = NULL;
        mSize = 0;
    }
}

void LLJointRiggingInfoTab::resize(U32 size)
{
    if (size == mSize)
    {
        return;
    }
    if (!size)
    {
        clear();
        return;
    }

    LLJointRiggingInfo* new_info_ptr = new LLJointRiggingInfo[size];
    if (mSize)
    {
        // Copy the rigged extents of the entries common to the old and new
        // arrays. Note that only the extents are copied: the rigged flags of
        // the new entries keep their default (false) value.
        U32 min_size = llmin(size, mSize);
        for (U32 i = 0; i < min_size; ++i)
        {
            LLVector4a* old_extents = mRigInfoPtr[i].getRiggedExtents();
            LLVector4a* new_extents = new_info_ptr[i].getRiggedExtents();
            new_extents[0] = old_extents[0];
            new_extents[1] = old_extents[1];
        }
        delete[] mRigInfoPtr;
    }
    mRigInfoPtr = new_info_ptr;
    mSize = size;
}

void LLJointRiggingInfoTab::merge(const LLJointRiggingInfoTab& src)
{
    if (src.size() > size())
    {
        resize(src.size());
    }
    U32 min_size = llmin(size(), src.size());
    for (U32 i = 0; i < min_size; ++i)
    {
        mRigInfoPtr[i].merge(src[i]);
    }
}
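// Illustrative usage sketch (not part of the original source): accumulating
// per-attachment rigging tables into an avatar-wide one with merge(), which
// grows the destination table as needed. 'attachment_tabs' is hypothetical.
#if 0
    LLJointRiggingInfoTab avatar_tab;
    for (size_t i = 0; i < attachment_tabs.size(); ++i)
    {
        avatar_tab.merge(attachment_tabs[i]);
    }
#endif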