#ifndef ENVISION_ENV_IMAGE_OPS_C_DEFINED
#define ENVISION_ENV_IMAGE_OPS_C_DEFINED

#include "Envision/env_image_ops.h"

#include "Envision/env_c_math_ops.h"
#include "Envision/env_log.h"

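/* Decimate an image by a factor of two in x and y by keeping every
   other pixel of every other row.  Worked example of the skip
   arithmetic used below: for a 5x3 input the result is 2x1; after the
   two kept pixels of a row the source pointer advances by
   w%2 + w = 1 + 5 = 6 samples, i.e. the rest of the current row plus
   one full row, which lands it at the start of the next even row. */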
void env_dec_xy(const struct env_image* src, struct env_image* result)
{
        /* do not go below 1x1 */
        if (src->dims.w <= 1 && src->dims.h <= 1)
        {
                env_img_copy_src_dst(src, result);
                return;
        }

        if (src->dims.w == 1)
        {
                env_dec_y(src, result);
                return;
        }

        if (src->dims.h == 1)
        {
                env_dec_x(src, result);
                return;
        }

        const struct env_dims dims2 = { src->dims.w / 2, src->dims.h / 2 };

        env_img_resize_dims(result, dims2);

        const intg32* sptr = env_img_pixels(src);
        intg32* dptr = env_img_pixelsw(result);

        /* after each output row, skip any odd trailing source pixel
           plus one full source row */
        const env_size_t skip = src->dims.w % 2 + src->dims.w;

        for (env_size_t j = 0; j < dims2.h; ++j)
        {
                for (env_size_t i = 0; i < dims2.w; ++i)
                {
                        *dptr++ = *sptr;
                        sptr += 2;
                }
                sptr += skip;
        }
}


/* Decimate by two in x only, keeping every other column. */
void env_dec_x(const struct env_image* src, struct env_image* result)
{
        if (src->dims.w <= 1)
        {
                env_img_copy_src_dst(src, result);
                return;
        }

        const struct env_dims dims2 = { src->dims.w / 2, src->dims.h };
        const env_size_t skip = src->dims.w % 2;
        ENV_ASSERT(dims2.w > 0);

        env_img_resize_dims(result, dims2);

        const intg32* sptr = env_img_pixels(src);
        intg32* dptr = env_img_pixelsw(result);

        for (env_size_t j = 0; j < dims2.h; ++j)
        {
                for (env_size_t i = 0; i < dims2.w; ++i)
                {
                        *dptr++ = *sptr;
                        sptr += 2;
                }
                sptr += skip;
        }
}


/* Decimate by two in y only, keeping every other row. */
void env_dec_y(const struct env_image* src, struct env_image* result)
{
        if (src->dims.h <= 1)
        {
                env_img_copy_src_dst(src, result);
                return;
        }

        const struct env_dims dims2 = { src->dims.w, src->dims.h / 2 };
        ENV_ASSERT(dims2.h > 0);

        env_img_resize_dims(result, dims2);

        const intg32* sptr = env_img_pixels(src);
        intg32* dptr = env_img_pixelsw(result);
        const env_size_t skip = dims2.w * 2;

        for (env_size_t j = 0; j < dims2.h; ++j)
        {
                for (env_size_t i = 0; i < dims2.w; ++i)
                        dptr[i] = sptr[i];

                dptr += dims2.w;
                sptr += skip;
        }
}

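/* Low-pass filter with a small (5-tap) kernel and decimate by two in
   the same pass; the x version is below and the matching y version
   follows it.  The actual filtering is delegated to
   env_c_lowpass_5_x_dec_x_fewbits_optim() and
   env_c_lowpass_5_y_dec_y_fewbits_optim() in env_c_math_ops; here we
   only check that imath->nbits plus the filter and accumulator bits
   plus one still fit within a 32-bit intermediate. */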
void env_lowpass_5_x_dec_x(const struct env_image* src,
                           const struct env_math* imath,
                           struct env_image* result)
{
        const env_size_t w = src->dims.w;
        const env_size_t h = src->dims.h;

        if (w < 2)
        {
                env_img_copy_src_dst(src, result);
                return;
        }

        const struct env_dims dims2 = { w / 2, h };
        ENV_ASSERT(dims2.w > 0);

        env_img_resize_dims(result, dims2);

#ifndef ENV_NO_DEBUG
        const env_size_t filterbits = 4;
        const env_size_t accumbits = 3;
#endif

        ENV_ASSERT((imath->nbits + filterbits + accumbits + 1) < (8*sizeof(intg32)));

        env_c_lowpass_5_x_dec_x_fewbits_optim
                (env_img_pixels(src), w, h,
                 env_img_pixelsw(result), dims2.w);
}


void env_lowpass_5_y_dec_y(const struct env_image* src,
                           const struct env_math* imath,
                           struct env_image* result)
{
        const env_size_t w = src->dims.w;
        const env_size_t h = src->dims.h;

        if (h < 2)
        {
                env_img_copy_src_dst(src, result);
                return;
        }

        const struct env_dims dims2 = { w, h / 2 };
        ENV_ASSERT(dims2.h > 0);

        env_img_resize_dims(result, dims2);

#ifndef ENV_NO_DEBUG
        const env_size_t filterbits = 4;
        const env_size_t accumbits = 3;
#endif

        ENV_ASSERT((imath->nbits + filterbits + accumbits + 1) < (8*sizeof(intg32)));

        env_c_lowpass_5_y_dec_y_fewbits_optim
                (env_img_pixels(src), w, h,
                 env_img_pixelsw(result), dims2.h);
}

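/* Separable 9-point binomial low-pass filter, x direction.  The
   kernel { 1, 8, 28, 56, 70, 56, 28, 8, 1 } used in the narrow-image
   fallback below sums to 256 == 2^8, which matches the filterbits
   value of 8 in the overflow assertion. */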
void env_lowpass_9_x(const struct env_image* source,
                     const struct env_math* imath,
                     struct env_image* result)
{
        ENV_ASSERT(env_dims_equal(result->dims, source->dims));

#ifndef ENV_NO_DEBUG
        const env_size_t filterbits = 8; /* the 9 kernel weights sum to 256 == 2^8 */
        const env_size_t accumbits = 4;
#endif

        ENV_ASSERT((imath->nbits + filterbits + accumbits + 1) < (8*sizeof(intg32)));

        const env_size_t w = source->dims.w;
        const env_size_t h = source->dims.h;

        if (w < 2)
        {
                env_img_copy_src_dst(source, result);
                return;
        }

        if (w < 9)
        {
                /* source too narrow for the optimized routine: convolve
                   directly with the flipped 9-tap kernel, clipping it
                   at the image boundaries */
                const intg32 hf_flipped[9] = { 1, 8, 28, 56, 70, 56, 28, 8, 1 };
                const env_size_t hfs = 9;

                const intg32* src = env_img_pixels(source);
                intg32* dst = env_img_pixelsw(result);

                ENV_ASSERT(hfs & 1); /* filter size must be odd */
                const env_size_t hfs2 = (hfs - 1) / 2;

                for (env_size_t j = 0; j < h; ++j)
                        for (env_size_t i = 0; i < w; ++i)
                        {
                                intg32 sum = 0;
                                intg32 val = 0;
                                for (env_size_t k = 0; k < hfs; ++k)
                                {
                                        /* skip taps that fall outside the image */
                                        if (i + k < hfs2
                                            || i + k >= w + hfs2)
                                                continue;

                                        val += src[(env_ssize_t) k
                                                   - (env_ssize_t) hfs2]
                                                * hf_flipped[k];
                                        sum += hf_flipped[k];
                                }

                                /* normalize by the weights actually used */
                                *dst++ = val / sum;
                                ++src;
                        }
                return;
        }

        env_c_lowpass_9_x_fewbits_optim(env_img_pixels(source), w, h,
                                        env_img_pixelsw(result));
}


void env_lowpass_9_y(const struct env_image* source,
                     const struct env_math* imath,
                     struct env_image* result)
{
        ENV_ASSERT(env_dims_equal(result->dims, source->dims));

#ifndef ENV_NO_DEBUG
        const env_size_t filterbits = 8; /* the 9 kernel weights sum to 256 == 2^8 */
        const env_size_t accumbits = 4;
#endif

        ENV_ASSERT((imath->nbits + filterbits + accumbits + 1) < (8*sizeof(intg32)));

        const env_size_t w = source->dims.w;
        const env_size_t h = source->dims.h;

        /* the h < 2 case is expected to be handled by the caller (see
           env_lowpass_9() and env_downsize_9_inplace()) */
        ENV_ASSERT(h >= 2);

        if (h < 9)
        {
                /* source too short for the optimized routine: convolve
                   directly with the flipped 9-tap kernel, clipping it
                   at the image boundaries */
                const intg32 vf_flipped[9] = { 1, 8, 28, 56, 70, 56, 28, 8, 1 };
                const env_size_t vfs = 9;

                const intg32* src = env_img_pixels(source);
                intg32* dst = env_img_pixelsw(result);

                ENV_ASSERT(vfs & 1); /* filter size must be odd */
                const env_size_t vfs2 = (vfs - 1) / 2;

                for (env_size_t j = 0; j < h; ++j)
                        for (env_size_t i = 0; i < w; ++i)
                        {
                                intg32 sum = 0;
                                intg32 val = 0;
                                for (env_size_t k = 0; k < vfs; ++k)
                                {
                                        /* skip taps that fall outside the image */
                                        if (j + k < vfs2
                                            || j + k >= h + vfs2)
                                                continue;

                                        val += src[w *
                                                   ((env_ssize_t) k
                                                    - (env_ssize_t) vfs2)]
                                                * vf_flipped[k];
                                        sum += vf_flipped[k];
                                }

                                /* normalize by the weights actually used */
                                *dst++ = val / sum;
                                ++src;
                        }

                return;
        }

        env_c_lowpass_9_y_fewbits_optim(env_img_pixels(source), w, h,
                                        env_img_pixelsw(result));
}


/* Separable 9-tap low-pass filter: x pass followed by y pass. */
void env_lowpass_9(const struct env_image* src,
                   const struct env_math* imath,
                   struct env_image* result)
{
        ENV_ASSERT(env_dims_equal(result->dims, src->dims));

        struct env_image tmp1;
        env_img_init(&tmp1, src->dims);
        env_lowpass_9_x(src, imath, &tmp1);
        if (tmp1.dims.h >= 2)
                env_lowpass_9_y(&tmp1, imath, result);
        else
                env_img_swap(&tmp1, result);
        env_img_make_empty(&tmp1);
}

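/* Combine two quadrature filter outputs into a local energy measure.
   Instead of sqrt(s1^2 + s2^2), the loop below uses the integer
   estimate max(|s1|,|s2|) + min(|s1|,|s2|)/2; in exact arithmetic this
   overestimates the true magnitude by at most about 12% (worst case
   when the smaller input is half the larger one), and the halving by
   right shift truncates. */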
void env_quad_energy(const struct env_image* img1,
                     const struct env_image* img2,
                     struct env_image* result)
{
        ENV_ASSERT(env_dims_equal(img1->dims, img2->dims));
        ENV_ASSERT(env_dims_equal(img1->dims, result->dims));

        const intg32* s1ptr = env_img_pixels(img1);
        const intg32* s2ptr = env_img_pixels(img2);
        intg32* dptr = env_img_pixelsw(result);

        const env_size_t sz = env_img_size(img1);

        for (env_size_t i = 0; i < sz; ++i)
        {
                const intg32 s1 = ENV_ABS(s1ptr[i]);
                const intg32 s2 = ENV_ABS(s2ptr[i]);

                /* integer estimate of sqrt(s1^2 + s2^2):
                   max(s1,s2) + min(s1,s2)/2 */
                dptr[i] = (s1 > s2) ? (s1 + (s2 >> 1)) : ((s1 >> 1) + s2);
        }
}

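/* Oriented filtering for one steerable-pyramid-like channel: every
   pixel is multiplied by cos and sin of the phase
   (i*kxnumer + j*kynumer) >> kdenombits (looked up in the imath trig
   tables, with i,j centered image coordinates), both products are
   low-pass filtered, and the quadrature pair is collapsed with
   env_quad_energy().  A minimal calling sketch (the result must
   already have the same dims as the input, as the assertion below
   requires):

       struct env_image out;
       env_img_init(&out, src->dims);
       env_steerable_filter(src, kxnumer, kynumer, kdenombits,
                            imath, &out);
       env_img_make_empty(&out);   // release when done
*/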
void env_steerable_filter(const struct env_image* src,
                          const intg32 kxnumer, const intg32 kynumer,
                          const env_size_t kdenombits,
                          const struct env_math* imath,
                          struct env_image* result)
{
        ENV_ASSERT(env_dims_equal(result->dims, src->dims));

        struct env_image re; env_img_init(&re, src->dims);
        struct env_image im; env_img_init(&im, src->dims);
        const intg32* sptr = env_img_pixels(src);
        intg32* reptr = env_img_pixelsw(&re);
        intg32* imptr = env_img_pixelsw(&im);

        /* (i,j) coordinates run over [-w2l,w2r) x [-h2l,h2r), i.e.
           they are centered on the image */
        const env_ssize_t w2l = ((env_ssize_t) src->dims.w) / 2;
        const env_ssize_t w2r = ((env_ssize_t) src->dims.w) - w2l;
        const env_ssize_t h2l = ((env_ssize_t) src->dims.h) / 2;
        const env_ssize_t h2r = ((env_ssize_t) src->dims.h) - h2l;

        /* check that the phase term i*kxnumer + j*kynumer cannot
           overflow for any pixel position */
        ENV_ASSERT((INTG32_MAX / (ENV_ABS(kxnumer) + ENV_ABS(kynumer))) > (w2r + h2r));

        ENV_ASSERT((2 * ENV_TRIG_NBITS + 1) < 8*sizeof(intg32));

#ifndef ENV_NO_DEBUG
        const intg32 mdcutoff = INTG32_MAX >> (ENV_TRIG_NBITS+1);
#endif

        for (env_ssize_t j = -h2l; j < h2r; ++j)
                for (env_ssize_t i = -w2l; i < w2r; ++i)
                {
                        const intg32 arg = (i * kxnumer + j * kynumer) >> kdenombits;

                        env_ssize_t idx = arg % ENV_TRIG_TABSIZ;
                        if (idx < 0) idx += ENV_TRIG_TABSIZ;

                        const intg32 sval = *sptr++;

                        ENV_ASSERT(ENV_ABS(sval) < mdcutoff);

                        *reptr++ = (sval * imath->costab[idx]) >> (ENV_TRIG_NBITS+1);
                        *imptr++ = (sval * imath->sintab[idx]) >> (ENV_TRIG_NBITS+1);
                }

        env_lowpass_9(&re, imath, result);
        env_img_swap(&re, result);

        env_lowpass_9(&im, imath, result);
        env_img_swap(&im, result);

        env_quad_energy(&re, &im, result);

        env_img_make_empty(&re);
        env_img_make_empty(&im);
}

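/* Linearly attenuate a border of `size` pixels on each side of the
   image, in place.  A pixel on the outermost row or column is scaled
   by roughly 1/(size+1), the next one in by 2/(size+1), and so on
   (the integer division truncates).  For size == 2, for example, the
   two outermost rows are scaled by about 1/3 and 2/3. */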
void env_attenuate_borders_inplace(struct env_image* a, env_size_t size)
{
        ENV_ASSERT(env_img_initialized(a));

        struct env_dims dims = a->dims;

        if (size * 2 > dims.w) size = dims.w / 2;
        if (size * 2 > dims.h) size = dims.h / 2;
        if (size < 1) return;

        const intg32 size_plus_1 = (intg32) (size+1);

        /* top rows */
        intg32 coeff = 1;
        intg32* aptr = env_img_pixelsw(a);
        for (env_size_t y = 0; y < size; y ++)
        {
                for (env_size_t x = 0; x < dims.w; x ++)
                {
                        *aptr = (*aptr / size_plus_1) * coeff;
                        ++aptr;
                }
                ++coeff;
        }

        /* left and right columns: start again from the top */
        aptr = env_img_pixelsw(a);
        for (env_size_t y = 0; y < dims.h; y ++)
        {
                coeff = 1;
                for (env_size_t x = 0; x < size; x ++)
                {
                        *(aptr + dims.w - 1 - x * 2) =
                                (*(aptr + dims.w - 1 - x * 2) / size_plus_1) * coeff;

                        *aptr = (*aptr / size_plus_1) * coeff;
                        ++aptr;
                        ++coeff;
                }
                aptr += dims.w - size;
        }

        /* bottom rows */
        aptr = env_img_pixelsw(a) + (dims.h - size) * dims.w;
        coeff = size;
        for (env_size_t y = dims.h - size; y < dims.h; y ++)
        {
                for (env_size_t x = 0; x < dims.w; ++x)
                {
                        *aptr = (*aptr / size_plus_1) * coeff;
                        ++aptr;
                }
                --coeff;
        }
}

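/* Build a high-pass pyramid: at each level the current image is
   low-pass filtered with the separable 9-tap kernel, the difference
   image-minus-lowpass is stored (for levels >= firstlevel), and the
   low-pass image is decimated by two in x and y to seed the next
   level.  The number of levels is whatever depth the caller gave the
   result pyramid. */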
void env_pyr_build_hipass_9(const struct env_image* image,
                            env_size_t firstlevel,
                            const struct env_math* imath,
                            struct env_pyr* result)
{
        ENV_ASSERT(env_img_initialized(image));

        /* the caller chooses the pyramid depth by pre-initializing the
           result pyramid */
        const env_size_t depth = env_pyr_depth(result);

        if (depth == 0)
                return;

        struct env_image lpfima = env_img_initializer;

        /* level 0 is the image minus its own low-pass version */
        env_img_resize_dims(&lpfima, image->dims);
        env_lowpass_9(image, imath, &lpfima);

        if (0 == firstlevel)
        {
                env_img_resize_dims(env_pyr_imgw(result, 0),
                                    image->dims);
                env_c_image_minus_image
                        (env_img_pixels(image),
                         env_img_pixels(&lpfima),
                         env_img_size(image),
                         env_img_pixelsw(env_pyr_imgw(result, 0)));
        }

        /* deeper levels: decimate the previous low-pass image, then
           repeat the lowpass-and-subtract step */
        for (env_size_t lev = 1; lev < depth; ++lev)
        {
                struct env_image dec = env_img_initializer;
                env_dec_xy(&lpfima, &dec);
                env_img_resize_dims(&lpfima, dec.dims);
                env_lowpass_9(&dec, imath, &lpfima);

                if (lev >= firstlevel)
                {
                        env_img_resize_dims(env_pyr_imgw(result, lev),
                                            dec.dims);
                        env_c_image_minus_image
                                (env_img_pixels(&dec),
                                 env_img_pixels(&lpfima),
                                 env_img_size(&dec),
                                 env_img_pixelsw(env_pyr_imgw(result, lev)));
                }

                env_img_make_empty(&dec);
        }

        env_img_make_empty(&lpfima);
}


/* Apply env_steerable_filter() to every level of a high-pass pyramid
   and attenuate the borders of each result. */
void env_pyr_build_steerable_from_hipass_9(const struct env_pyr* hipass,
                                           const intg32 kxnumer,
                                           const intg32 kynumer,
                                           const env_size_t kdenombits,
                                           const struct env_math* imath,
                                           struct env_pyr* out)
{
        const env_size_t attenuation_width = 5;
        const env_size_t depth = env_pyr_depth(hipass);

        struct env_pyr result;
        env_pyr_init(&result, depth);

        for (env_size_t lev = 0; lev < depth; ++lev)
        {
                /* uninitialized levels (e.g. below the firstlevel used
                   when building the hipass pyramid) are skipped */
                if (!env_img_initialized(env_pyr_img(hipass, lev)))
                        continue;

                env_img_resize_dims(env_pyr_imgw(&result, lev),
                                    env_pyr_img(hipass, lev)->dims);

                env_steerable_filter(env_pyr_img(hipass, lev),
                                     kxnumer, kynumer, kdenombits,
                                     imath, env_pyr_imgw(&result, lev));

                env_attenuate_borders_inplace(env_pyr_imgw(&result, lev),
                                              attenuation_width);
        }

        env_pyr_swap(out, &result);

        env_pyr_make_empty(&result);
}

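/* Build a low-pass pyramid with the 5-tap filter-and-decimate
   routines.  Level 0 is the input image itself (stored only if
   firstlevel == 0); each further level is the previous image filtered
   and decimated first in x, then in y.  Levels below firstlevel are
   released again as soon as they are no longer needed as the source
   of the next level. */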
void env_pyr_build_lowpass_5(const struct env_image* image,
                             env_size_t firstlevel,
                             const struct env_math* imath,
                             struct env_pyr* result)
{
        ENV_ASSERT(env_img_initialized(image));
        ENV_ASSERT(env_pyr_depth(result) > 0);

        if (firstlevel == 0)
                env_img_copy_src_dst(image, env_pyr_imgw(result, 0));

        const env_size_t depth = env_pyr_depth(result);

        for (env_size_t lev = 1; lev < depth; ++lev)
        {
                struct env_image tmp1 = env_img_initializer;

                const struct env_image* prev =
                        lev == 1
                        ? image
                        : env_pyr_img(result, lev-1);

                env_lowpass_5_x_dec_x(prev,
                                      imath, &tmp1);
                env_lowpass_5_y_dec_y(&tmp1,
                                      imath, env_pyr_imgw(result, lev));

                /* drop levels below firstlevel once they are no longer
                   needed as the source of the next level */
                if ((lev - 1) < firstlevel)
                {
                        env_img_make_empty(env_pyr_imgw(result, lev-1));
                }

                env_img_make_empty(&tmp1);
        }
}


/* Repeatedly low-pass filter (9-tap) and decimate the image in place,
   `depth` times, i.e. reduce each dimension by roughly 2^depth. */
void env_downsize_9_inplace(struct env_image* src, const env_size_t depth,
                            const struct env_math* imath)
{
        for (env_size_t i = 0; i < depth; ++i)
        {
                {
                        struct env_image tmp1;
                        env_img_init(&tmp1, src->dims);
                        env_lowpass_9_x(src, imath, &tmp1);
                        env_dec_x(&tmp1, src);
                        env_img_make_empty(&tmp1);
                }
                {
                        struct env_image tmp2;
                        env_img_init(&tmp2, src->dims);
                        if (src->dims.h >= 2)
                                env_lowpass_9_y(src, imath, &tmp2);
                        else
                                env_img_swap(src, &tmp2);
                        env_dec_y(&tmp2, src);
                        env_img_make_empty(&tmp2);
                }
        }
}

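/* Bilinear rescaling to the dimensions already set on `result`.  The
   source position of destination pixel i is computed as the exact
   fraction

       x = (2*i*orig_w + orig_w - new_w) / (2*new_w)

   clamped at 0, which is the center-aligned mapping
   (i + 0.5)*orig_w/new_w - 0.5; its integer part selects source
   column x0 and its fractional part weights the interpolation toward
   column x1 (and likewise for rows). */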
void env_rescale(const struct env_image* src, struct env_image* result)
{
        const env_ssize_t new_w = (env_ssize_t) result->dims.w;
        const env_ssize_t new_h = (env_ssize_t) result->dims.h;

        ENV_ASSERT(env_img_initialized(src));
        ENV_ASSERT(new_w > 0 && new_h > 0);

        const env_ssize_t orig_w = (env_ssize_t) src->dims.w;
        const env_ssize_t orig_h = (env_ssize_t) src->dims.h;

        /* same size: just copy */
        if (new_w == orig_w && new_h == orig_h)
        {
                env_img_copy_src_dst(src, result);
                return;
        }

        intg32* dptr = env_img_pixelsw(result);
        const intg32* const sptr = env_img_pixels(src);

        /* for each destination pixel, compute the corresponding source
           position as an exact fraction (numer/denom), split it into
           an integer part (x0,y0) and a fractional part (fx,fy), and
           bilinearly interpolate between the four surrounding source
           pixels */
        for (env_ssize_t j = 0; j < new_h; ++j)
        {
                const env_ssize_t y_numer = ENV_MAX(((env_ssize_t) 0), j*2*orig_h+orig_h-new_h);
                const env_ssize_t y_denom = 2*new_h;

                const env_ssize_t y0 = y_numer / y_denom;
                const env_ssize_t y1 = ENV_MIN(y0 + 1, orig_h - 1);

                const env_ssize_t fy_numer = y_numer - y0 * y_denom;
                const env_ssize_t fy_denom = y_denom;
                ENV_ASSERT(fy_numer == (y_numer % y_denom));

                const env_ssize_t wy0 = orig_w * y0;
                const env_ssize_t wy1 = orig_w * y1;

                for (env_ssize_t i = 0; i < new_w; ++i)
                {
                        const env_ssize_t x_numer = ENV_MAX(((env_ssize_t) 0), i*2*orig_w+orig_w-new_w);
                        const env_ssize_t x_denom = 2*new_w;

                        const env_ssize_t x0 = x_numer / x_denom;
                        const env_ssize_t x1 = ENV_MIN(x0 + 1, orig_w - 1);

                        const env_ssize_t fx_numer = x_numer - x0 * x_denom;
                        const env_ssize_t fx_denom = x_denom;
                        ENV_ASSERT(fx_numer == (x_numer % x_denom));

                        const intg32 d00 = sptr[x0 + wy0];
                        const intg32 d10 = sptr[x1 + wy0];

                        const intg32 d01 = sptr[x0 + wy1];
                        const intg32 d11 = sptr[x1 + wy1];

                        /* interpolate horizontally, then vertically */
                        const intg32 dx0 =
                                d00 + ((d10 - d00) / fx_denom) * fx_numer;
                        const intg32 dx1 =
                                d01 + ((d11 - d01) / fx_denom) * fx_numer;

                        *dptr++ = dx0 + ((dx1 - dx0) / fy_denom) * fy_numer;
                }
        }
}

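/* Map normalization used when combining feature maps.
   ENV_VCXNORM_NONE just rectifies the map and optionally remaps its
   value range; ENV_VCXNORM_MAXNORM additionally finds all local
   maxima above a threshold of min + (max-min)/10 and multiplies the
   whole map by (max - avg_local_max)^2 / max, so a map with one
   strong isolated peak is boosted while a map with many comparable
   peaks is suppressed. */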
void env_max_normalize_inplace(struct env_image* src,
                               const intg32 mi, const intg32 ma,
                               const enum env_maxnorm_type normtyp,
                               const intg32 rangeThresh)
{
        switch(normtyp)
        {
        case ENV_VCXNORM_NONE:
                env_max_normalize_none_inplace(src, mi, ma, rangeThresh);
                break;
        case ENV_VCXNORM_MAXNORM:
                env_max_normalize_std_inplace(src, mi, ma, rangeThresh);
                break;
        default:
                ENV_ASSERT2(0, "Invalid normalization type");
        }
}


void env_max_normalize_none_inplace(struct env_image* src,
                                    const intg32 nmi, const intg32 nma,
                                    const intg32 rangeThresh)
{
        /* first rectify the map */
        env_c_inplace_rectify(env_img_pixelsw(src), env_img_size(src));

        /* then normalize into the given range, if any */
        intg32 mi = nmi;
        intg32 ma = nma;
        if (mi != 0 || ma != 0)
                env_c_inplace_normalize(env_img_pixelsw(src),
                                        env_img_size(src),
                                        nmi, nma, &mi, &ma, rangeThresh);
}


void env_max_normalize_std_inplace(struct env_image* src,
                                   const intg32 nmi, const intg32 nma,
                                   const intg32 rangeThresh)
{
        if (!env_img_initialized(src))
                return;

        /* first rectify the map */
        env_c_inplace_rectify(env_img_pixelsw(src), env_img_size(src));

        /* then normalize into the given range, if any */
        intg32 mi = nmi;
        intg32 ma = nma;
        if (nmi != 0 || nma != 0)
                env_c_inplace_normalize(env_img_pixelsw(src),
                                        env_img_size(src),
                                        nmi, nma, &mi, &ma, rangeThresh);

        const env_size_t w = src->dims.w;
        const env_size_t h = src->dims.h;

        /* only consider local maxima that reach at least 10% of the
           way from the minimum to the maximum */
        const intg32 thresh = mi + (ma - mi) / 10;

        /* compute the mean of all local maxima above threshold,
           ignoring the one-pixel border */
        const intg32* const dptr = env_img_pixels(src);
        intg32 lm_mean = 0;
        env_size_t numlm = 0;
        for (env_size_t j = 1; j+1 < h; ++j)
                for (env_size_t i = 1; i+1 < w; ++i)
                {
                        const env_size_t index = i + w * j;
                        const intg32 val = dptr[index];
                        if (val >= thresh &&
                            val >= dptr[index - w] &&
                            val >= dptr[index + w] &&
                            val >= dptr[index - 1] &&
                            val >= dptr[index + 1])
                        {
                                ++numlm;
                                ENV_ASSERT2(INTG32_MAX - val >= lm_mean,
                                            "integer overflow");
                                lm_mean += val;
                        }
                }

        if (numlm > 0)
                lm_mean /= numlm;

        ENV_ASSERT(ma >= lm_mean);

        intg32 factor = 1;

        if (numlm > 1)
        {
                /* several local maxima: scale by (max - mean_of_maxima)^2 / max */
                ENV_ASSERT((ma == lm_mean) ||
                           ((INTG32_MAX / (ma - lm_mean)) > (ma - lm_mean)));

                factor = ((ma - lm_mean) * (ma - lm_mean)) / ma;
        }
        else if (numlm == 1)
        {
                /* a single local maximum: scale by the maximum itself */
                factor = ma;
        }
        else
        {
                /* no local maxima found: leave the map unchanged */
        }

        if (factor != 1)
        {
                intg32* const itr = env_img_pixelsw(src);
                const env_size_t sz = env_img_size(src);
                for (env_size_t i = 0; i < sz; ++i)
                        itr[i] *= factor;
        }
}

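/* Across-scale center-surround difference.  `surround` comes from a
   coarser scale, so each surround pixel is reused for a block of
   roughly scalex-by-scaley center pixels while the pointwise
   difference is taken (absolute difference if `absol` is nonzero,
   difference rectified at zero otherwise); the borders of the result
   are attenuated afterwards. */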
void env_center_surround(const struct env_image* center,
                         const struct env_image* surround,
                         const int absol,
                         struct env_image* result)
{
        /* the result is at the (finer) resolution of the center image */
        ENV_ASSERT(env_dims_equal(result->dims, center->dims));

        const env_size_t lw = center->dims.w, lh = center->dims.h;
        const env_size_t sw = surround->dims.w, sh = surround->dims.h;

        ENV_ASSERT2(lw >= sw && lh >= sh,
                    "center must be larger than surround");

        const env_size_t scalex = lw / sw, remx = lw - 1 - (lw % sw);
        const env_size_t scaley = lh / sh, remy = lh - 1 - (lh % sh);

        /* ci/cj count how many center pixels have been consumed for
           the current surround pixel */
        env_size_t ci = 0, cj = 0;
        const intg32* lptr = env_img_pixels(center);
        const intg32* sptr = env_img_pixels(surround);
        intg32* dptr = env_img_pixelsw(result);

        if (absol)
        {
                /* |center - surround| */
                for (env_size_t j = 0; j < lh; ++j)
                {
                        for (env_size_t i = 0; i < lw; ++i)
                        {
                                if (*lptr > *sptr)
                                        *dptr++ = (*lptr++ - *sptr);
                                else
                                        *dptr++ = (*sptr - *lptr++);

                                if ((++ci) == scalex && i != remx) { ci = 0; ++sptr; }
                        }
                        if (ci) { ci = 0; ++sptr; }
                        if ((++cj) == scaley && j != remy) cj = 0; else sptr -= sw;
                }
        }
        else
        {
                /* max(center - surround, 0) */
                for (env_size_t j = 0; j < lh; ++j)
                {
                        for (env_size_t i = 0; i < lw; ++i)
                        {
                                if (*lptr > *sptr)
                                        *dptr++ = (*lptr++ - *sptr);
                                else
                                { *dptr++ = 0; lptr++; }

                                if ((++ci) == scalex && i != remx) { ci = 0; ++sptr; }
                        }
                        if (ci) { ci = 0; ++sptr; }
                        if ((++cj) == scaley && j != remy) cj = 0; else sptr -= sw;
                }
        }

        /* attenuate the borders of the result */
        env_attenuate_borders_inplace
                (result, ENV_MAX(result->dims.w, result->dims.h) / 20);
}

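/* Compute red/green and blue/yellow color opponency maps from raw RGB
   data (optionally averaging two input frames).  Below the luminance
   threshold both outputs are set to zero; otherwise the rectified
   opponent terms R = 2r-g-b, G = 2g-r-b, B = 2b-r-g and
   Y = -2*(2b-r-g) - 4*|r-g| are formed, and the outputs are
   3*(R-G)/lum and 3*(B-Y)/lum scaled by 2^(inputbits-3). */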
void env_get_rgby(const struct env_rgb_pixel* const src,
                  const struct env_rgb_pixel* const src2,
                  const env_size_t sz,
                  struct env_image* rg,
                  struct env_image* by, const intg32 thresh,
                  const env_size_t inputbits)
{
        ENV_ASSERT(env_img_size(rg) == sz);
        ENV_ASSERT(env_img_size(by) == sz);

        intg32* rgptr = env_img_pixelsw(rg);
        intg32* byptr = env_img_pixelsw(by);

        const env_ssize_t lshift = ((env_ssize_t)inputbits) - 3;

        for (env_size_t i = 0; i < sz; ++i)
        {
                intg32 r, g, b;

                /* if a second frame is given, average the two inputs */
                if (src2 == 0)
                {
                        r = (intg32) src[i].p[0];
                        g = (intg32) src[i].p[1];
                        b = (intg32) src[i].p[2];
                }
                else
                {
                        r = ((intg32) src[i].p[0] + (intg32) src2[i].p[0]) / 2;
                        g = ((intg32) src[i].p[1] + (intg32) src2[i].p[1]) / 2;
                        b = ((intg32) src[i].p[2] + (intg32) src2[i].p[2]) / 2;
                }

                /* total luminance */
                const intg32 lum = r + g + b;

                /* too dark: set both opponency outputs to zero */
                if (lum < thresh)
                {
                        rgptr[i] = byptr[i] = 0;
                }
                else
                {
                        /* rectified opponent color components */
                        intg32 red = (2*r - g - b);
                        intg32 green = (2*g - r - b);
                        intg32 blue = (2*b - r - g);
                        intg32 yellow = (-2*blue - 4*ENV_ABS(r-g));

                        if (red < 0) red = 0;
                        if (green < 0) green = 0;
                        if (blue < 0) blue = 0;
                        if (yellow < 0) yellow = 0;

                        /* normalize by luminance, scaling by 2^(inputbits-3) */
                        if (lshift > 0)
                        {
                                rgptr[i] = (3*(red - green) << lshift) / lum;
                                byptr[i] = (3*(blue - yellow) << lshift) / lum;
                        }
                        else if (lshift < 0)
                        {
                                rgptr[i] = ((3*(red - green)) / lum) >> (-lshift);
                                byptr[i] = ((3*(blue - yellow)) / lum) >> (-lshift);
                        }
                        else
                        {
                                rgptr[i] = (3*(red - green)) / lum;
                                byptr[i] = (3*(blue - yellow)) / lum;
                        }
                }
        }
}


/* Update *mi and *ma so that they also cover the value range of src. */
void env_merge_range(const struct env_image* src,
                     intg32* mi, intg32* ma)
{
        if (src == 0)
                return;

        const intg32* sptr = env_img_pixels(src);
        const env_size_t sz = env_img_size(src);

        if (sz == 0)
                return;

        if (sptr[0] < *mi) *mi = sptr[0];
        if (sptr[0] > *ma) *ma = sptr[0];

        for (env_size_t i = 1; i < sz; ++i)
        {
                if (sptr[i] < *mi) *mi = sptr[i];
                else if (sptr[i] > *ma) *ma = sptr[i];
        }
}


/* Linearly remap values from [mi,ma] into [0,255], in place. */
void env_rescale_range_inplace(struct env_image* src,
                               const intg32 mi, const intg32 ma)
{
        if (src == 0)
                return;

        intg32* sptr = env_img_pixelsw(src);
        const env_size_t sz = env_img_size(src);

        ENV_ASSERT(ma >= mi);

        const intg32 scale = ma - mi;

        if (scale < 1)
        {
                /* degenerate range: set everything to zero */
                for (env_size_t i = 0; i < sz; ++i)
                        sptr[i] = 0;
        }
        else if ((INTG32_MAX / 255) > scale)
        {
                for (env_size_t i = 0; i < sz; ++i)
                {
                        sptr[i] = ((sptr[i] - mi) * 255) / scale;
                }
        }
        else
        {
                /* scale is too large to multiply by 255 first without
                   overflowing; divide instead */
                const intg32 div = scale / 255;
                ENV_ASSERT(div > 0);
                for (env_size_t i = 0; i < sz; ++i)
                {
                        sptr[i] = (sptr[i] - mi) / div;
                }
        }
}

#ifdef ENV_WITH_DYNAMIC_CHANNELS


/* Shift an image by an integral number of pixels (dx,dy), copying only
   the part that stays in view; uncovered result pixels are left
   untouched. */
void env_shift_clean(const struct env_image* srcImg,
                     const env_ssize_t dx, const env_ssize_t dy,
                     struct env_image* result)
{
        if (!env_img_initialized(srcImg))
        {
                env_img_make_empty(result);
                return;
        }

        ENV_ASSERT(env_dims_equal(result->dims, srcImg->dims));

        const env_ssize_t w = (env_ssize_t) srcImg->dims.w;
        const env_ssize_t h = (env_ssize_t) srcImg->dims.h;

        /* if the shift is larger than the image, nothing remains in
           view and there is nothing to copy */
        if (ENV_ABS(dx) >= w || ENV_ABS(dy) >= h)
                return;

        /* compute the source rectangle that remains visible */
        const env_ssize_t startx = ENV_MAX(((env_ssize_t) 0), -dx);
        const env_ssize_t endx = ENV_MIN(w, w - dx);
        ENV_ASSERT(startx < w);
        ENV_ASSERT(endx > 0);
        const env_ssize_t starty = ENV_MAX(((env_ssize_t) 0), -dy);
        const env_ssize_t endy = ENV_MIN(h, h - dy);
        ENV_ASSERT(starty < h);
        ENV_ASSERT(endy > 0);

        /* align the source and destination pointers to the corners of
           the copied rectangle */
        const intg32* src = env_img_pixels(srcImg);
        intg32* dst = env_img_pixelsw(result);

        src += startx + starty * w;
        dst += (startx + dx) + (starty + dy) * w;

        const env_ssize_t skip = w - endx + startx;

        /* copy the visible rectangle row by row */
        for (env_ssize_t j = starty; j < endy; ++j)
        {
                for (env_ssize_t i = startx; i < endx; ++i)
                        *dst++ = *src++;

                src += skip; dst += skip;
        }
}

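/* Shift an image by the fractional offset
   (dxnumer / 2^denombits, dynumer / 2^denombits).  The integer part
   of the shift is handled as in env_shift_clean(); the fractional
   part is applied by bilinear interpolation between each pixel and
   its right, lower and lower-right neighbors, with the >> denombits
   pre-shifts keeping the intermediate products within 32 bits. */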
void env_shift_image(const struct env_image* srcImg,
                     const env_ssize_t dxnumer, const env_ssize_t dynumer,
                     const env_size_t denombits,
                     struct env_image* result)
{
        if (!env_img_initialized(srcImg))
        {
                env_img_make_empty(result);
                return;
        }

        ENV_ASSERT(env_dims_equal(result->dims, srcImg->dims));

        ENV_ASSERT(denombits < 8*sizeof(intg32));

        const env_ssize_t denom = (1 << denombits);

        const env_ssize_t w = (env_ssize_t) srcImg->dims.w;
        const env_ssize_t h = (env_ssize_t) srcImg->dims.h;

        /* integer and fractional parts of the x shift */
        env_ssize_t xt = dxnumer >= 0
                ? (dxnumer >> denombits)
                : - ((-dxnumer + denom-1) >> denombits);
        env_ssize_t xfrac_numer = dxnumer - (xt << denombits);
        const env_ssize_t startx = ENV_MAX(((env_ssize_t) 0), xt);
        const env_ssize_t endx = ENV_MIN(((env_ssize_t) 0), xt) + w - 1;

        /* integer and fractional parts of the y shift */
        env_ssize_t yt = dynumer >= 0
                ? (dynumer >> denombits)
                : - ((-dynumer + denom-1) >> denombits);
        env_ssize_t yfrac_numer = dynumer - (yt << denombits);
        const env_ssize_t starty = ENV_MAX(((env_ssize_t) 0), yt);
        const env_ssize_t endy = ENV_MIN(((env_ssize_t) 0), yt) + h - 1;

        /* clear the result, since the shifted image may not cover it */
        {
                const env_size_t sz = w * h;
                intg32* const rptr = env_img_pixelsw(result);
                for (env_size_t i = 0; i < sz; ++i)
                        rptr[i] = 0;
        }

        /* no fractional part: take the fast integer-shift path */
        if (xfrac_numer == 0 && yfrac_numer == 0)
        {
                env_shift_clean(srcImg, xt, yt, result);
                return;
        }

        if (xfrac_numer > 0)
        {
                xfrac_numer = denom - xfrac_numer;
                ++xt;
        }

        if (yfrac_numer > 0)
        {
                yfrac_numer = denom - yfrac_numer;
                ++yt;
        }

        /* offset the pointers by the integer part of the shift */
        const intg32* src2 = env_img_pixels(srcImg);
        intg32* ret2 = env_img_pixelsw(result);
        if (xt > 0) ret2 += xt;
        else if (xt < 0) src2 -= xt;
        if (yt > 0) ret2 += yt * w;
        else if (yt < 0) src2 -= yt * w;

        /* bilinear interpolation between each pixel and its right,
           lower and lower-right neighbors */
        for (env_ssize_t y = starty; y < endy; ++y)
        {
                const intg32* src = src2;
                intg32* ret = ret2;
                for (env_ssize_t x = startx; x < endx; ++x)
                {
                        *ret = (((src[0] >> denombits) * (denom - xfrac_numer)) >> denombits) * (denom - yfrac_numer);
                        *ret += (((src[1] >> denombits) * xfrac_numer) >> denombits) * (denom - yfrac_numer);
                        *ret += (((src[w] >> denombits) * (denom - xfrac_numer)) >> denombits) * yfrac_numer;
                        *ret += (((src[w+1] >> denombits) * xfrac_numer) >> denombits) * yfrac_numer;
                        ++src; ++ret;
                }
                src2 += w; ret2 += w;
        }
}

#endif // ENV_WITH_DYNAMIC_CHANNELS

#endif // ENVISION_ENV_IMAGE_OPS_C_DEFINED