Diffstat (limited to 'drivers/webp/enc/histogram.c')
-rw-r--r--  drivers/webp/enc/histogram.c  453
1 file changed, 246 insertions, 207 deletions
diff --git a/drivers/webp/enc/histogram.c b/drivers/webp/enc/histogram.c
index 68c27fb1db..61544f4ccd 100644
--- a/drivers/webp/enc/histogram.c
+++ b/drivers/webp/enc/histogram.c
@@ -157,6 +157,109 @@ void VP8LHistogramAddSinglePixOrCopy(VP8LHistogram* const histo,
}
// -----------------------------------------------------------------------------
+// Entropy-related functions.
+
+static WEBP_INLINE double BitsEntropyRefine(const VP8LBitEntropy* entropy) {
+ double mix;
+ if (entropy->nonzeros < 5) {
+ if (entropy->nonzeros <= 1) {
+ return 0;
+ }
+  // Two symbols: they will be 0 and 1 in a Huffman code.
+ // Let's mix in a bit of entropy to favor good clustering when
+ // distributions of these are combined.
+ if (entropy->nonzeros == 2) {
+ return 0.99 * entropy->sum + 0.01 * entropy->entropy;
+ }
+  // No matter what the entropy says, we cannot do better than min_limit
+  // with Huffman coding. We mix a bit of entropy into min_limit since it
+  // produces much better (~0.5%) compression results, perhaps because of
+  // better entropy clustering.
+ if (entropy->nonzeros == 3) {
+ mix = 0.95;
+ } else {
+ mix = 0.7; // nonzeros == 4.
+ }
+ } else {
+ mix = 0.627;
+ }
+
+ {
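+    // 2 * sum - max_val is a rough Huffman lower bound: the most frequent
+    // symbol costs about one bit per occurrence, every other occurrence at
+    // least two.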
+ double min_limit = 2 * entropy->sum - entropy->max_val;
+ min_limit = mix * min_limit + (1.0 - mix) * entropy->entropy;
+ return (entropy->entropy < min_limit) ? min_limit : entropy->entropy;
+ }
+}
+
+double VP8LBitsEntropy(const uint32_t* const array, int n,
+ uint32_t* const trivial_symbol) {
+ VP8LBitEntropy entropy;
+ VP8LBitsEntropyUnrefined(array, n, &entropy);
+ if (trivial_symbol != NULL) {
+ *trivial_symbol =
+ (entropy.nonzeros == 1) ? entropy.nonzero_code : VP8L_NON_TRIVIAL_SYM;
+ }
+
+ return BitsEntropyRefine(&entropy);
+}
+
+static double InitialHuffmanCost(void) {
+ // Small bias because Huffman code length is typically not stored in
+ // full length.
+ static const int kHuffmanCodeOfHuffmanCodeSize = CODE_LENGTH_CODES * 3;
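+  // (CODE_LENGTH_CODES code length codes, stored with roughly 3 bits each.)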
+ static const double kSmallBias = 9.1;
+ return kHuffmanCodeOfHuffmanCodeSize - kSmallBias;
+}
+
+// Finalize the Huffman cost based on streak numbers and length type (<3 or >=3)
+static double FinalHuffmanCost(const VP8LStreaks* const stats) {
+ double retval = InitialHuffmanCost();
+ retval += stats->counts[0] * 1.5625 + 0.234375 * stats->streaks[0][1];
+ retval += stats->counts[1] * 2.578125 + 0.703125 * stats->streaks[1][1];
+ retval += 1.796875 * stats->streaks[0][0];
+ retval += 3.28125 * stats->streaks[1][0];
+ return retval;
+}
+
+// Get the symbol entropy for the distribution 'population'.
+// Set 'trivial_sym' if there's only one symbol present in the distribution.
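+// The returned cost roughly covers both the Huffman code itself and the
+// symbols coded with it.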
+static double PopulationCost(const uint32_t* const population, int length,
+ uint32_t* const trivial_sym) {
+ VP8LBitEntropy bit_entropy;
+ VP8LStreaks stats;
+ VP8LGetEntropyUnrefined(population, length, &bit_entropy, &stats);
+ if (trivial_sym != NULL) {
+ *trivial_sym = (bit_entropy.nonzeros == 1) ? bit_entropy.nonzero_code
+ : VP8L_NON_TRIVIAL_SYM;
+ }
+
+ return BitsEntropyRefine(&bit_entropy) + FinalHuffmanCost(&stats);
+}
+
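+// Estimate the cost of coding the distributions 'X' and 'Y' together with a
+// single shared Huffman code.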
+static WEBP_INLINE double GetCombinedEntropy(const uint32_t* const X,
+ const uint32_t* const Y,
+ int length) {
+ VP8LBitEntropy bit_entropy;
+ VP8LStreaks stats;
+ VP8LGetCombinedEntropyUnrefined(X, Y, length, &bit_entropy, &stats);
+
+ return BitsEntropyRefine(&bit_entropy) + FinalHuffmanCost(&stats);
+}
+
+// Estimates the Entropy + Huffman + other block overhead size cost.
+double VP8LHistogramEstimateBits(const VP8LHistogram* const p) {
+ return
+ PopulationCost(
+ p->literal_, VP8LHistogramNumCodes(p->palette_code_bits_), NULL)
+ + PopulationCost(p->red_, NUM_LITERAL_CODES, NULL)
+ + PopulationCost(p->blue_, NUM_LITERAL_CODES, NULL)
+ + PopulationCost(p->alpha_, NUM_LITERAL_CODES, NULL)
+ + PopulationCost(p->distance_, NUM_DISTANCE_CODES, NULL)
+ + VP8LExtraCost(p->literal_ + NUM_LITERAL_CODES, NUM_LENGTH_CODES)
+ + VP8LExtraCost(p->distance_, NUM_DISTANCE_CODES);
+}
+
+// -----------------------------------------------------------------------------
// Various histogram combine/cost-eval functions
static int GetCombinedHistogramEntropy(const VP8LHistogram* const a,
@@ -165,26 +268,25 @@ static int GetCombinedHistogramEntropy(const VP8LHistogram* const a,
double* cost) {
const int palette_code_bits = a->palette_code_bits_;
assert(a->palette_code_bits_ == b->palette_code_bits_);
- *cost += VP8LGetCombinedEntropy(a->literal_, b->literal_,
- VP8LHistogramNumCodes(palette_code_bits));
+ *cost += GetCombinedEntropy(a->literal_, b->literal_,
+ VP8LHistogramNumCodes(palette_code_bits));
*cost += VP8LExtraCostCombined(a->literal_ + NUM_LITERAL_CODES,
b->literal_ + NUM_LITERAL_CODES,
NUM_LENGTH_CODES);
if (*cost > cost_threshold) return 0;
- *cost += VP8LGetCombinedEntropy(a->red_, b->red_, NUM_LITERAL_CODES);
+ *cost += GetCombinedEntropy(a->red_, b->red_, NUM_LITERAL_CODES);
if (*cost > cost_threshold) return 0;
- *cost += VP8LGetCombinedEntropy(a->blue_, b->blue_, NUM_LITERAL_CODES);
+ *cost += GetCombinedEntropy(a->blue_, b->blue_, NUM_LITERAL_CODES);
if (*cost > cost_threshold) return 0;
- *cost += VP8LGetCombinedEntropy(a->alpha_, b->alpha_, NUM_LITERAL_CODES);
+ *cost += GetCombinedEntropy(a->alpha_, b->alpha_, NUM_LITERAL_CODES);
if (*cost > cost_threshold) return 0;
- *cost += VP8LGetCombinedEntropy(a->distance_, b->distance_,
- NUM_DISTANCE_CODES);
- *cost += VP8LExtraCostCombined(a->distance_, b->distance_,
- NUM_DISTANCE_CODES);
+ *cost += GetCombinedEntropy(a->distance_, b->distance_, NUM_DISTANCE_CODES);
+ *cost +=
+ VP8LExtraCostCombined(a->distance_, b->distance_, NUM_DISTANCE_CODES);
if (*cost > cost_threshold) return 0;
return 1;
@@ -262,17 +364,17 @@ static void UpdateDominantCostRange(
static void UpdateHistogramCost(VP8LHistogram* const h) {
uint32_t alpha_sym, red_sym, blue_sym;
- const double alpha_cost = VP8LPopulationCost(h->alpha_, NUM_LITERAL_CODES,
- &alpha_sym);
+ const double alpha_cost =
+ PopulationCost(h->alpha_, NUM_LITERAL_CODES, &alpha_sym);
const double distance_cost =
- VP8LPopulationCost(h->distance_, NUM_DISTANCE_CODES, NULL) +
+ PopulationCost(h->distance_, NUM_DISTANCE_CODES, NULL) +
VP8LExtraCost(h->distance_, NUM_DISTANCE_CODES);
const int num_codes = VP8LHistogramNumCodes(h->palette_code_bits_);
- h->literal_cost_ = VP8LPopulationCost(h->literal_, num_codes, NULL) +
+ h->literal_cost_ = PopulationCost(h->literal_, num_codes, NULL) +
VP8LExtraCost(h->literal_ + NUM_LITERAL_CODES,
NUM_LENGTH_CODES);
- h->red_cost_ = VP8LPopulationCost(h->red_, NUM_LITERAL_CODES, &red_sym);
- h->blue_cost_ = VP8LPopulationCost(h->blue_, NUM_LITERAL_CODES, &blue_sym);
+ h->red_cost_ = PopulationCost(h->red_, NUM_LITERAL_CODES, &red_sym);
+ h->blue_cost_ = PopulationCost(h->blue_, NUM_LITERAL_CODES, &blue_sym);
h->bit_cost_ = h->literal_cost_ + h->red_cost_ + h->blue_cost_ +
alpha_cost + distance_cost;
if ((alpha_sym | red_sym | blue_sym) == VP8L_NON_TRIVIAL_SYM) {
@@ -284,29 +386,27 @@ static void UpdateHistogramCost(VP8LHistogram* const h) {
}
static int GetBinIdForEntropy(double min, double max, double val) {
- const double range = max - min + 1e-6;
- const double delta = val - min;
- return (int)(NUM_PARTITIONS * delta / range);
+ const double range = max - min;
+ if (range > 0.) {
+ const double delta = val - min;
+ return (int)((NUM_PARTITIONS - 1e-6) * delta / range);
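+    // The 1e-6 offset keeps val == max inside the last bin (NUM_PARTITIONS-1).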
+ } else {
+ return 0;
+ }
}
-static int GetHistoBinIndexLowEffort(
- const VP8LHistogram* const h, const DominantCostRange* const c) {
- const int bin_id = GetBinIdForEntropy(c->literal_min_, c->literal_max_,
- h->literal_cost_);
+static int GetHistoBinIndex(const VP8LHistogram* const h,
+ const DominantCostRange* const c, int low_effort) {
+ int bin_id = GetBinIdForEntropy(c->literal_min_, c->literal_max_,
+ h->literal_cost_);
assert(bin_id < NUM_PARTITIONS);
- return bin_id;
-}
-
-static int GetHistoBinIndex(
- const VP8LHistogram* const h, const DominantCostRange* const c) {
- const int bin_id =
- GetBinIdForEntropy(c->blue_min_, c->blue_max_, h->blue_cost_) +
- NUM_PARTITIONS * GetBinIdForEntropy(c->red_min_, c->red_max_,
- h->red_cost_) +
- NUM_PARTITIONS * NUM_PARTITIONS * GetBinIdForEntropy(c->literal_min_,
- c->literal_max_,
- h->literal_cost_);
- assert(bin_id < BIN_SIZE);
+ if (!low_effort) {
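+    // Fold the red and blue bin ids in as extra base-NUM_PARTITIONS digits.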
+ bin_id = bin_id * NUM_PARTITIONS
+ + GetBinIdForEntropy(c->red_min_, c->red_max_, h->red_cost_);
+ bin_id = bin_id * NUM_PARTITIONS
+ + GetBinIdForEntropy(c->blue_min_, c->blue_max_, h->blue_cost_);
+ assert(bin_id < BIN_SIZE);
+ }
return bin_id;
}
@@ -367,16 +467,13 @@ static void HistogramAnalyzeEntropyBin(VP8LHistogramSet* const image_histo,
// bin-hash histograms on three of the dominant (literal, red and blue)
// symbol costs.
for (i = 0; i < histo_size; ++i) {
- int num_histos;
- VP8LHistogram* const histo = histograms[i];
- const int16_t bin_id = low_effort ?
- (int16_t)GetHistoBinIndexLowEffort(histo, &cost_range) :
- (int16_t)GetHistoBinIndex(histo, &cost_range);
+ const VP8LHistogram* const histo = histograms[i];
+ const int bin_id = GetHistoBinIndex(histo, &cost_range, low_effort);
const int bin_offset = bin_id * bin_depth;
// bin_map[n][0] for every bin 'n' maintains the counter for the number of
// histograms in that bin.
// Get and increment the num_histos in that bin.
- num_histos = ++bin_map[bin_offset];
+ const int num_histos = ++bin_map[bin_offset];
assert(bin_offset + num_histos < bin_depth * BIN_SIZE);
// Add histogram i'th index at num_histos (last) position in the bin_map.
bin_map[bin_offset + num_histos] = i;
@@ -478,26 +575,31 @@ typedef struct {
} HistogramPair;
typedef struct {
- HistogramPair* heap;
- int* positions;
+ HistogramPair* queue;
int size;
- int max_index;
-} HistoHeap;
-
-static int HistoHeapInit(HistoHeap* const histo_heap, const int max_index) {
- histo_heap->size = 0;
- histo_heap->max_index = max_index;
- histo_heap->heap = WebPSafeMalloc(max_index * max_index,
- sizeof(*histo_heap->heap));
- histo_heap->positions = WebPSafeMalloc(max_index * max_index,
- sizeof(*histo_heap->positions));
- return histo_heap->heap != NULL && histo_heap->positions != NULL;
-}
-
-static void HistoHeapClear(HistoHeap* const histo_heap) {
- assert(histo_heap != NULL);
- WebPSafeFree(histo_heap->heap);
- WebPSafeFree(histo_heap->positions);
+ int max_size;
+} HistoQueue;
+
+static int HistoQueueInit(HistoQueue* const histo_queue, const int max_index) {
+ histo_queue->size = 0;
+ // max_index^2 for the queue size is safe. If you look at
+ // HistogramCombineGreedy, and imagine that UpdateQueueFront always pushes
+ // data to the queue, you insert at most:
+ // - max_index*(max_index-1)/2 (the first two for loops)
+ // - max_index - 1 in the last for loop at the first iteration of the while
+ // loop, max_index - 2 at the second iteration ... therefore
+ // max_index*(max_index-1)/2 overall too
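+  // Overall that is at most max_index*(max_index-1) pairs, so max_index^2
+  // is a safe upper bound.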
+ histo_queue->max_size = max_index * max_index;
+ // We allocate max_size + 1 because the last element at index "size" is
+ // used as temporary data (and it could be up to max_size).
+ histo_queue->queue = WebPSafeMalloc(histo_queue->max_size + 1,
+ sizeof(*histo_queue->queue));
+ return histo_queue->queue != NULL;
+}
+
+static void HistoQueueClear(HistoQueue* const histo_queue) {
+ assert(histo_queue != NULL);
+ WebPSafeFree(histo_queue->queue);
}
static void SwapHistogramPairs(HistogramPair *p1,
@@ -507,66 +609,33 @@ static void SwapHistogramPairs(HistogramPair *p1,
*p2 = tmp;
}
-// Given a valid min-heap in range [0, heap_size-1) this function places value
-// heap[heap_size-1] into right location within heap and sets its position in
-// positions array.
-static void HeapPush(HistoHeap* const histo_heap) {
- HistogramPair* const heap = histo_heap->heap - 1;
- int* const positions = histo_heap->positions;
- const int max_index = histo_heap->max_index;
- int v;
- ++histo_heap->size;
- v = histo_heap->size;
- while (v > 1 && heap[v].cost_diff < heap[v >> 1].cost_diff) {
- SwapHistogramPairs(&heap[v], &heap[v >> 1]);
- // Change position of moved pair in heap.
- if (heap[v].idx1 >= 0) {
- const int pos = heap[v].idx1 * max_index + heap[v].idx2;
- assert(pos >= 0 && pos < max_index * max_index);
- positions[pos] = v;
- }
- v >>= 1;
- }
- positions[heap[v].idx1 * max_index + heap[v].idx2] = v;
-}
-
-// Given a valid min-heap in range [0, heap_size) this function shortens heap
-// range by one and places element with the lowest value to (heap_size-1).
-static void HeapPop(HistoHeap* const histo_heap) {
- HistogramPair* const heap = histo_heap->heap - 1;
- int* const positions = histo_heap->positions;
- const int heap_size = histo_heap->size;
- const int max_index = histo_heap->max_index;
- int v = 1;
- if (heap[v].idx1 >= 0) {
- positions[heap[v].idx1 * max_index + heap[v].idx2] = -1;
- }
- SwapHistogramPairs(&heap[v], &heap[heap_size]);
- while ((v << 1) < heap_size) {
- int son = (heap[v << 1].cost_diff < heap[v].cost_diff) ? (v << 1) : v;
- if (((v << 1) + 1) < heap_size &&
- heap[(v << 1) + 1].cost_diff < heap[son].cost_diff) {
- son = (v << 1) + 1;
- }
- if (son == v) break;
- SwapHistogramPairs(&heap[v], &heap[son]);
- // Change position of moved pair in heap.
- if (heap[v].idx1 >= 0) {
- positions[heap[v].idx1 * max_index + heap[v].idx2] = v;
- }
- v = son;
- }
- if (heap[v].idx1 >= 0) {
- positions[heap[v].idx1 * max_index + heap[v].idx2] = v;
+// Given a valid priority queue in range [0, queue_size) this function checks
+// whether histo_queue[queue_size] should be accepted and swaps it with the
+// front if it is smaller. Otherwise, it leaves it as is.
+static void UpdateQueueFront(HistoQueue* const histo_queue) {
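+  // Pairs that do not reduce the total cost (cost_diff >= 0) are rejected.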
+ if (histo_queue->queue[histo_queue->size].cost_diff >= 0) return;
+
+ if (histo_queue->queue[histo_queue->size].cost_diff <
+ histo_queue->queue[0].cost_diff) {
+ SwapHistogramPairs(histo_queue->queue,
+ histo_queue->queue + histo_queue->size);
}
- --histo_heap->size;
+ ++histo_queue->size;
+
+ // We cannot add more elements than the capacity.
+ // The allocation adds an extra element to the official capacity so that
+ // histo_queue->queue[histo_queue->max_size] is read/written within bound.
+ assert(histo_queue->size <= histo_queue->max_size);
}
// -----------------------------------------------------------------------------
static void PreparePair(VP8LHistogram** histograms, int idx1, int idx2,
- HistogramPair* const pair,
- VP8LHistogram* const histos) {
+ HistogramPair* const pair) {
+ VP8LHistogram* h1;
+ VP8LHistogram* h2;
+ double sum_cost;
+
if (idx1 > idx2) {
const int tmp = idx2;
idx2 = idx1;
@@ -574,60 +643,27 @@ static void PreparePair(VP8LHistogram** histograms, int idx1, int idx2,
}
pair->idx1 = idx1;
pair->idx2 = idx2;
- pair->cost_diff =
- HistogramAddEval(histograms[idx1], histograms[idx2], histos, 0);
- pair->cost_combo = histos->bit_cost_;
-}
-
-#define POSITION_INVALID (-1)
-
-// Invalidates pairs intersecting (idx1, idx2) in heap.
-static void InvalidatePairs(int idx1, int idx2,
- const HistoHeap* const histo_heap) {
- HistogramPair* const heap = histo_heap->heap - 1;
- int* const positions = histo_heap->positions;
- const int max_index = histo_heap->max_index;
- int i;
- for (i = 0; i < idx1; ++i) {
- const int pos = positions[i * max_index + idx1];
- if (pos >= 0) {
- heap[pos].idx1 = POSITION_INVALID;
- }
- }
- for (i = idx1 + 1; i < max_index; ++i) {
- const int pos = positions[idx1 * max_index + i];
- if (pos >= 0) {
- heap[pos].idx1 = POSITION_INVALID;
- }
- }
- for (i = 0; i < idx2; ++i) {
- const int pos = positions[i * max_index + idx2];
- if (pos >= 0) {
- heap[pos].idx1 = POSITION_INVALID;
- }
- }
- for (i = idx2 + 1; i < max_index; ++i) {
- const int pos = positions[idx2 * max_index + i];
- if (pos >= 0) {
- heap[pos].idx1 = POSITION_INVALID;
- }
- }
+ h1 = histograms[idx1];
+ h2 = histograms[idx2];
+ sum_cost = h1->bit_cost_ + h2->bit_cost_;
+ pair->cost_combo = 0.;
+ GetCombinedHistogramEntropy(h1, h2, sum_cost, &pair->cost_combo);
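+  // A negative cost_diff means coding the merged histogram is cheaper than
+  // coding the two histograms separately.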
+ pair->cost_diff = pair->cost_combo - sum_cost;
}
// Combines histograms by continuously choosing the one with the highest cost
// reduction.
-static int HistogramCombineGreedy(VP8LHistogramSet* const image_histo,
- VP8LHistogram* const histos) {
+static int HistogramCombineGreedy(VP8LHistogramSet* const image_histo) {
int ok = 0;
int image_histo_size = image_histo->size;
int i, j;
VP8LHistogram** const histograms = image_histo->histograms;
// Indexes of remaining histograms.
int* const clusters = WebPSafeMalloc(image_histo_size, sizeof(*clusters));
- // Heap of histogram pairs.
- HistoHeap histo_heap;
+ // Priority queue of histogram pairs.
+ HistoQueue histo_queue;
- if (!HistoHeapInit(&histo_heap, image_histo_size) || clusters == NULL) {
+ if (!HistoQueueInit(&histo_queue, image_histo_size) || clusters == NULL) {
goto End;
}
@@ -636,19 +672,17 @@ static int HistogramCombineGreedy(VP8LHistogramSet* const image_histo,
clusters[i] = i;
for (j = i + 1; j < image_histo_size; ++j) {
// Initialize positions array.
- histo_heap.positions[i * histo_heap.max_index + j] = POSITION_INVALID;
- PreparePair(histograms, i, j, &histo_heap.heap[histo_heap.size], histos);
- if (histo_heap.heap[histo_heap.size].cost_diff < 0) {
- HeapPush(&histo_heap);
- }
+ PreparePair(histograms, i, j, &histo_queue.queue[histo_queue.size]);
+ UpdateQueueFront(&histo_queue);
}
}
- while (image_histo_size > 1 && histo_heap.size > 0) {
- const int idx1 = histo_heap.heap[0].idx1;
- const int idx2 = histo_heap.heap[0].idx2;
+ while (image_histo_size > 1 && histo_queue.size > 0) {
+ HistogramPair* copy_to;
+ const int idx1 = histo_queue.queue[0].idx1;
+ const int idx2 = histo_queue.queue[0].idx2;
VP8LHistogramAdd(histograms[idx2], histograms[idx1], histograms[idx1]);
- histograms[idx1]->bit_cost_ = histo_heap.heap[0].cost_combo;
+ histograms[idx1]->bit_cost_ = histo_queue.queue[0].cost_combo;
// Remove merged histogram.
for (i = 0; i + 1 < image_histo_size; ++i) {
if (clusters[i] >= idx2) {
@@ -657,22 +691,31 @@ static int HistogramCombineGreedy(VP8LHistogramSet* const image_histo,
}
--image_histo_size;
- // Invalidate pairs intersecting the just combined best pair.
- InvalidatePairs(idx1, idx2, &histo_heap);
-
- // Pop invalid pairs from the top of the heap.
- while (histo_heap.size > 0 && histo_heap.heap[0].idx1 < 0) {
- HeapPop(&histo_heap);
+ // Remove pairs intersecting the just combined best pair. This will
+ // therefore pop the head of the queue.
+ copy_to = histo_queue.queue;
+ for (i = 0; i < histo_queue.size; ++i) {
+ HistogramPair* const p = histo_queue.queue + i;
+ if (p->idx1 == idx1 || p->idx2 == idx1 ||
+ p->idx1 == idx2 || p->idx2 == idx2) {
+ // Do not copy the invalid pair.
+ continue;
+ }
+ if (p->cost_diff < histo_queue.queue[0].cost_diff) {
+ // Replace the top of the queue if we found better.
+ SwapHistogramPairs(histo_queue.queue, p);
+ }
+ SwapHistogramPairs(copy_to, p);
+ ++copy_to;
}
+ histo_queue.size = (int)(copy_to - histo_queue.queue);
- // Push new pairs formed with combined histogram to the heap.
+ // Push new pairs formed with combined histogram to the queue.
for (i = 0; i < image_histo_size; ++i) {
if (clusters[i] != idx1) {
PreparePair(histograms, idx1, clusters[i],
- &histo_heap.heap[histo_heap.size], histos);
- if (histo_heap.heap[histo_heap.size].cost_diff < 0) {
- HeapPush(&histo_heap);
- }
+ &histo_queue.queue[histo_queue.size]);
+ UpdateQueueFront(&histo_queue);
}
}
}
@@ -688,15 +731,14 @@ static int HistogramCombineGreedy(VP8LHistogramSet* const image_histo,
End:
WebPSafeFree(clusters);
- HistoHeapClear(&histo_heap);
+ HistoQueueClear(&histo_queue);
return ok;
}
-static VP8LHistogram* HistogramCombineStochastic(
- VP8LHistogramSet* const image_histo,
- VP8LHistogram* tmp_histo,
- VP8LHistogram* best_combo,
- int quality, int min_cluster_size) {
+static void HistogramCombineStochastic(VP8LHistogramSet* const image_histo,
+ VP8LHistogram* tmp_histo,
+ VP8LHistogram* best_combo,
+ int quality, int min_cluster_size) {
int iter;
uint32_t seed = 0;
int tries_with_no_success = 0;
@@ -756,7 +798,6 @@ static VP8LHistogram* HistogramCombineStochastic(
}
}
image_histo->size = image_histo_size;
- return best_combo;
}
// -----------------------------------------------------------------------------
@@ -764,24 +805,23 @@ static VP8LHistogram* HistogramCombineStochastic(
// Find the best 'out' histogram for each of the 'in' histograms.
// Note: we assume that out[]->bit_cost_ is already up-to-date.
-static void HistogramRemap(const VP8LHistogramSet* const orig_histo,
- const VP8LHistogramSet* const image_histo,
+static void HistogramRemap(const VP8LHistogramSet* const in,
+ const VP8LHistogramSet* const out,
uint16_t* const symbols) {
int i;
- VP8LHistogram** const orig_histograms = orig_histo->histograms;
- VP8LHistogram** const histograms = image_histo->histograms;
- const int orig_histo_size = orig_histo->size;
- const int image_histo_size = image_histo->size;
- if (image_histo_size > 1) {
- for (i = 0; i < orig_histo_size; ++i) {
+ VP8LHistogram** const in_histo = in->histograms;
+ VP8LHistogram** const out_histo = out->histograms;
+ const int in_size = in->size;
+ const int out_size = out->size;
+ if (out_size > 1) {
+ for (i = 0; i < in_size; ++i) {
int best_out = 0;
- double best_bits =
- HistogramAddThresh(histograms[0], orig_histograms[i], MAX_COST);
+ double best_bits = MAX_COST;
int k;
- for (k = 1; k < image_histo_size; ++k) {
+ for (k = 0; k < out_size; ++k) {
const double cur_bits =
- HistogramAddThresh(histograms[k], orig_histograms[i], best_bits);
- if (cur_bits < best_bits) {
+ HistogramAddThresh(out_histo[k], in_histo[i], best_bits);
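+        // The k == 0 check makes sure best_bits is set from an actual
+        // estimate before being used as a threshold for later candidates.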
+ if (k == 0 || cur_bits < best_bits) {
best_bits = cur_bits;
best_out = k;
}
@@ -789,20 +829,20 @@ static void HistogramRemap(const VP8LHistogramSet* const orig_histo,
symbols[i] = best_out;
}
} else {
- assert(image_histo_size == 1);
- for (i = 0; i < orig_histo_size; ++i) {
+ assert(out_size == 1);
+ for (i = 0; i < in_size; ++i) {
symbols[i] = 0;
}
}
// Recompute each out based on raw and symbols.
- for (i = 0; i < image_histo_size; ++i) {
- HistogramClear(histograms[i]);
+ for (i = 0; i < out_size; ++i) {
+ HistogramClear(out_histo[i]);
}
- for (i = 0; i < orig_histo_size; ++i) {
+ for (i = 0; i < in_size; ++i) {
const int idx = symbols[i];
- VP8LHistogramAdd(orig_histograms[i], histograms[idx], histograms[idx]);
+ VP8LHistogramAdd(in_histo[i], out_histo[idx], out_histo[idx]);
}
}
@@ -876,11 +916,10 @@ int VP8LGetHistoImageSymbols(int xsize, int ysize,
const float x = quality / 100.f;
// cubic ramp between 1 and MAX_HISTO_GREEDY:
const int threshold_size = (int)(1 + (x * x * x) * (MAX_HISTO_GREEDY - 1));
- cur_combo = HistogramCombineStochastic(image_histo,
- tmp_histos->histograms[0],
- cur_combo, quality, threshold_size);
+ HistogramCombineStochastic(image_histo, tmp_histos->histograms[0],
+ cur_combo, quality, threshold_size);
if ((image_histo->size <= threshold_size) &&
- !HistogramCombineGreedy(image_histo, cur_combo)) {
+ !HistogramCombineGreedy(image_histo)) {
goto Error;
}
}