path: root/thirdparty/graphite/src/inc/Sparse.h
// SPDX-License-Identifier: MIT OR MPL-2.0 OR LGPL-2.1-or-later OR GPL-2.0-or-later
// Copyright 2011, SIL International, All rights reserved.

#pragma once
#include <iterator>
#include <utility>

#include "inc/Main.h"

namespace graphite2 {


// A read-only, packed, fast sparse array of uint16 values with uint16 keys.
// Like most container classes it has capacity and size properties: capacity is
// the number of stored entries and size is the number of addressable entries.
// However, due to the sparse nature, the capacity is always <= the size.
class sparse
{
public:
    typedef uint16  key_type;
    typedef uint16  mapped_type;
    typedef std::pair<const key_type, mapped_type> value_type;

private:
    typedef unsigned long   mask_t;

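    // Number of keys covered by one chunk: the bits of mask_t that remain once
    // room for a key_type offset is reserved in the same word.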
    static const unsigned char  SIZEOF_CHUNK = (sizeof(mask_t) - sizeof(key_type))*8;

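    // Each chunk covers SIZEOF_CHUNK consecutive keys: mask holds one presence
    // bit per key (highest bit = lowest key in the chunk) and offset is the
    // index into the packed values array where this chunk's values begin.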
    struct chunk
    {
        mask_t          mask:SIZEOF_CHUNK;
        key_type        offset;
    };

    static const chunk  empty_chunk;
    sparse(const sparse &);
    sparse & operator = (const sparse &);

public:
    template<typename I>
    sparse(I first, const I last);
    sparse() throw();
    ~sparse() throw();

    operator bool () const throw();
    mapped_type     operator [] (const key_type k) const throw();

    size_t capacity() const throw();
    size_t size()     const throw();

    size_t _sizeof() const throw();

    CLASS_NEW_DELETE;

private:
    union {
        chunk         * map;
        mapped_type   * values;
    }           m_array;
    key_type    m_nchunks;
};
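
// A minimal usage sketch (illustrative only, not part of the library): the
// iterator-pair constructor consumes (key, value) pairs with strictly
// ascending keys, zero values are skipped, and a key that was never stored is
// expected to read back as 0 through operator [] (defined out of line).
//
//     const std::pair<sparse::key_type, sparse::mapped_type> attrs[] = {
//         { 1, 10 }, { 4, 20 }, { 300, 30 }
//     };
//     sparse m(attrs, attrs + 3);
//     if (m)                              // allocation and key order were OK
//     {
//         sparse::mapped_type a = m[4];   // 20
//         sparse::mapped_type b = m[7];   // 0: key not stored
//     }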


inline
sparse::sparse() throw() : m_nchunks(0)
{
    m_array.map = const_cast<graphite2::sparse::chunk *>(&empty_chunk);
}


template <typename I>
sparse::sparse(I attr, const I last)
: m_nchunks(0)
{
    m_array.map = 0;

    // Find the maximum extent of the key space.
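    // Zero-valued entries are not stored at all, and keys must be strictly
    // ascending; an out-of-order key leaves the map empty (m_nchunks == 0).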
    size_t n_values=0;
    long lastkey = -1;
    for (I i = attr; i != last; ++i, ++n_values)
    {
        const typename std::iterator_traits<I>::value_type v = *i;
        if (v.second == 0)      { --n_values; continue; }
        if (v.first <= lastkey) { m_nchunks = 0; return; }

        lastkey = v.first;
        const key_type k = v.first / SIZEOF_CHUNK;
        if (k >= m_nchunks) m_nchunks = k+1;
    }
    if (m_nchunks == 0)
    {
        m_array.map=const_cast<graphite2::sparse::chunk *>(&empty_chunk);
        return;
    }

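    // One allocation holds the chunk directory (rounded up to a whole number
    // of mapped_type slots) followed by the n_values packed values.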
    m_array.values = grzeroalloc<mapped_type>((m_nchunks*sizeof(chunk) + sizeof(mapped_type)-1)
                                                 / sizeof(mapped_type)
                                                 + n_values);

    if (m_array.values == 0)
        return;

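    // Second pass: set each key's presence bit in its chunk and append its
    // value; each chunk's offset records where its run of values begins.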
    // coverity[forward_null : FALSE] Since m_array is a union and m_array.values is not NULL
    chunk * ci = m_array.map;
    ci->offset = (m_nchunks*sizeof(chunk) + sizeof(mapped_type)-1)/sizeof(mapped_type);
    mapped_type * vi = m_array.values + ci->offset;
    for (; attr != last; ++attr, ++vi)
    {
        const typename std::iterator_traits<I>::value_type v = *attr;
        if (v.second == 0)  { --vi; continue; }

        chunk * const ci_ = m_array.map + v.first/SIZEOF_CHUNK;

        if (ci != ci_)
        {
            ci = ci_;
            ci->offset = key_type(vi - m_array.values);
        }

        ci->mask |= 1UL << (SIZEOF_CHUNK - 1 - (v.first % SIZEOF_CHUNK));
        *vi = v.second;
    }
}


inline
sparse::operator bool () const throw()
{
    return m_array.map != 0;
}

inline
size_t sparse::size() const throw()
{
    return m_nchunks*SIZEOF_CHUNK;
}

inline
size_t sparse::_sizeof() const throw()
{
    return sizeof(sparse) + capacity()*sizeof(mapped_type) + m_nchunks*sizeof(chunk);
}

} // namespace graphite2