
Commit e23d364

Normalized logtag table for StreamDBClient queries (#239)

* Initial test version for replacing old logtag column reads with normalized logtag table. REBASE
* Added logtagIdTest(). REBASE
* Updated journaldb.sql dump with new schema change of dropping logtag column. REBASE
* Updated jooq with schema from logtag_id_indexes branch. Updated jooq auto-generated code with new schema. REBASE
* Updated StreamDBClientTest tests with new schema. REBASE
* Improved the usage of logtag table in StreamDBClient queries. Added pullToSliceTableInvalidIndexTest() to StreamDBClient tests. REBASE
* Apply spotless
* Changed tag to tag_id in the CreateIndexIncludeStep
* Fixed errors done in journaldb.sql during rebasing, set logtag_id to NOT NULL and restored missing index.
* Removed tag field from GetArchivedObjectsFilterTable as redundant.
* Removed logtag column from slicetable as redundant. As a consequence, changed all Record11 usages to Record10 in the code. REBASE
1 parent 7173a15 commit e23d364

16 files changed: 155 additions & 54 deletions
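To make the shape of the change concrete: the tag string now lives only in the journaldb.logtag lookup table, logfile carries a logtag_id foreign key, and queries filter by joining on that id instead of reading the dropped logfile.logtag column. A minimal jOOQ sketch of such a lookup follows; it is not code from this commit, and the holder class, helper name, DSLContext parameter, and the static import of the generated Journaldb schema constant are illustrative assumptions.

import org.jooq.DSLContext;
import org.jooq.Record2;
import org.jooq.Result;
import org.jooq.types.ULong;

import static com.teragrep.pth_06.jooq.generated.journaldb.Journaldb.JOURNALDB; // assumed location of the generated schema constant

final class LogtagLookupSketch { // hypothetical holder class, not part of pth_06

    // Hypothetical helper, for illustration only: resolve a tag name through the
    // normalized logtag table and fetch the matching logfile rows via logtag_id.
    static Result<Record2<ULong, String>> filesForTag(DSLContext ctx, String tag) {
        return ctx
                .select(JOURNALDB.LOGFILE.ID, JOURNALDB.LOGFILE.PATH)
                .from(JOURNALDB.LOGFILE)
                .join(JOURNALDB.LOGTAG)
                .on(JOURNALDB.LOGTAG.ID.eq(JOURNALDB.LOGFILE.LOGTAG_ID))
                .where(JOURNALDB.LOGTAG.LOGTAG_.eq(tag)) // tag string is matched in logtag, not in logfile
                .fetch();
    }
}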

database/journaldb.sql

Lines changed: 2 additions & 0 deletions
@@ -227,6 +227,8 @@ CREATE TABLE `logfile` (
 KEY `cix_logfile_epoch_hour_host_id_logtag` (`epoch_hour`,`host_id`,`logtag`),
 KEY `ix_logfile_epoch_expires` (`epoch_expires`),
 KEY `fk_logfile__logtag_id` (`logtag_id`),
+KEY `cix_logfile_epoch_hour_host_id_logtag_id` (`epoch_hour`,`host_id`,`logtag_id`),
+KEY `cix_logfile_logdate_host_id_logtag_id` (`logdate`,`host_id`,`logtag_id`),
 CONSTRAINT `fk_logfile__logtag_id` FOREIGN KEY (`logtag_id`) REFERENCES `logtag` (`id`),
 CONSTRAINT `fk_logfile__source_system_id` FOREIGN KEY (`source_system_id`) REFERENCES `source_system` (`id`),
 CONSTRAINT `logfile_ibfk_1` FOREIGN KEY (`bucket_id`) REFERENCES `bucket` (`id`),

src/main/java/com/teragrep/pth_06/jooq/generated/journaldb/Indexes.java

Lines changed: 4 additions & 0 deletions
@@ -85,8 +85,10 @@ public class Indexes {
 public static final Index LOGFILE_BUCKET_ID = Indexes0.LOGFILE_BUCKET_ID;
 public static final Index LOGFILE_CATEGORY_ID = Indexes0.LOGFILE_CATEGORY_ID;
 public static final Index LOGFILE_CIX_LOGFILE_EPOCH_HOUR_HOST_ID_LOGTAG = Indexes0.LOGFILE_CIX_LOGFILE_EPOCH_HOUR_HOST_ID_LOGTAG;
+public static final Index LOGFILE_CIX_LOGFILE_EPOCH_HOUR_HOST_ID_LOGTAG_ID = Indexes0.LOGFILE_CIX_LOGFILE_EPOCH_HOUR_HOST_ID_LOGTAG_ID;
 public static final Index LOGFILE_CIX_LOGFILE_HOST_ID_LOGTAG_LOGDATE = Indexes0.LOGFILE_CIX_LOGFILE_HOST_ID_LOGTAG_LOGDATE;
 public static final Index LOGFILE_CIX_LOGFILE_LOGDATE_HOST_ID_LOGTAG = Indexes0.LOGFILE_CIX_LOGFILE_LOGDATE_HOST_ID_LOGTAG;
+public static final Index LOGFILE_CIX_LOGFILE_LOGDATE_HOST_ID_LOGTAG_ID = Indexes0.LOGFILE_CIX_LOGFILE_LOGDATE_HOST_ID_LOGTAG_ID;
 public static final Index LOGFILE_FK_LOGFILE__LOGTAG_ID = Indexes0.LOGFILE_FK_LOGFILE__LOGTAG_ID;
 public static final Index LOGFILE_IX_LOGFILE_EPOCH_EXPIRES = Indexes0.LOGFILE_IX_LOGFILE_EPOCH_EXPIRES;
 public static final Index LOGFILE_IX_LOGFILE_EXPIRATION = Indexes0.LOGFILE_IX_LOGFILE_EXPIRATION;
@@ -108,8 +110,10 @@ private static class Indexes0 {
 public static Index LOGFILE_BUCKET_ID = Internal.createIndex("bucket_id", Logfile.LOGFILE, new OrderField[] { Logfile.LOGFILE.BUCKET_ID }, false);
 public static Index LOGFILE_CATEGORY_ID = Internal.createIndex("category_id", Logfile.LOGFILE, new OrderField[] { Logfile.LOGFILE.CATEGORY_ID }, false);
 public static Index LOGFILE_CIX_LOGFILE_EPOCH_HOUR_HOST_ID_LOGTAG = Internal.createIndex("cix_logfile_epoch_hour_host_id_logtag", Logfile.LOGFILE, new OrderField[] { Logfile.LOGFILE.EPOCH_HOUR, Logfile.LOGFILE.HOST_ID, Logfile.LOGFILE.LOGTAG }, false);
+public static Index LOGFILE_CIX_LOGFILE_EPOCH_HOUR_HOST_ID_LOGTAG_ID = Internal.createIndex("cix_logfile_epoch_hour_host_id_logtag_id", Logfile.LOGFILE, new OrderField[] { Logfile.LOGFILE.EPOCH_HOUR, Logfile.LOGFILE.HOST_ID, Logfile.LOGFILE.LOGTAG_ID }, false);
 public static Index LOGFILE_CIX_LOGFILE_HOST_ID_LOGTAG_LOGDATE = Internal.createIndex("cix_logfile_host_id_logtag_logdate", Logfile.LOGFILE, new OrderField[] { Logfile.LOGFILE.HOST_ID, Logfile.LOGFILE.LOGTAG, Logfile.LOGFILE.LOGDATE }, false);
 public static Index LOGFILE_CIX_LOGFILE_LOGDATE_HOST_ID_LOGTAG = Internal.createIndex("cix_logfile_logdate_host_id_logtag", Logfile.LOGFILE, new OrderField[] { Logfile.LOGFILE.LOGDATE, Logfile.LOGFILE.HOST_ID, Logfile.LOGFILE.LOGTAG }, false);
+public static Index LOGFILE_CIX_LOGFILE_LOGDATE_HOST_ID_LOGTAG_ID = Internal.createIndex("cix_logfile_logdate_host_id_logtag_id", Logfile.LOGFILE, new OrderField[] { Logfile.LOGFILE.LOGDATE, Logfile.LOGFILE.HOST_ID, Logfile.LOGFILE.LOGTAG_ID }, false);
 public static Index LOGFILE_FK_LOGFILE__LOGTAG_ID = Internal.createIndex("fk_logfile__logtag_id", Logfile.LOGFILE, new OrderField[] { Logfile.LOGFILE.LOGTAG_ID }, false);
 public static Index LOGFILE_IX_LOGFILE_EPOCH_EXPIRES = Internal.createIndex("ix_logfile_epoch_expires", Logfile.LOGFILE, new OrderField[] { Logfile.LOGFILE.EPOCH_EXPIRES }, false);
 public static Index LOGFILE_IX_LOGFILE_EXPIRATION = Internal.createIndex("ix_logfile_expiration", Logfile.LOGFILE, new OrderField[] { Logfile.LOGFILE.EXPIRATION }, false);

src/main/java/com/teragrep/pth_06/jooq/generated/journaldb/tables/Logfile.java

Lines changed: 2 additions & 2 deletions
@@ -91,7 +91,7 @@
 @SuppressWarnings({ "all", "unchecked", "rawtypes" })
 public class Logfile extends TableImpl<LogfileRecord> {

-private static final long serialVersionUID = -209761724;
+private static final long serialVersionUID = -375133312;

 /**
 * The reference instance of <code>journaldb.logfile</code>
@@ -246,7 +246,7 @@ public Schema getSchema() {

 @Override
 public List<Index> getIndexes() {
-return Arrays.<Index>asList(Indexes.LOGFILE_BUCKET_ID, Indexes.LOGFILE_CATEGORY_ID, Indexes.LOGFILE_CIX_LOGFILE_EPOCH_HOUR_HOST_ID_LOGTAG, Indexes.LOGFILE_CIX_LOGFILE_HOST_ID_LOGTAG_LOGDATE, Indexes.LOGFILE_CIX_LOGFILE_LOGDATE_HOST_ID_LOGTAG, Indexes.LOGFILE_FK_LOGFILE__LOGTAG_ID, Indexes.LOGFILE_IX_LOGFILE_EPOCH_EXPIRES, Indexes.LOGFILE_IX_LOGFILE_EXPIRATION, Indexes.LOGFILE_IX_LOGFILE__SOURCE_SYSTEM_ID, Indexes.LOGFILE_PRIMARY, Indexes.LOGFILE_UIX_LOGFILE_OBJECT_HASH);
+return Arrays.<Index>asList(Indexes.LOGFILE_BUCKET_ID, Indexes.LOGFILE_CATEGORY_ID, Indexes.LOGFILE_CIX_LOGFILE_EPOCH_HOUR_HOST_ID_LOGTAG, Indexes.LOGFILE_CIX_LOGFILE_EPOCH_HOUR_HOST_ID_LOGTAG_ID, Indexes.LOGFILE_CIX_LOGFILE_HOST_ID_LOGTAG_LOGDATE, Indexes.LOGFILE_CIX_LOGFILE_LOGDATE_HOST_ID_LOGTAG, Indexes.LOGFILE_CIX_LOGFILE_LOGDATE_HOST_ID_LOGTAG_ID, Indexes.LOGFILE_FK_LOGFILE__LOGTAG_ID, Indexes.LOGFILE_IX_LOGFILE_EPOCH_EXPIRES, Indexes.LOGFILE_IX_LOGFILE_EXPIRATION, Indexes.LOGFILE_IX_LOGFILE__SOURCE_SYSTEM_ID, Indexes.LOGFILE_PRIMARY, Indexes.LOGFILE_UIX_LOGFILE_OBJECT_HASH);
 }

 @Override

src/main/java/com/teragrep/pth_06/planner/ArchiveQuery.java

Lines changed: 2 additions & 2 deletions
@@ -46,7 +46,7 @@
 package com.teragrep.pth_06.planner;

 import org.apache.spark.sql.connector.metric.CustomTaskMetric;
-import org.jooq.Record11;
+import org.jooq.Record10;
 import org.jooq.Result;
 import org.jooq.types.ULong;

@@ -60,7 +60,7 @@
 */
 public interface ArchiveQuery {

-public abstract Result<Record11<ULong, String, String, String, String, Date, String, String, Long, ULong, ULong>> processBetweenUnixEpochHours(
+public abstract Result<Record10<ULong, String, String, String, Date, String, String, Long, ULong, ULong>> processBetweenUnixEpochHours(
 long startHour,
 long endHour
 );

src/main/java/com/teragrep/pth_06/planner/ArchiveQueryProcessor.java

Lines changed: 1 addition & 1 deletion
@@ -145,7 +145,7 @@ private void seekToResults() {
 * @return Data between start hour and end hour.
 */
 @Override
-public Result<Record11<ULong, String, String, String, String, Date, String, String, Long, ULong, ULong>> processBetweenUnixEpochHours(
+public Result<Record10<ULong, String, String, String, Date, String, String, Long, ULong, ULong>> processBetweenUnixEpochHours(
 long startHour,
 long endHour
 ) {

src/main/java/com/teragrep/pth_06/planner/GetArchivedObjectsFilterTable.java

Lines changed: 6 additions & 3 deletions
@@ -49,6 +49,7 @@
 import org.jooq.*;
 import org.jooq.conf.ParamType;
 import org.jooq.impl.DSL;
+import org.jooq.types.ULong;
 import org.jooq.types.UShort;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -66,7 +67,7 @@ public final class GetArchivedObjectsFilterTable {
 public static final Table<Record> FILTER_TABLE = DSL.table(DSL.name(tmpTableName));
 public static final Field<UShort> host_id = DSL.field(DSL.name(tmpTableName, "host_id"), UShort.class);
 public static final Field<String> host = DSL.field(DSL.name(tmpTableName, "host"), String.class);
-public static final Field<String> tag = DSL.field(DSL.name(tmpTableName, "tag"), String.class);
+public static final Field<ULong> tag_id = DSL.field(DSL.name(tmpTableName, "tag_id"), ULong.class);
 public static final Field<String> directory = DSL.field(DSL.name(tmpTableName, "directory"), String.class);
 public static final Field<String> stream = DSL.field(DSL.name(tmpTableName, "stream"), String.class);
 private static final Index hostIndex = DSL.index(DSL.name("cix_host_id_tag"));
@@ -105,7 +106,7 @@ public void create(final Condition streamdbCondition) {
 STREAMDB.STREAM.DIRECTORY.as(GetArchivedObjectsFilterTable.directory)
 )
 .select(STREAMDB.STREAM.STREAM_.as(GetArchivedObjectsFilterTable.stream))
-.select(STREAMDB.STREAM.TAG.as(GetArchivedObjectsFilterTable.tag))
+.select(JOURNALDB.LOGTAG.ID.as(GetArchivedObjectsFilterTable.tag_id))
 .select((JOURNALDB.HOST.NAME.as(GetArchivedObjectsFilterTable.host)))
 .select((JOURNALDB.HOST.ID.as(GetArchivedObjectsFilterTable.host_id)))
 .from(STREAMDB.STREAM)
@@ -115,6 +116,8 @@ public void create(final Condition streamdbCondition) {
 .on((STREAMDB.HOST.GID).eq(STREAMDB.LOG_GROUP.ID))
 .innerJoin(JOURNALDB.HOST)
 .on((STREAMDB.HOST.NAME).eq(JOURNALDB.HOST.NAME))
+.innerJoin(JOURNALDB.LOGTAG)
+.on((STREAMDB.STREAM.TAG).eq(JOURNALDB.LOGTAG.LOGTAG_))
 // following change
 .where(streamdbCondition)
 );
@@ -131,7 +134,7 @@ public void create(final Condition streamdbCondition) {
 //.on(FILTER_TABLE, directory, host_id, tag, stream).execute(); // FIXME this happens only on dev kube due to old mariadb: Index column size too large. The maximum column size is 767 bytes.
 .on(
 GetArchivedObjectsFilterTable.FILTER_TABLE, GetArchivedObjectsFilterTable.host_id,
-GetArchivedObjectsFilterTable.tag
+GetArchivedObjectsFilterTable.tag_id
 )
 ) {
 if (isLogSQL) {

src/main/java/com/teragrep/pth_06/planner/NestedTopNQuery.java

Lines changed: 1 addition & 2 deletions
@@ -103,8 +103,7 @@ public Table<Record> getTableStatement(Condition journaldbConditionArg, Date day
 SelectOnConditionStep<Record> selectOnConditionStep = DSL
 .select(resultFields)
 .from(GetArchivedObjectsFilterTable.FILTER_TABLE)
-.innerJoin(JOURNALDB.LOGFILE.forceIndex(Indexes.LOGFILE_CIX_LOGFILE_LOGDATE_HOST_ID_LOGTAG.getName()))
-.on(JOURNALDB.LOGFILE.HOST_ID.eq(GetArchivedObjectsFilterTable.host_id).and(JOURNALDB.LOGFILE.LOGTAG.eq(GetArchivedObjectsFilterTable.tag)));
+.innerJoin(JOURNALDB.LOGFILE.forceIndex(Indexes.LOGFILE_CIX_LOGFILE_LOGDATE_HOST_ID_LOGTAG_ID.getName())).on(JOURNALDB.LOGFILE.LOGTAG_ID.eq(GetArchivedObjectsFilterTable.tag_id)).and(JOURNALDB.LOGFILE.HOST_ID.eq(GetArchivedObjectsFilterTable.host_id));

 if (streamDBClient.bloomEnabled()) {
 // join all tables needed for the condition generated by walker

src/main/java/com/teragrep/pth_06/planner/SliceTable.java

Lines changed: 1 addition & 2 deletions
@@ -67,7 +67,6 @@ public final class SliceTable {
 public static final Field<String> directory = DSL.field(DSL.name(sliceTableName, "directory"), String.class);
 public static final Field<String> stream = DSL.field(DSL.name(sliceTableName, "stream"), String.class);
 public static final Field<String> host = DSL.field(DSL.name(sliceTableName, "host"), String.class);
-public static final Field<String> logtag = DSL.field(DSL.name(sliceTableName, "logtag"), String.class);
 public static final Field<Date> logdate = DSL.field(DSL.name(sliceTableName, "logdate"), Date.class);
 public static final Field<String> bucket = DSL.field(DSL.name(sliceTableName, "bucket"), String.class);
 public static final Field<String> path = DSL.field(DSL.name(sliceTableName, "path"), String.class);
@@ -95,7 +94,7 @@ public void create() {
 dropTableStep.execute();
 }
 try (
-final CreateTableColumnStep createTableStep = ctx.createTemporaryTable(SLICE_TABLE).columns(id, directory, stream, host, logtag, logdate, bucket, path, logtime, filesize, uncompressedFilesize)
+final CreateTableColumnStep createTableStep = ctx.createTemporaryTable(SLICE_TABLE).columns(id, directory, stream, host, logdate, bucket, path, logtime, filesize, uncompressedFilesize)
 ) {
 if (isLogSQL) {
 LOGGER

src/main/java/com/teragrep/pth_06/planner/StreamDBClient.java

Lines changed: 10 additions & 9 deletions
@@ -204,20 +204,21 @@ public CustomTaskMetric[] currentDatabaseMetrics() {
 public int pullToSliceTable(Date day) {
 LOGGER.debug("StreamDBClient.pullToSliceTable called for date <{}>", day);

-SelectOnConditionStep<Record11<ULong, String, String, String, String, Date, String, String, Long, ULong, ULong>> select = ctx
+SelectOnConditionStep<Record10<ULong, String, String, String, Date, String, String, Long, ULong, ULong>> select = ctx
 .select(
 JOURNALDB.LOGFILE.ID, nestedTopNQuery.directory(), nestedTopNQuery.stream(),
-JOURNALDB.HOST.NAME, JOURNALDB.LOGFILE.LOGTAG, JOURNALDB.LOGFILE.LOGDATE, JOURNALDB.BUCKET.NAME,
-JOURNALDB.LOGFILE.PATH, nestedTopNQuery.logtime(), JOURNALDB.LOGFILE.FILE_SIZE,
-JOURNALDB.LOGFILE.UNCOMPRESSED_FILE_SIZE
+JOURNALDB.HOST.NAME, JOURNALDB.LOGFILE.LOGDATE, JOURNALDB.BUCKET.NAME, JOURNALDB.LOGFILE.PATH,
+nestedTopNQuery.logtime(), JOURNALDB.LOGFILE.FILE_SIZE, JOURNALDB.LOGFILE.UNCOMPRESSED_FILE_SIZE
 )
 .from(nestedTopNQuery.getTableStatement(journaldbCondition, day))
 .join(JOURNALDB.LOGFILE)
 .on(JOURNALDB.LOGFILE.ID.eq(nestedTopNQuery.id()))
 .join(JOURNALDB.BUCKET)
 .on(JOURNALDB.BUCKET.ID.eq(JOURNALDB.LOGFILE.BUCKET_ID))
 .join(JOURNALDB.HOST)
-.on(JOURNALDB.HOST.ID.eq(JOURNALDB.LOGFILE.HOST_ID));
+.on(JOURNALDB.HOST.ID.eq(JOURNALDB.LOGFILE.HOST_ID))
+.join(JOURNALDB.LOGTAG)
+.on(JOURNALDB.LOGTAG.ID.eq(JOURNALDB.LOGFILE.LOGTAG_ID));

 final Timer.Context timerCtx = metricRegistry.timer("ArchiveDatabaseLatency").time();
 final int rows;
@@ -290,7 +291,7 @@ void deleteRangeFromSliceTable(long start, long end) {
 LOGGER.debug("StreamDBClient.deleteRangeFromSliceTable exit");
 }

-Result<Record11<ULong, String, String, String, String, Date, String, String, Long, ULong, ULong>> getHourRange(
+Result<Record10<ULong, String, String, String, Date, String, String, Long, ULong, ULong>> getHourRange(
 long excludedStartHour,
 long includedEndHour
 ) {
@@ -299,10 +300,10 @@ Result<Record11<ULong, String, String, String, String, Date, String, String, Lon
 "StreamDBClient.getHourRange called excludedStartHour <{}> includedEndHour <{}>",
 excludedStartHour, includedEndHour
 );
-Result<Record11<ULong, String, String, String, String, Date, String, String, Long, ULong, ULong>> result = ctx
+Result<Record10<ULong, String, String, String, Date, String, String, Long, ULong, ULong>> result = ctx
 .select(
-SliceTable.id, SliceTable.directory, SliceTable.stream, SliceTable.host, SliceTable.logtag,
-SliceTable.logdate, SliceTable.bucket, SliceTable.path, SliceTable.logtime, SliceTable.filesize,
+SliceTable.id, SliceTable.directory, SliceTable.stream, SliceTable.host, SliceTable.logdate,
+SliceTable.bucket, SliceTable.path, SliceTable.logtime, SliceTable.filesize,
 SliceTable.uncompressedFilesize
 )
 .from(SliceTable.SLICE_TABLE)

src/main/java/com/teragrep/pth_06/scheduler/ArchiveRangeProcessor.java

Lines changed: 8 additions & 8 deletions
@@ -50,7 +50,7 @@
 import com.teragrep.pth_06.planner.offset.DatasourceOffset;
 import org.apache.spark.sql.connector.read.streaming.Offset;
 import org.jooq.Record;
-import org.jooq.Record11;
+import org.jooq.Record10;
 import org.jooq.Result;
 import org.jooq.types.ULong;
 import org.slf4j.Logger;
@@ -75,7 +75,7 @@ public List<BatchUnit> processRange(Offset start, Offset end) {

 List<BatchUnit> batchUnits = new ArrayList<>();

-Result<Record11<ULong, String, String, String, String, Date, String, String, Long, ULong, ULong>> result = aq
+Result<Record10<ULong, String, String, String, Date, String, String, Long, ULong, ULong>> result = aq
 .processBetweenUnixEpochHours(
 ((DatasourceOffset) start).getArchiveOffset().offset(),
 ((DatasourceOffset) end).getArchiveOffset().offset()
@@ -84,19 +84,19 @@ public List<BatchUnit> processRange(Offset start, Offset end) {
 for (Record r : result) {
 // uncompressed size can be null
 long uncompressedSize = -1L;
-if (r.get(10) != null) {
-uncompressedSize = r.get(10, Long.class);
+if (r.get(9) != null) {
+uncompressedSize = r.get(9, Long.class);
 }

 batchUnits
 .add(new BatchUnit(new ArchiveS3ObjectMetadata(r.get(0, String.class), // id
-r.get(6, String.class), // bucket
-r.get(7, String.class), // path
+r.get(5, String.class), // bucket
+r.get(6, String.class), // path
 r.get(1, String.class), // directory
 r.get(2, String.class), // stream
 r.get(3, String.class), // host
-r.get(8, Long.class), // logtime
-r.get(9, Long.class), // compressedSize
+r.get(7, Long.class), // logtime
+r.get(8, Long.class), // compressedSize
 uncompressedSize // uncompressedSize
 )));
 }
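
As a reading aid for the shifted positions above (reconstructed from the ten-column select in StreamDBClient.pullToSliceTable; a sketch, not generated documentation), the 0-based Record10 layout these r.get(...) calls assume is:

// 0: logfile.id                     (ULong)
// 1: directory                      (String)
// 2: stream                         (String)
// 3: host name                      (String)
// 4: logfile.logdate                (Date)   // logtag previously sat at position 4, so every later index moved down by one
// 5: bucket name                    (String)
// 6: logfile.path                   (String)
// 7: logtime                        (Long)
// 8: logfile.file_size              (ULong)  // read as compressedSize
// 9: logfile.uncompressed_file_size (ULong, may be null)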
