
Commit 6354549

Merge branch 'master' into snyk-fix-0967dab6c7294532dde05e178b572fee
2 parents 7932935 + 3f64b9a commit 6354549

File tree

6 files changed: +61 −52 lines

CMakeLists.txt

Lines changed: 0 additions & 20 deletions
@@ -168,18 +168,6 @@ IF(BUILD_ORT)
     ENDIF()
 ENDIF()
 
-#----------------------------------------------------------------------------------------------
-
-IF (APPLE)
-    FIND_LIBRARY(MKL_LIBRARIES NAMES mklml
-                 PATHS ${depsAbs}/mkl/lib)
-    IF (NOT MKL_LIBRARIES)
-        MESSAGE(FATAL_ERROR "Could not find MKL for Mac")
-    ENDIF()
-    SET(platDeps "${MKL_LIBRARIES}")
-ENDIF()
-
-#----------------------------------------------------------------------------------------------
 
 IF(BUILD_TFLITE)
     # Find TensorFlow Lite stuff and build our wrapper
@@ -307,14 +295,6 @@ IF(BUILD_ORT)
         FILES_MATCHING PATTERN ${LIB_PATTERN})
 ENDIF()
 
-#----------------------------------------------------------------------------------------------
-
-IF (APPLE)
-    INSTALL(DIRECTORY ${depsAbs}/mkl/lib DESTINATION ${installAbs}/backends/redisai_torch
-            FILES_MATCHING PATTERN ${LIB_PATTERN})
-ENDIF()
-
-#----------------------------------------------------------------------------------------------
 
 IF (NOT ${installAbs} STREQUAL ${CMAKE_SOURCE_DIR}/install-${DEVICE})
     INSTALL_SYMLINK(${installAbs} ${CMAKE_SOURCE_DIR}/install-${DEVICE})

get_deps.sh

Lines changed: 28 additions & 15 deletions
@@ -144,8 +144,16 @@ if [[ $OS == linux ]]; then
     else
         echo "Only x64 is supported currently"
     fi
-else
-    echo "Only Linux OS is supported currently"
+elif [[ $OS == macos ]]; then
+    TF_OS=darwin
+    TF_BUILD=cpu
+    TF_ARCH=x86_64
+    if [[ $WITH_TF == S3 ]]; then
+        LIBTF_URL_BASE=https://s3.amazonaws.com/redismodules/tensorflow
+    else
+        LIBTF_URL_BASE=https://storage.googleapis.com/tensorflow/libtensorflow
+    fi
+
 fi
 
 LIBTF_ARCHIVE=libtensorflow-${TF_BUILD}-${TF_OS}-${TF_ARCH}-${TF_VERSION}.tar.gz
@@ -163,11 +171,11 @@ if [[ $OS == linux ]]; then
     TFLITE_OS="linux"
     if [[ $ARCH == x64 ]]; then
         TFLITE_ARCH=x86_64
-    else
-        echo "Only x64 is supported currently"
     fi
-else
-    echo "Only Linux OS is supported currently"
+elif [[ $OS == macos ]]; then
+    TFLITE_OS=darwin
+    # TFLITE_BUILD=cpu
+    TFLITE_ARCH=x86_64
 fi
 
 LIBTFLITE_ARCHIVE=libtensorflowlite-${TFLITE_OS}-${TFLITE_ARCH}-${TFLITE_VERSION}.tar.gz
@@ -191,15 +199,17 @@ if [[ $OS == linux ]]; then
     else
         echo "Only x64 is supported currently"
     fi
-else
-    echo "Only Linux OS is supported currently"
+    LIBTORCH_ARCHIVE=libtorch-cxx11-abi-shared-with-deps-${PT_VERSION}%2B${PT_BUILD}.zip
+
+elif [[ $OS == macos ]]; then
+    PT_OS=macos
+    PT_ARCH=x86_64
+    PT_BUILD=cpu
+    PT_REPACK=1
+    LIBTORCH_ARCHIVE=libtorch-macos-${PT_VERSION}.zip
 fi
 
-if [[ $GPU != 1 ]]; then
-    LIBTORCH_ARCHIVE=libtorch-cxx11-abi-shared-with-deps-${PT_VERSION}%2B${PT_BUILD}.zip
-else
-    LIBTORCH_ARCHIVE=libtorch-cxx11-abi-shared-with-deps-${PT_VERSION}%2B${PT_BUILD}.zip
-fi
+
 LIBTORCH_URL=https://download.pytorch.org/libtorch/$PT_BUILD/$LIBTORCH_ARCHIVE
 
 if [[ $WITH_PT != 0 ]]; then
@@ -222,8 +232,11 @@ if [[ $OS == linux ]]; then
     else
         echo "Only x64 is supported currently"
     fi
-else
-    echo "Only Linux OS is supported currently"
+elif [[ $OS == macos ]]; then
+    ORT_OS=osx
+    ORT_ARCH=x64
+    ORT_BUILD=""
+    ORT_URL_BASE=https://github.com/microsoft/onnxruntime/releases/download/v${ORT_VERSION}
 fi
 
 ORT_ARCHIVE=onnxruntime-${ORT_OS}-${ORT_ARCH}${ORT_BUILD}-${ORT_VERSION}.tgz
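For orientation, a minimal sketch (not part of the diff) of the libtensorflow URL a macOS CPU build would end up fetching via the new branch, assuming the script joins LIBTF_URL_BASE and LIBTF_ARCHIVE with a slash as it does on Linux; the TF_VERSION value is an illustrative placeholder only:

    # Sketch only: TF_VERSION is a placeholder; the real value is set elsewhere in get_deps.sh.
    TF_OS=darwin; TF_BUILD=cpu; TF_ARCH=x86_64
    TF_VERSION=2.6.0
    LIBTF_URL_BASE=https://storage.googleapis.com/tensorflow/libtensorflow
    LIBTF_ARCHIVE=libtensorflow-${TF_BUILD}-${TF_OS}-${TF_ARCH}-${TF_VERSION}.tar.gz
    echo "${LIBTF_URL_BASE}/${LIBTF_ARCHIVE}"
    # -> https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-darwin-x86_64-2.6.0.tar.gz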

opt/build/docker/dockerfile.tmpl

Lines changed: 6 additions & 0 deletions
@@ -76,6 +76,12 @@ FROM redisfab/redis:{{REDIS_VERSION}}-{{REDIS_ARCH}}-{{REDIS_OSNICK}}
 {% endif %}
 
 ARG PACK
+# centos8 specific integration until a move to rocky or similar
+{% if REDIS_OSNICK == "centos8" %}
+RUN cd /etc/yum.repos.d/
+RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-*
+RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*
+{% endif %}
 
 RUN if [ ! -z $(command -v apt-get) ]; then apt-get -qq update; apt-get -q install -y libgomp1; fi
 RUN if [ ! -z $(command -v yum) ]; then yum install -y libgomp; fi
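To illustrate what the two sed rewrites in the centos8 block do, here is a small sketch run against a hypothetical sample repo file (the real targets are the CentOS-* files under /etc/yum.repos.d/): it comments out the mirrorlist entry and points baseurl at vault.centos.org instead of the retired mirror.centos.org.

    # Sketch only: the repo lines below are hypothetical samples, not the real CentOS files.
    cat > /tmp/CentOS-Sample.repo <<'EOF'
    mirrorlist=https://example.invalid/centos?repo=BaseOS
    #baseurl=http://mirror.centos.org/$contentdir/$releasever/BaseOS/$basearch/os/
    EOF
    sed -i 's/mirrorlist/#mirrorlist/g' /tmp/CentOS-Sample.repo
    sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /tmp/CentOS-Sample.repo
    cat /tmp/CentOS-Sample.repo
    # #mirrorlist=https://example.invalid/centos?repo=BaseOS
    # baseurl=http://vault.centos.org/$contentdir/$releasever/BaseOS/$basearch/os/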

opt/redis_valgrind.sup

Lines changed: 8 additions & 1 deletion
@@ -184,4 +184,11 @@
    ...
    fun:_dl_catch_exception
    ...
-}
+}
+
+{
+   <ignore_torch_cond_jump>
+   Memcheck:Cond
+   ...
+   obj:*/libtorch_cpu.so*
+}
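The added suppression ignores conditional-jump reports whose stack resolves into libtorch_cpu.so. As a rough usage sketch (the module path is an assumption; point it at your actual build artifact):

    # Sketch only: path to redisai.so is assumed.
    valgrind --suppressions=opt/redis_valgrind.sup \
        redis-server --loadmodule ./install-cpu/redisai.so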

src/redisai.c

Lines changed: 5 additions & 2 deletions
@@ -736,10 +736,11 @@ int RedisAI_ScriptStore_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **ar
             ctx, "ERR Insufficient arguments, missing script entry points");
     }
 
-    array_new_on_stack(const char *, nEntryPoints, entryPoints);
+    const char **entryPoints = array_new(const char *, nEntryPoints);
     for (size_t i = 0; i < nEntryPoints; i++) {
         const char *entryPoint;
         if (AC_GetString(&ac, &entryPoint, NULL, 0) != AC_OK) {
+            array_free(entryPoints);
             return RedisModule_ReplyWithError(
                 ctx, "ERR Insufficient arguments, missing script entry points");
         }
@@ -754,6 +755,7 @@ int RedisAI_ScriptStore_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **ar
     }
 
     if (scriptdef == NULL) {
+        array_free(entryPoints);
         return RedisModule_ReplyWithError(ctx, "ERR Insufficient arguments, missing script SOURCE");
     }
 
@@ -767,6 +769,7 @@ int RedisAI_ScriptStore_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **ar
                        "Backend TORCH not loaded, will try loading default backend");
         int ret = RAI_LoadDefaultBackend(ctx, RAI_BACKEND_TORCH);
         if (ret == REDISMODULE_ERR) {
+            array_free(entryPoints);
             RedisModule_Log(ctx, "warning", "Could not load TORCH default backend");
             int ret = RedisModule_ReplyWithError(ctx, "ERR Could not load backend");
             RAI_ClearError(&err);
@@ -776,7 +779,7 @@ int RedisAI_ScriptStore_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **ar
         script =
             RAI_ScriptCompile(devicestr, tag, scriptdef, entryPoints, (size_t)nEntryPoints, &err);
     }
-
+    array_free(entryPoints);
     if (err.code != RAI_OK) {
         int ret = RedisModule_ReplyWithError(ctx, err.detail_oneline);
         RAI_ClearError(&err);

tests/flow/test_serializations.py

Lines changed: 14 additions & 14 deletions
@@ -31,7 +31,7 @@ def test_tf_model_serialization(env):
                         'minbatchtimeout', 1000, 'INPUTS', 2, 'a', 'b', 'OUTPUTS', 1, 'mul', 'BLOB', tf_model)
 
     # Redis should save the stored model by calling the AOF rewrite callback and then reload from AOF.
-    env.restartAndReload(timeout_sec=120)
+    env.restartAndReload(timeout_sec=300)
     _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _ , inputs, _, outputs, _, minbatchtimeout\
         = con.execute_command("AI.MODELGET", key_name, "META")
     env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
@@ -42,7 +42,7 @@ def test_tf_model_serialization(env):
     con.execute_command('AI.MODELSTORE', key_name, 'TF', DEVICE, 'TAG', 'TF_GRAPH1', 'batchsize', 4, 'minbatchsize', 2,
                         'INPUTS', 2, 'a', 'b', 'OUTPUTS', 1, 'mul', 'BLOB', tf_model)
     # Redis should save the stored model by calling the AOF rewrite callback and then reload from AOF.
-    env.restartAndReload(timeout_sec=120)
+    env.restartAndReload(timeout_sec=300)
     _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _ , inputs, _, outputs, _, minbatchtimeout\
         = con.execute_command("AI.MODELGET", key_name, "META")
     env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
@@ -52,7 +52,7 @@ def test_tf_model_serialization(env):
     con.execute_command('AI.MODELSTORE', key_name, 'TF', DEVICE, 'TAG', 'TF_GRAPH2', 'batchsize', 4,
                         'INPUTS', 2, 'a', 'b', 'OUTPUTS', 1, 'mul', 'BLOB', tf_model)
     # Redis should save the stored model by calling the AOF rewrite callback and then reload from AOF.
-    env.restartAndReload(timeout_sec=120)
+    env.restartAndReload(timeout_sec=300)
     _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _ , inputs, _, outputs, _, minbatchtimeout \
         = con.execute_command("AI.MODELGET", key_name, "META")
     env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
@@ -62,7 +62,7 @@ def test_tf_model_serialization(env):
     con.execute_command('AI.MODELSTORE', key_name, 'TF', DEVICE, 'TAG', 'TF_GRAPH3',
                         'INPUTS', 2, 'a', 'b', 'OUTPUTS', 1, 'mul', 'BLOB', tf_model)
     # Redis should save the stored model by calling the AOF rewrite callback and then reload from AOF.
-    env.restartAndReload(timeout_sec=120)
+    env.restartAndReload(timeout_sec=300)
     _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _ , inputs, _, outputs, _, minbatchtimeout \
         = con.execute_command("AI.MODELGET", key_name, "META")
     env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
@@ -89,7 +89,7 @@ def test_torch_model_serialization(env):
                         'minbatchtimeout', 1000, 'BLOB', torch_model)
 
     # Redis should save the stored model by calling the AOF rewrite callback and then reload from AOF.
-    env.restartAndReload(timeout_sec=120)
+    env.restartAndReload(timeout_sec=300)
     _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _ , inputs, _, outputs, _, minbatchtimeout\
         = con.execute_command("AI.MODELGET", key_name, "META")
     env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
@@ -99,7 +99,7 @@ def test_torch_model_serialization(env):
     # Reinsert the model (without minbatchtimeout)
     con.execute_command('AI.MODELSTORE', key_name, 'TORCH', DEVICE, 'TAG', 'PT_MINIMAL1', 'batchsize', 4, 'minbatchsize', 2,
                         'BLOB', torch_model)
-    env.restartAndReload(timeout_sec=120)
+    env.restartAndReload(timeout_sec=300)
     _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _ , inputs, _, outputs, _, minbatchtimeout \
         = con.execute_command("AI.MODELGET", key_name, "META")
     env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
@@ -108,7 +108,7 @@ def test_torch_model_serialization(env):
     # Reinsert the model (without minbatch)
     con.execute_command('AI.MODELSTORE', key_name, 'TORCH', DEVICE, 'TAG', 'PT_MINIMAL2', 'batchsize', 4,
                         'BLOB', torch_model)
-    env.restartAndReload(timeout_sec=120)
+    env.restartAndReload(timeout_sec=300)
     _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _ , inputs, _, outputs, _, minbatchtimeout \
         = con.execute_command("AI.MODELGET", key_name, "META")
     env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
@@ -117,7 +117,7 @@ def test_torch_model_serialization(env):
     # Reinsert the model (without batching)
     con.execute_command('AI.MODELSTORE', key_name, 'TORCH', DEVICE, 'TAG', 'PT_MINIMAL3',
                         'BLOB', torch_model)
-    env.restartAndReload(timeout_sec=120)
+    env.restartAndReload(timeout_sec=300)
     _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _ , inputs, _, outputs, _, minbatchtimeout \
         = con.execute_command("AI.MODELGET", key_name, "META")
     env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
@@ -144,7 +144,7 @@ def test_torch_script_serialization(env):
     con.execute_command('AI.SCRIPTSTORE', key_name, DEVICE, 'TAG', 'TORCH_SCRIPT', 'ENTRY_POINTS', 2, 'bar', 'bar_variadic', 'SOURCE', torch_script)
 
     # Redis should save the stored script by calling the AOF rewrite callback and then reload from AOF.
-    env.restartAndReload(timeout_sec=120)
+    env.restartAndReload(timeout_sec=300)
     _, device, _, tag, _, entry_points = con.execute_command("AI.SCRIPTGET", key_name, "META")
     env.assertEqual([device, tag, entry_points], [DEVICE.encode(), b"TORCH_SCRIPT", [b'bar', b'bar_variadic']])
     torch_script_run(env, key_name)
@@ -165,7 +165,7 @@ def test_onnx_serialization(env):
     con.execute_command('AI.MODELSTORE', key_name, 'ONNX', DEVICE, 'TAG', 'ONNX_LINEAR_IRIS', 'batchsize', 4, 'minbatchsize', 2,
                         'minbatchtimeout', 1000, 'BLOB', onnx_model)
     # Redis should save the stored model by calling the AOF rewrite callback and then reload from AOF.
-    env.restartAndReload(timeout_sec=120)
+    env.restartAndReload(timeout_sec=300)
     _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _ , inputs, _, outputs, _, minbatchtimeout\
         = con.execute_command("AI.MODELGET", key_name, "META")
     env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
@@ -175,7 +175,7 @@ def test_onnx_serialization(env):
     # Reinsert the model (without minbatchtimeout)
     con.execute_command('AI.MODELSTORE', key_name, 'ONNX', DEVICE, 'TAG', 'ONNX_LINEAR_IRIS1', 'batchsize', 4,
                         'minbatchsize', 2, 'BLOB', onnx_model)
-    env.restartAndReload(timeout_sec=120)
+    env.restartAndReload(timeout_sec=300)
     _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _ , inputs, _, outputs, _, minbatchtimeout \
         = con.execute_command("AI.MODELGET", key_name, "META")
     env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
@@ -184,7 +184,7 @@ def test_onnx_serialization(env):
     # Reinsert the model (without minbatch)
     con.execute_command('AI.MODELSTORE', key_name, 'ONNX', DEVICE, 'TAG', 'ONNX_LINEAR_IRIS2', 'batchsize', 4,
                         'BLOB', onnx_model)
-    env.restartAndReload(timeout_sec=120)
+    env.restartAndReload(timeout_sec=300)
     _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _ , inputs, _, outputs, _, minbatchtimeout \
         = con.execute_command("AI.MODELGET", key_name, "META")
     env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
@@ -193,7 +193,7 @@ def test_onnx_serialization(env):
     # Reinsert the model (without batching)
     con.execute_command('AI.MODELSTORE', key_name, 'ONNX', DEVICE, 'TAG', 'ONNX_LINEAR_IRIS3',
                         'BLOB', onnx_model)
-    env.restartAndReload(timeout_sec=120)
+    env.restartAndReload(timeout_sec=300)
     _, backend, _, device, _, tag, _, batchsize, _, minbatchsize, _ , inputs, _, outputs, _, minbatchtimeout \
         = con.execute_command("AI.MODELGET", key_name, "META")
     env.assertEqual([backend, device, tag, batchsize, minbatchsize, minbatchtimeout, inputs, outputs],
@@ -205,7 +205,7 @@ def test_tensor_serialization(env):
     con = get_connection(env, key_name)
     con.execute_command('AI.TENSORSET', key_name, 'INT32', 2, 1, 'VALUES', 1, 2)
     # Redis should save the stored tensor by calling the AOF rewrite callback and then reload from AOF.
-    env.restartAndReload(timeout_sec=120)
+    env.restartAndReload(timeout_sec=300)
     _, tensor_type, _, tensor_shape = con.execute_command('AI.TENSORGET', key_name, 'META')
     env.assertEqual([tensor_type, tensor_shape], [b"INT32", [2, 1]])
     values = con.execute_command('AI.TENSORGET', key_name, 'VALUES')
