diff --git a/.bashrc b/.bashrc new file mode 100644 index 0000000000..aba1296dac --- /dev/null +++ b/.bashrc @@ -0,0 +1,52 @@ +# .bashrc + +# User specific aliases and functions + +alias rm='rm -i' +alias cp='cp -i' +alias mv='mv -i' + +# Source global definitions +if [ -f /etc/bashrc ]; then + . /etc/bashrc +fi +export http_proxy=http://child-prc.intel.com:913 +export https_proxy=http://child-prc.intel.com:913 + +export PADDLEPADDLE_TP_CACHE="/home/guest/tp_cache" +#export LD_LIBRARY_PATH=/usr/local/lib/python3.8/dist-packages/deepmd_kit-1.2.3.dev530+g8d8f289.d20220512-py3.8-linux-x86_64.egg/deepmd/op:$LD_LIBRARY_PATH +#export LIBRARY_PATH=/usr/local/lib/python3.8/dist-packages/deepmd_kit-1.2.3.dev530+g8d8f289.d20220512-py3.8-linux-x86_64.egg/deepmd/op:$LIBRARY_PATH +#export DEEP_MD_PATH=/usr/local/lib/python3.8/dist-packages/deepmd_kit-1.2.3.dev530+g8d8f289.d20220512-py3.8-linux-x86_64.egg/deepmd/op +#export LD_LIBRARY_PATH=/usr/local/lib/python3.8/dist-packages/deepmd_kit-1.2.3.dev491+g1a06aa4-py3.8-linux-x86_64.egg/deepmd/op:$LD_LIBRARY_PATH +#export LIBRARY_PATH=/usr/local/lib/python3.8/dist-packages/deepmd_kit-1.2.3.dev491+g1a06aa4-py3.8-linux-x86_64.egg/deepmd/op:$LIBRARY_PATH +#export DEEP_MD_PATH=/usr/local/lib/python3.8/dist-packages/deepmd_kit-1.2.3.dev491+g1a06aa4-py3.8-linux-x86_64.egg/deepmd/op +# +#1.2.3.dev530+g8d8f289.d20220512 +export tensorflow_root=/home/tensorflowroot +export deepmd_root=/home/deepmdroot +# >>> conda initialize >>> +# !! Contents within this block are managed by 'conda init' !! +__conda_setup="$('/root/anaconda3/bin/conda' 'shell.bash' 'hook' 2> /dev/null)" +if [ $? -eq 0 ]; then + eval "$__conda_setup" +else + if [ -f "/root/anaconda3/etc/profile.d/conda.sh" ]; then + . 
"/root/anaconda3/etc/profile.d/conda.sh" + else + export PATH="/root/anaconda3/bin:$PATH" + fi +fi +unset __conda_setup +# <<< conda initialize <<< +export PATH=/home/jessie/cmake-3.21.0-linux-x86_64/bin:$PATH + +export PATH=/home/lammps-stable_29Oct2020/src:$PATH +#export LD_LIBRARY_PATH=/home/paddle-deepmd/source/build/lib:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=/home/deepmd-kit/source/build/lib:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=/home/Paddle/build/paddle_inference_install_dir/paddle/lib:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=/home/Paddle/build/paddle_inference_install_dir/third_party/install/mkldnn/lib:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=/home/Paddle/build/paddle_inference_install_dir/third_party/install/mklml/lib:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=/home/Paddle/build/paddle/fluid/pybind/:$LD_LIBRARY_PATH +#export LD_LIBRARY_PATH=/home/paddle-deepmd/source/build:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=/home/deepmd-kit/source/build:$LD_LIBRARY_PATH + diff --git a/compile_deepmd.sh b/compile_deepmd.sh index ec0480b743..2a8a2021df 100644 --- a/compile_deepmd.sh +++ b/compile_deepmd.sh @@ -1,6 +1,5 @@ rm -rf /home/deepmdroot/ && mkdir /home/deepmdroot && deepmd_root=/home/deepmdroot -cd /home/paddle-deepmd/source && rm -rf build && mkdir build && cd build -#cmake -DTENSORFLOW_ROOT=$tensorflow_root -DCMAKE_INSTALL_PREFIX=$deepmd_root -DPADDLE_ROOT=/home/Paddle/build/paddle_inference_install_dir -DUSE_CUDA_TOOLKIT=FALSE .. +cd /home/deepmd-kit/source && rm -rf build && mkdir build && cd build cmake -DPADDLE_ROOT=/home/Paddle/build/paddle_inference_install_dir -DUSE_CUDA_TOOLKIT=FALSE .. make -j 4 && make install make lammps diff --git a/compile_lammps.sh b/compile_lammps.sh index 7951f4a843..9d0f9e8fc7 100644 --- a/compile_lammps.sh +++ b/compile_lammps.sh @@ -3,6 +3,8 @@ cd /home rm -rf lammps-stable_29Oct2020/ tar -xzvf stable_29Oct2020.tar.gz cd lammps-stable_29Oct2020/src/ -cp -r /home/paddle-deepmd/source/build/USER-DEEPMD . 
+cp -r /home/deepmd-kit/source/build/USER-DEEPMD . +#cp -r /home/paddle-deepmd/source/build/USER-DEEPMD . make yes-kspace yes-user-deepmd -make serial -j 20 +#make serial -j 20 +make mpi -j 20 diff --git a/examples/water/lmp/in.lammps b/examples/water/lmp/in.lammps index a501a5ab27..4dcce1466f 100644 --- a/examples/water/lmp/in.lammps +++ b/examples/water/lmp/in.lammps @@ -1,5 +1,5 @@ # bulk water - +echo screen units metal boundary p p p atom_style atomic @@ -11,7 +11,7 @@ read_data water.lmp mass 1 16 mass 2 2 -pair_style deepmd frozen_model.pb +pair_style deepmd /home/deepmd-kit/examples/water/lmp/model.pdmodel /home/deepmd-kit/examples/water/lmp/model.pdiparams pair_coeff velocity all create 330.0 23456789 diff --git a/examples/water/lmp/model.pdiparams b/examples/water/lmp/model.pdiparams new file mode 100644 index 0000000000..033e12d0da Binary files /dev/null and b/examples/water/lmp/model.pdiparams differ diff --git a/examples/water/lmp/model.pdiparams.info b/examples/water/lmp/model.pdiparams.info new file mode 100644 index 0000000000..de3a13a349 Binary files /dev/null and b/examples/water/lmp/model.pdiparams.info differ diff --git a/examples/water/lmp/model.pdmodel b/examples/water/lmp/model.pdmodel new file mode 100644 index 0000000000..922dcb7e0e Binary files /dev/null and b/examples/water/lmp/model.pdmodel differ diff --git a/source/CMakeLists.txt b/source/CMakeLists.txt index e1d9fa749e..7bdcf35b07 100755 --- a/source/CMakeLists.txt +++ b/source/CMakeLists.txt @@ -5,7 +5,10 @@ set(CMAKE_LINK_WHAT_YOU_USE TRUE) # build cpp or python interfaces if (NOT DEFINED BUILD_CPP_IF) set(BUILD_CPP_IF TRUE) -endif (NOT DEFINED BUILD_CPP_IF) + message(WARNING "WARNING! 
-DON_INFER is set") + add_definitions("-DON_INFER") +endif() + if (NOT DEFINED BUILD_PY_IF) set(BUILD_PY_IF FALSE) endif (NOT DEFINED BUILD_PY_IF) @@ -72,72 +75,72 @@ if (USE_CUDA_TOOLKIT) add_definitions("-D GOOGLE_CUDA") endif() -# find paddle +if(BUILD_PY_IF) +find_package(tensorflow REQUIRED) +else() find_package(Fluid REQUIRED) - -# find tensorflow, I need tf abi info -if (BUILD_PY_IF) - find_package(tensorflow REQUIRED) -endif (BUILD_PY_IF) +endif() # find threads find_package(Threads) # auto op_cxx_abi -#if (NOT DEFINED OP_CXX_ABI) -# if (BUILD_PY_IF) -# if (DEFINED TENSORFLOW_ROOT) -# set(FIND_ABI_CMD "import sys,os; sys.path.insert(0, os.path.join('${TENSORFLOW_ROOT}', '..')); import tensorflow; print(tensorflow.CXX11_ABI_FLAG if 'CXX11_ABI_FLAG' in tensorflow.__dict__ else tensorflow.sysconfig.CXX11_ABI_FLAG, end = '')" ) -# else() -# set(FIND_ABI_CMD "import tensorflow; print(tensorflow.CXX11_ABI_FLAG if 'CXX11_ABI_FLAG' in tensorflow.__dict__ else tensorflow.sysconfig.CXX11_ABI_FLAG, end = '')") -# endif() -# execute_process( -# COMMAND ${PYTHON_EXECUTABLE} "-c" "${FIND_ABI_CMD}" -# WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} -# OUTPUT_VARIABLE PY_CXX_ABI -# RESULT_VARIABLE PY_CXX_ABI_RESULT_VAR -# ERROR_VARIABLE PY_CXX_ABI_ERROR_VAR -# ) -# if (NOT ${PY_CXX_ABI_RESULT_VAR} EQUAL 0) -# message(FATAL_ERROR "Cannot determine cxx abi, error message: ${PY_CXX_ABI_ERROR_VAR}") -# endif() -# set(OP_CXX_ABI ${PY_CXX_ABI}) -# endif() -# if (BUILD_CPP_IF) -# try_run( -# CPP_CXX_ABI_RUN_RESULT_VAR CPP_CXX_ABI_COMPILE_RESULT_VAR -# ${CMAKE_CURRENT_BINARY_DIR}/tf_cxx_abi -# "${CMAKE_CURRENT_SOURCE_DIR}/cmake/tf_cxx_abi.cpp" -# LINK_LIBRARIES ${TensorFlowFramework_LIBRARY} -# CMAKE_FLAGS "-DINCLUDE_DIRECTORIES:STRING=${TensorFlow_INCLUDE_DIRS}" -# RUN_OUTPUT_VARIABLE CPP_CXX_ABI -# COMPILE_OUTPUT_VARIABLE CPP_CXX_ABI_COMPILE_OUTPUT_VAR -# ) -# if (NOT ${CPP_CXX_ABI_COMPILE_RESULT_VAR}) -# message(FATAL_ERROR "Failed to compile: \n ${CPP_CXX_ABI_COMPILE_OUTPUT_VAR}" ) -# 
endif() -# if (NOT ${CPP_CXX_ABI_RUN_RESULT_VAR} EQUAL "0") -# message(FATAL_ERROR "Failed to run, return code: ${CPP_CXX_ABI}" ) -# endif() -# if (DEFINED PY_CXX_ABI) -# if (NOT (${CPP_CXX_ABI} EQUAL ${PY_CXX_ABI})) -# message (WARNNING "NOT consistent CXX_ABIs: python interface of tf uses ${PY_CXX_ABI}, while c++ interface of tf uses ${CPP_CXX_ABI}, we follow c++ interface ") -# endif() -# endif() -# set(OP_CXX_ABI ${CPP_CXX_ABI}) -# endif() -# message (STATUS "Automatically determined OP_CXX_ABI=${OP_CXX_ABI} ") -#else() -# message (STATUS "User set OP_CXX_ABI=${OP_CXX_ABI} ") -#endif() -# message (STATUS "No set OP_CXX_ABI=${OP_CXX_ABI} ") -# # message the cxx_abi used during compiling -# if (${OP_CXX_ABI} EQUAL 0) -# message (STATUS "Set GLIBCXX_USE_CXX_ABI=0 when compiling ops") -# else () -# set (OP_CXX_ABI 1) -# message (STATUS "Set GLIBCXX_USE_CXX_ABI=1 when compiling ops") -# endif () +if (BUILD_PY_IF) +if (NOT DEFINED OP_CXX_ABI) + if (BUILD_PY_IF) + if (DEFINED TENSORFLOW_ROOT) + set(FIND_ABI_CMD "import sys,os; sys.path.insert(0, os.path.join('${TENSORFLOW_ROOT}', '..')); import tensorflow; print(tensorflow.CXX11_ABI_FLAG if 'CXX11_ABI_FLAG' in tensorflow.__dict__ else tensorflow.sysconfig.CXX11_ABI_FLAG, end = '')" ) + else() + set(FIND_ABI_CMD "import tensorflow; print(tensorflow.CXX11_ABI_FLAG if 'CXX11_ABI_FLAG' in tensorflow.__dict__ else tensorflow.sysconfig.CXX11_ABI_FLAG, end = '')") + endif() + execute_process( + COMMAND ${PYTHON_EXECUTABLE} "-c" "${FIND_ABI_CMD}" + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} + OUTPUT_VARIABLE PY_CXX_ABI + RESULT_VARIABLE PY_CXX_ABI_RESULT_VAR + ERROR_VARIABLE PY_CXX_ABI_ERROR_VAR + ) + if (NOT ${PY_CXX_ABI_RESULT_VAR} EQUAL 0) + message(FATAL_ERROR "Cannot determine cxx abi, error message: ${PY_CXX_ABI_ERROR_VAR}") + endif() + set(OP_CXX_ABI ${PY_CXX_ABI}) + endif() + if (BUILD_CPP_IF) + try_run( + CPP_CXX_ABI_RUN_RESULT_VAR CPP_CXX_ABI_COMPILE_RESULT_VAR + ${CMAKE_CURRENT_BINARY_DIR}/tf_cxx_abi + 
"${CMAKE_CURRENT_SOURCE_DIR}/cmake/tf_cxx_abi.cpp" +      LINK_LIBRARIES ${TensorFlowFramework_LIBRARY} +      CMAKE_FLAGS "-DINCLUDE_DIRECTORIES:STRING=${TensorFlow_INCLUDE_DIRS}" +      RUN_OUTPUT_VARIABLE CPP_CXX_ABI +      COMPILE_OUTPUT_VARIABLE CPP_CXX_ABI_COMPILE_OUTPUT_VAR +      ) +      if (NOT ${CPP_CXX_ABI_COMPILE_RESULT_VAR}) +      message(FATAL_ERROR "Failed to compile: \n ${CPP_CXX_ABI_COMPILE_OUTPUT_VAR}" ) +      endif() +      if (NOT ${CPP_CXX_ABI_RUN_RESULT_VAR} EQUAL "0") +      message(FATAL_ERROR "Failed to run, return code: ${CPP_CXX_ABI}" ) +      endif() +      if (DEFINED PY_CXX_ABI) +      if (NOT (${CPP_CXX_ABI} EQUAL ${PY_CXX_ABI})) +      message (WARNING "NOT consistent CXX_ABIs: python interface of tf uses ${PY_CXX_ABI}, while c++ interface of tf uses ${CPP_CXX_ABI}, we follow c++ interface ") +      endif() +      endif() +      set(OP_CXX_ABI ${CPP_CXX_ABI}) +      endif() +      message (STATUS "Automatically determined OP_CXX_ABI=${OP_CXX_ABI} ") +else() +      message (STATUS "User set OP_CXX_ABI=${OP_CXX_ABI} ") +endif() +message (STATUS "No set OP_CXX_ABI=${OP_CXX_ABI} ") +# message the cxx_abi used during compiling +if (${OP_CXX_ABI} EQUAL 0) +      message (STATUS "Set GLIBCXX_USE_CXX_ABI=0 when compiling ops") +else () +      set (OP_CXX_ABI 1) +      message (STATUS "Set GLIBCXX_USE_CXX_ABI=1 when compiling ops") +endif () +endif() # define USE_TTM if (NOT DEFINED USE_TTM) @@ -193,7 +196,7 @@ endif (BUILD_CPP_IF) # include include_directories(${DeePMD_INCLUDE_DIRS}) -#include_directories(${TensorFlow_INCLUDE_DIRS}) +include_directories(${TensorFlow_INCLUDE_DIRS}) # define names of libs set (LIB_DEEPMD "deepmd") @@ -216,6 +219,7 @@ endif (BUILD_CPP_IF) # add_subdirectory (op/) add_subdirectory (lib/) if (BUILD_PY_IF) + add_subdirectory (op/) add_subdirectory (config/) # add_subdirectory (tests/) endif (BUILD_PY_IF) diff --git a/source/cmake/FindFluid.cmake b/source/cmake/FindFluid.cmake index 39e60c0d2e..fcafb90349 100755 --- a/source/cmake/FindFluid.cmake +++ b/source/cmake/FindFluid.cmake @@ -127,6 +127,7 @@ 
function(third_party_library TARGET_NAME TARGET_DIRNAME) set(PADDLE_THIRD_PARTY_LIBRARIES ${PADDLE_THIRD_PARTY_LIBRARIES} ${local_third_party_libraries} PARENT_SCOPE) endfunction() +if(NOT BUILD_PY_IF) set(OP_DIR "${PROJECT_SOURCE_DIR}/op/paddle_ops/srcs") if(USE_CUDA_TOOLKIT) file(GLOB CUSTOM_OPERATOR_FILES ${OP_DIR}/*.cc) @@ -135,6 +136,7 @@ else() # set(CUSTOM_OPERATOR_FILES "${OP_DIR}/pd_prod_env_mat_multi_devices_cpu.cc;${OP_DIR}/pd_prod_force_se_a_multi_devices_cpu.cc;${OP_DIR}/pd_prod_virial_se_a_multi_devices_cpu.cc;") endif() add_library(pd_infer_custom_op SHARED ${CUSTOM_OPERATOR_FILES}) +endif() third_party_library(mklml ${THIRD_PARTY_ROOT}/install/mklml/lib libiomp5.so libmklml_intel.so) third_party_library(mkldnn ${THIRD_PARTY_ROOT}/install/mkldnn/lib libmkldnn.so.0) diff --git a/source/op/paddle_ops/srcs/pd_prod_env_mat_multi_devices_cpu.cc b/source/op/paddle_ops/srcs/pd_prod_env_mat_multi_devices_cpu.cc index 7affd494e6..334aa0f523 100755 --- a/source/op/paddle_ops/srcs/pd_prod_env_mat_multi_devices_cpu.cc +++ b/source/op/paddle_ops/srcs/pd_prod_env_mat_multi_devices_cpu.cc @@ -1,5 +1,9 @@ -//#include "paddle/extension.h" +#ifdef ON_INFER #include "paddle/include/experimental/ext_all.h" +#else +#include "paddle/extension.h" +#endif +// #include "paddle/include/experimental/ext_all.h" #include "utilities.h" #include "coord.h" #include "region.h" diff --git a/source/op/paddle_ops/srcs/pd_prod_env_mat_multi_devices_cuda.cc b/source/op/paddle_ops/srcs/pd_prod_env_mat_multi_devices_cuda.cc index ee714a2d1b..1f8f1e358a 100644 --- a/source/op/paddle_ops/srcs/pd_prod_env_mat_multi_devices_cuda.cc +++ b/source/op/paddle_ops/srcs/pd_prod_env_mat_multi_devices_cuda.cc @@ -1,5 +1,9 @@ #define GOOGLE_CUDA 1 +#ifdef ON_INFER +#include "paddle/include/experimental/ext_all.h" +#else #include "paddle/extension.h" +#endif #include "utilities.h" #include "coord.h" #include "region.h" diff --git a/source/op/paddle_ops/srcs/pd_prod_force_se_a_multi_devices_cpu.cc 
b/source/op/paddle_ops/srcs/pd_prod_force_se_a_multi_devices_cpu.cc index f2a6482772..efccc010c2 100755 --- a/source/op/paddle_ops/srcs/pd_prod_force_se_a_multi_devices_cpu.cc +++ b/source/op/paddle_ops/srcs/pd_prod_force_se_a_multi_devices_cpu.cc @@ -1,8 +1,11 @@ #include #include "prod_force.h" #include "prod_force_grad.h" -//#include "paddle/extension.h" +#ifdef ON_INFER #include "paddle/include/experimental/ext_all.h" +#else +#include "paddle/extension.h" +#endif #define CHECK_INPUT(x) PD_CHECK(x.place() == paddle::PlaceType::kCPU, #x " must be a CPU Tensor.") #define CHECK_INPUT_READY(x) PD_CHECK(x.is_initialized(), #x " must be initialized before usage.") diff --git a/source/op/paddle_ops/srcs/pd_prod_force_se_a_multi_devices_cuda.cc b/source/op/paddle_ops/srcs/pd_prod_force_se_a_multi_devices_cuda.cc index 752646d5f2..3d36d8cc8e 100644 --- a/source/op/paddle_ops/srcs/pd_prod_force_se_a_multi_devices_cuda.cc +++ b/source/op/paddle_ops/srcs/pd_prod_force_se_a_multi_devices_cuda.cc @@ -1,6 +1,10 @@ #define GOOGLE_CUDA 1 #include "prod_force.h" +#ifdef ON_INFER +#include "paddle/include/experimental/ext_all.h" +#else #include "paddle/extension.h" +#endif #include #define CHECK_INPUT(x) PD_CHECK(x.place() == paddle::PlaceType::kGPU, #x " must be a GPU Tensor.") diff --git a/source/op/paddle_ops/srcs/pd_prod_virial_se_a_multi_devices_cpu.cc b/source/op/paddle_ops/srcs/pd_prod_virial_se_a_multi_devices_cpu.cc index c4095d1985..3f76426dfc 100755 --- a/source/op/paddle_ops/srcs/pd_prod_virial_se_a_multi_devices_cpu.cc +++ b/source/op/paddle_ops/srcs/pd_prod_virial_se_a_multi_devices_cpu.cc @@ -1,8 +1,11 @@ #include #include "prod_virial.h" #include "prod_virial_grad.h" -//#include "paddle/extension.h" +#ifdef ON_INFER #include "paddle/include/experimental/ext_all.h" +#else +#include "paddle/extension.h" +#endif #define CHECK_INPUT(x) PD_CHECK(x.place() == paddle::PlaceType::kCPU, #x " must be a CPU Tensor.") diff --git 
a/source/op/paddle_ops/srcs/pd_prod_virial_se_a_multi_devices_cuda.cc b/source/op/paddle_ops/srcs/pd_prod_virial_se_a_multi_devices_cuda.cc index 8163e636f3..da32032b30 100644 --- a/source/op/paddle_ops/srcs/pd_prod_virial_se_a_multi_devices_cuda.cc +++ b/source/op/paddle_ops/srcs/pd_prod_virial_se_a_multi_devices_cuda.cc @@ -1,7 +1,11 @@ #define GOOGLE_CUDA 1 #include #include "prod_virial.h" +#ifdef ON_INFER +#include "paddle/include/experimental/ext_all.h" +#else #include "paddle/extension.h" +#endif #define CHECK_INPUT(x) PD_CHECK(x.place() == paddle::PlaceType::kGPU, #x " must be a GPU Tensor.") #define CHECK_INPUT_DIM(x, value) PD_CHECK(x.shape().size() == value, #x "'s dim should be " #value ".")