Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Remove use-grappler-optimizer compile option #799

Open
wants to merge 8 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 0 additions & 17 deletions build_ngtf.py
Original file line number Diff line number Diff line change
Expand Up @@ -96,11 +96,6 @@ def main():
default='',
action="store_true")

parser.add_argument(
'--use_grappler_optimizer',
help="Use Grappler optimizer instead of the optimization passes\n",
action="store_true")

parser.add_argument(
'--artifacts_dir',
type=str,
Expand Down Expand Up @@ -387,11 +382,6 @@ def main():
"tensorflow")
])

ngraph_tf_cmake_flags.extend([
"-DNGRAPH_TF_USE_GRAPPLER_OPTIMIZER=" +
flag_string_map[arguments.use_grappler_optimizer]
])

# Now build the bridge
ng_tf_whl = build_ngraph_tf(build_dir, artifacts_location,
ngraph_tf_src_dir, venv_dir,
Expand Down Expand Up @@ -442,13 +432,6 @@ def main():
install_ngraph_tf(tf_version, venv_dir,
os.path.join(artifacts_location, ng_tf_whl))

if arguments.use_grappler_optimizer:
import tensorflow as tf
import ngraph_bridge
if not ngraph_bridge.is_grappler_enabled():
raise Exception(
"Build failed: 'use_grappler_optimizer' specified but not used")

print('\033[1;32mBuild successful\033[0m')
os.chdir(pwd)

Expand Down
4 changes: 0 additions & 4 deletions examples/cpp/infer_multiple_networks.cc
Original file line number Diff line number Diff line change
Expand Up @@ -71,10 +71,6 @@ void PrintVersion() {
<< std::endl;
std::cout << "CXX11_ABI Used: " << tf::ngraph_bridge::cxx11_abi_flag()
<< std::endl;
std::cout << "Grappler Enabled? "
<< (tf::ngraph_bridge::is_grappler_enabled() ? std::string("Yes")
: std::string("No"))
<< std::endl;
PrintAvailableBackends();
}

Expand Down
4 changes: 0 additions & 4 deletions examples/cpp/infer_single_network.cc
Original file line number Diff line number Diff line change
Expand Up @@ -68,10 +68,6 @@ void PrintVersion() {
<< std::endl;
std::cout << "CXX11_ABI Used: " << tf::ngraph_bridge::cxx11_abi_flag()
<< std::endl;
std::cout << "Grappler Enabled? "
<< (tf::ngraph_bridge::is_grappler_enabled() ? std::string("Yes")
: std::string("No"))
<< std::endl;
PrintAvailableBackends();
}

Expand Down
20 changes: 1 addition & 19 deletions examples/cpp/inference_engine.cc
Original file line number Diff line number Diff line change
Expand Up @@ -106,26 +106,8 @@ Status InferenceEngine::CreateSession(const string& graph_filename,
->set_constant_folding(RewriterConfig::OFF);
options.config.set_inter_op_parallelism_threads(2);

// The following is related to Grappler - which we are turning off
// Until we get a library fully running
if (tf::ngraph_bridge::is_grappler_enabled()) {
auto* custom_config = options.config.mutable_graph_options()
->mutable_rewrite_options()
->add_custom_optimizers();

custom_config->set_name("ngraph-optimizer");
options.config.mutable_graph_options()
->mutable_rewrite_options()
->set_min_graph_nodes(-1);

options.config.mutable_graph_options()
->mutable_rewrite_options()
->set_meta_optimizer_iterations(tensorflow::RewriterConfig::ONE);
}

// Load the network
Status load_graph_status = LoadGraph(graph_filename, &session, options);
return load_graph_status;
return LoadGraph(graph_filename, &session, options);
}

} // namespace benchmark
2 changes: 1 addition & 1 deletion maint/apply-code-format.sh
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ set -u
# limitations under the License.
# ******************************************************************************

declare SRC_DIRS=${1:-ngraph_bridge examples test logging tools diagnostics python}
declare SRC_DIRS=${1:-ngraph_bridge examples test tools diagnostics python}

# NOTE: The results of `clang-format` depend on _both_ of the following factors:
# - The `.clang-format` file, and
Expand Down
2 changes: 1 addition & 1 deletion maint/check-code-format.sh
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ set -u
# limitations under the License.
# ******************************************************************************

declare SRC_DIRS=${1:-ngraph_bridge examples experimental test logging tools diagnostics python}
declare SRC_DIRS=${1:-ngraph_bridge examples experimental test tools diagnostics python}

# NOTE: The results of `clang-format` depend on _both_ of the following factors:
# - The `.clang-format` file, and
Expand Down
7 changes: 1 addition & 6 deletions ngraph_bridge/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ set(SRC
mark_for_clustering.cc
ngraph_builder.cc
ngraph_conversions.cc
ngraph_optimizer.cc
ngraph_rewrite_pass.cc
ops/ngraph_encapsulate_op.cc
pass/transpose_sinking.cc
Expand All @@ -52,12 +53,6 @@ set(SRC
version.cc
)

message(STATUS "NGRAPH_TF_USE_GRAPPLER_OPTIMIZER: ${NGRAPH_TF_USE_GRAPPLER_OPTIMIZER}")
if(NGRAPH_TF_USE_GRAPPLER_OPTIMIZER)
list(APPEND SRC ngraph_optimizer.cc)
add_definitions(-DNGRAPH_TF_USE_GRAPPLER_OPTIMIZER)
endif()

add_library(${LIB_NAME} SHARED ${SRC})
target_link_libraries(
${LIB_NAME}
Expand Down
13 changes: 11 additions & 2 deletions ngraph_bridge/ngraph_rewrite_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/public/session_options.h"

#include "api.h"
#include "assign_clusters.h"
Expand Down Expand Up @@ -101,6 +102,16 @@ Status NGraphRewritePass::Rewrite(
}

Status NGraphRewritePass::Run(const GraphOptimizationPassOptions& options) {
auto& rewriter_config =
options.session_options->config.graph_options().rewrite_options();
for (int i = 0; i < rewriter_config.custom_optimizers_size(); ++i) {
if (rewriter_config.custom_optimizers(i).name() == "ngraph-optimizer") {
NGRAPH_VLOG(3) << "ngraph-optimizer custom optimizer enabled; Disabling "
"nGraph rewrite-pass";
return Status::OK();
}
}

// If we don't get a main graph, log that fact and bail.
if (options.graph == nullptr) {
NGRAPH_VLOG(0) << "NGraphRewritePass: options.graph == nullptr";
Expand All @@ -111,8 +122,6 @@ Status NGraphRewritePass::Run(const GraphOptimizationPassOptions& options) {

} // namespace ngraph_bridge

#ifndef NGRAPH_TF_USE_GRAPPLER_OPTIMIZER
REGISTER_OPTIMIZATION(OptimizationPassRegistry::POST_REWRITE_FOR_EXEC, 0,
ngraph_bridge::NGraphRewritePass);
#endif
} // namespace tensorflow
8 changes: 0 additions & 8 deletions ngraph_bridge/version.cc
Original file line number Diff line number Diff line change
Expand Up @@ -61,14 +61,6 @@ int cxx11_abi_flag() {
#endif
}

bool is_grappler_enabled() {
#if defined(NGRAPH_TF_USE_GRAPPLER_OPTIMIZER)
return true;
#else
return false;
#endif
}

const char* tf_version() { return (TF_VERSION_STRING); }

} // namespace ngraph_bridge
Expand Down
4 changes: 0 additions & 4 deletions ngraph_bridge/version.h
Original file line number Diff line number Diff line change
Expand Up @@ -37,10 +37,6 @@ const char* ngraph_version();
// _GLIBCXX_USE_CXX11_ABI set during the compilation time
int cxx11_abi_flag();

// Returns true when nGraph is using Grappler optimizer APIs for
// graph rewriting
bool is_grappler_enabled();

// Returns the tensorflow version
const char* tf_version();
}
Expand Down
63 changes: 28 additions & 35 deletions python/ngraph_bridge/__init__.in.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@
'set_backend', 'get_backend',
'start_logging_placement', 'stop_logging_placement',
'is_logging_placement', '__version__', 'cxx11_abi_flag'
'is_grappler_enabled', 'update_config',
'update_config',
'set_disabled_ops', 'get_disabled_ops',
]

Expand Down Expand Up @@ -123,7 +123,6 @@ def requested():
ngraph_bridge_lib.version.restype = ctypes.c_char_p
ngraph_bridge_lib.ngraph_version.restype = ctypes.c_char_p
ngraph_bridge_lib.cxx11_abi_flag.restype = ctypes.c_int
ngraph_bridge_lib.is_grappler_enabled.restype = ctypes.c_bool
ngraph_bridge_lib.set_disabled_ops.argtypes = [ctypes.c_char_p]
ngraph_bridge_lib.get_disabled_ops.restype = ctypes.c_char_p

Expand Down Expand Up @@ -171,38 +170,33 @@ def is_logging_placement():
def cxx11_abi_flag():
return ngraph_bridge_lib.cxx11_abi_flag()

def is_grappler_enabled():
return ngraph_bridge_lib.is_grappler_enabled()

def update_config(config, backend_name = "CPU", device_id = ""):
#updating session config if grappler is enabled
if(ngraph_bridge_lib.is_grappler_enabled()):
opt_name = 'ngraph-optimizer'
# If the config already has ngraph-optimizer, then do not update it
if config.HasField('graph_options'):
if config.graph_options.HasField('rewrite_options'):
custom_opts = config.graph_options.rewrite_options.custom_optimizers
for i in range(len(custom_opts)):
if custom_opts[i].name == opt_name:
return config
rewriter_options = rewriter_config_pb2.RewriterConfig()
rewriter_options.meta_optimizer_iterations=(rewriter_config_pb2.RewriterConfig.ONE)
rewriter_options.min_graph_nodes=-1
ngraph_optimizer = rewriter_options.custom_optimizers.add()
ngraph_optimizer.name = opt_name
ngraph_optimizer.parameter_map["device_id"].s = device_id.encode()
config.MergeFrom(tf.compat.v1.ConfigProto(graph_options=tf.compat.v1.GraphOptions(rewrite_options=rewriter_options)))
# For reference, if we want to provide configuration support(backend parameters)
# in a python script using the ngraph-optimizer
# rewriter_options = rewriter_config_pb2.RewriterConfig()
# rewriter_options.meta_optimizer_iterations=(rewriter_config_pb2.RewriterConfig.ONE)
# rewriter_options.min_graph_nodes=-1
# ngraph_optimizer = rewriter_options.custom_optimizers.add()
# ngraph_optimizer.name = "ngraph-optimizer"
# ngraph_optimizer.parameter_map["device_id"].s = device_id.encode()
# ngraph_optimizer.parameter_map["max_batch_size"].s = b'64'
# ngraph_optimizer.parameter_map["ice_cores"].s = b'12'
# config.MergeFrom(tf.compat.v1.ConfigProto(graph_options=tf.compat.v1.GraphOptions(rewrite_options=rewriter_options)))
opt_name = 'ngraph-optimizer'
# If the config already has ngraph-optimizer, then do not update it
if config.HasField('graph_options'):
if config.graph_options.HasField('rewrite_options'):
custom_opts = config.graph_options.rewrite_options.custom_optimizers
for i in range(len(custom_opts)):
if custom_opts[i].name == opt_name:
return config
rewriter_options = rewriter_config_pb2.RewriterConfig()
rewriter_options.meta_optimizer_iterations=(rewriter_config_pb2.RewriterConfig.ONE)
rewriter_options.min_graph_nodes=-1
ngraph_optimizer = rewriter_options.custom_optimizers.add()
ngraph_optimizer.name = opt_name
ngraph_optimizer.parameter_map["device_id"].s = device_id.encode()
config.MergeFrom(tf.compat.v1.ConfigProto(graph_options=tf.compat.v1.GraphOptions(rewrite_options=rewriter_options)))
# For reference, if we want to provide configuration support(backend parameters)
# in a python script using the ngraph-optimizer
# rewriter_options = rewriter_config_pb2.RewriterConfig()
# rewriter_options.meta_optimizer_iterations=(rewriter_config_pb2.RewriterConfig.ONE)
# rewriter_options.min_graph_nodes=-1
# ngraph_optimizer = rewriter_options.custom_optimizers.add()
# ngraph_optimizer.name = "ngraph-optimizer"
# ngraph_optimizer.parameter_map["device_id"].s = device_id.encode()
# ngraph_optimizer.parameter_map["max_batch_size"].s = b'64'
# ngraph_optimizer.parameter_map["ice_cores"].s = b'12'
# config.MergeFrom(tf.compat.v1.ConfigProto(graph_options=tf.compat.v1.GraphOptions(rewrite_options=rewriter_options)))
return config

def set_disabled_ops(unsupported_ops):
Expand All @@ -215,5 +209,4 @@ def get_disabled_ops():
"nGraph bridge version: " + str(ngraph_bridge_lib.version()) + "\n" + \
"nGraph version used for this build: " + str(ngraph_bridge_lib.ngraph_version()) + "\n" + \
"TensorFlow version used for this build: " + TF_GIT_VERSION_BUILT_WITH + "\n" \
"CXX11_ABI flag used for this build: " + str(ngraph_bridge_lib.cxx11_abi_flag()) + "\n" \
"nGraph bridge built with Grappler: " + str(ngraph_bridge_lib.is_grappler_enabled()) + "\n" \
"CXX11_ABI flag used for this build: " + str(ngraph_bridge_lib.cxx11_abi_flag()) + "\n"
7 changes: 2 additions & 5 deletions test/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -72,8 +72,9 @@ set(SRC
graph_rewrites/assign_clusters.cc
graph_rewrites/deadness_test.cc
graph_rewrites/backend_manager_test.cc
graph_rewrites/encapsulate_clusters_test.cc
graph_rewrites/config_for_grappler_test.cc
graph_rewrites/disable_ops_test.cc
graph_rewrites/encapsulate_clusters_test.cc
graph_rewrites/mark_for_clustering_test.cc
graph_rewrites/op_by_op_capability_test.cc
test_utilities.cpp
Expand All @@ -85,10 +86,6 @@ set(SRC
pass/transpose_sinking_test.cpp
)

if(NGRAPH_TF_USE_GRAPPLER_OPTIMIZER)
list(APPEND SRC graph_rewrites/config_for_grappler_test.cc)
endif()

# The compile flag -DNDEBUG is required since
# tensorflow::Core::RefCounted is error prone as explained here:
# https://github.com/tensorflow/tensorflow/issues/17316
Expand Down
7 changes: 6 additions & 1 deletion test/graph_rewrites/config_for_grappler_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ namespace testing {
// etc.,etc.

TEST(GrapplerConfig, RConfig1) {
ActivateNGraph();
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why is the explicit enabling/disabling of nGraph required?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It's to disable de-assigning of clusters.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You can use NGRAPH_TF_DISABLE_DEASSIGN_CLUSTERS?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Where? To run the entire unit test suite? I'm not sure why this isn't being done already.

// Create Graph
Scope root = Scope::NewRootScope();
auto A = ops::Const(root.WithOpName("A"), {3.f, 2.f});
Expand Down Expand Up @@ -72,7 +73,6 @@ TEST(GrapplerConfig, RConfig1) {
// Run grappler
tensorflow::grappler::MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
// const Status status = optimizer.Optimize(nullptr, item, &output);
ASSERT_OK(optimizer.Optimize(nullptr, item, &output));

// GraphDef to Graph
Expand All @@ -91,9 +91,11 @@ TEST(GrapplerConfig, RConfig1) {
ng_encap = node;
}
ASSERT_NE(ng_encap, nullptr);
DeactivateNGraph();
}

TEST(GrapplerConfig, RConfig4) {
ActivateNGraph();
// Create Graph
Scope root = Scope::NewRootScope();
auto A = ops::Const(root.WithOpName("A"), {3.f, 2.f});
Expand Down Expand Up @@ -152,11 +154,13 @@ TEST(GrapplerConfig, RConfig4) {
string ng_test_echo;
ASSERT_OK(GetNodeAttr(ng_encap->attrs(), "_ngraph_test_echo", &ng_test_echo));
ASSERT_EQ(ng_test_echo, "hi");
DeactivateNGraph();
}

// Test the failure case where the compulsory attribute device_id
// is not provided using the rewriter config
TEST(GrapplerConfig, RConfig5) {
ActivateNGraph();
// Create Graph
Scope root = Scope::NewRootScope();
auto A = ops::Const(root.WithOpName("A"), {3.f, 2.f});
Expand Down Expand Up @@ -192,6 +196,7 @@ TEST(GrapplerConfig, RConfig5) {
GraphDef output;

ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
DeactivateNGraph();
}

} // namespace testing
Expand Down
3 changes: 1 addition & 2 deletions test/python/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,9 +53,8 @@ def with_ngraph(self, l, config=None):
# because mutable objects should not be used as defaults in python
if config is None:
config = tf.compat.v1.ConfigProto()
# TODO: Stop grappler on failure (Add fail_on_optimizer_errors=True)
config = ngraph_bridge.update_config(config)

# config = ngraph_bridge.update_config(config)
kanvi-nervana marked this conversation as resolved.
Show resolved Hide resolved
ngraph_tf_disable_deassign_clusters = os.environ.pop(
'NGRAPH_TF_DISABLE_DEASSIGN_CLUSTERS', None)

Expand Down
Loading