Unverified Commit 4f84a69a authored by Hongkun Yu's avatar Hongkun Yu Committed by GitHub
Browse files

Delete models:syntaxnet (#8170)

* remove tensorrt as the example repo has been moved for a while

* delete syntax net to reduce repo size as syntax net is going to move to google-research/

* Update README.md

Delete syntaxnet from readme.

* Update CODEOWNERS

Delete syntaxnet from codeowners
parent d2e30aef
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
#ifndef DRAGNN_COMPONENTS_SYNTAXNET_SYNTAXNET_COMPONENT_H_
#define DRAGNN_COMPONENTS_SYNTAXNET_SYNTAXNET_COMPONENT_H_
#include <vector>
#include "dragnn/components/syntaxnet/syntaxnet_link_feature_extractor.h"
#include "dragnn/components/syntaxnet/syntaxnet_transition_state.h"
#include "dragnn/components/util/bulk_feature_extractor.h"
#include "dragnn/core/beam.h"
#include "dragnn/core/input_batch_cache.h"
#include "dragnn/core/interfaces/component.h"
#include "dragnn/core/interfaces/transition_state.h"
#include "dragnn/core/util/label.h"
#include "dragnn/protos/data.pb.h"
#include "dragnn/protos/spec.pb.h"
#include "dragnn/protos/trace.pb.h"
#include "syntaxnet/base.h"
#include "syntaxnet/parser_transitions.h"
#include "syntaxnet/registry.h"
#include "syntaxnet/task_context.h"
namespace syntaxnet {
namespace dragnn {
// DRAGNN Component backed by a SyntaxNet transition system. Wraps a beam of
// SyntaxNetTransitionStates per batch element and exposes feature extraction,
// advancement, and tracing to the DRAGNN runtime.
class SyntaxNetComponent : public Component {
public:
// Create a SyntaxNet-backed DRAGNN component.
SyntaxNetComponent();
// Initializes this component from the spec.
void InitializeComponent(const ComponentSpec &spec) override;
// Provides the previous beam to the component.
void InitializeData(
const std::vector<std::vector<const TransitionState *>> &states,
int max_beam_size, InputBatchCache *input_data) override;
// Returns true if the component has had InitializeData called on it since
// the last time it was reset.
bool IsReady() const override;
// Returns the string name of this component.
string Name() const override;
// Returns the number of steps taken by the given batch in this component.
int StepsTaken(int batch_index) const override;
// Returns the current batch size of the component's underlying data.
int BatchSize() const override;
// Returns the maximum beam size of this component.
int BeamSize() const override;
// Return the beam index of the item which is currently at index
// 'index', when the beam was at step 'step', for batch element 'batch'.
int GetBeamIndexAtStep(int step, int current_index, int batch) const override;
// Return the source index of the item which is currently at index 'index'
// for batch element 'batch'. This index is into the final beam of the
// Component that this Component was initialized from.
int GetSourceBeamIndex(int current_index, int batch) const override;
// Request a translation function based on the given method string.
// The translation function will be called with arguments (batch, beam, value)
// and should return the step index corresponding to the given value, for the
// data in the given beam and batch.
std::function<int(int, int, int)> GetStepLookupFunction(
const string &method) override;
// Advances this component from the given transition matrix. Returns false
// if the component could not be advanced.
bool AdvanceFromPrediction(const float *transition_matrix, int num_items,
int num_actions) override;
// Advances this component from the state oracles.
void AdvanceFromOracle() override;
// Returns true if all states within this component are terminal.
bool IsTerminal() const override;
// Returns the current batch of beams for this component.
std::vector<std::vector<const TransitionState *>> GetBeam() override;
// Extracts and populates the vector of FixedFeatures for the specified
// channel.
int GetFixedFeatures(std::function<int32 *(int)> allocate_indices,
std::function<int64 *(int)> allocate_ids,
std::function<float *(int)> allocate_weights,
int channel_id) const override;
// Extracts and populates all FixedFeatures for all channels, advancing this
// component via the oracle until it is terminal.
int BulkGetFixedFeatures(const BulkFeatureExtractor &extractor) override;
// Bulk embedding of fixed features is not supported by this component;
// calling this is a fatal error.
void BulkEmbedFixedFeatures(
int batch_size_padding, int num_steps_padding, int output_array_size,
const vector<const float *> &per_channel_embeddings,
float *embedding_matrix) override {
LOG(FATAL) << "Method not supported";
}
// Dense bulk embedding of fixed features is not supported by this
// component; calling this is a fatal error.
void BulkEmbedDenseFixedFeatures(
const vector<const float *> &per_channel_embeddings,
float *embedding_output, int embedding_output_size,
int32 *offset_array_output, int offset_array_size) override {
LOG(FATAL) << "Method not supported";
}
// Dense feature sizing is not supported by this component; calling this is
// a fatal error.
int BulkDenseFeatureSize() const override {
LOG(FATAL) << "Method not supported";
}
// Extracts and returns the vector of LinkFeatures for the specified
// channel. Note: these are NOT translated.
std::vector<LinkFeatures> GetRawLinkFeatures(int channel_id) const override;
// Returns a vector of oracle labels for each element in the beam and
// batch.
std::vector<std::vector<std::vector<Label>>> GetOracleLabels() const override;
// Annotate the underlying data object with the results of this Component's
// calculation.
void FinalizeData() override;
// Reset this component.
void ResetComponent() override;
// Initializes the component for tracing execution. This will typically have
// the side effect of slowing down all subsequent Component calculations
// and storing a trace in memory that can be returned by GetTraceProtos().
void InitializeTracing() override;
// Disables tracing, freeing any additional memory and avoiding triggering
// additional computation in the future.
void DisableTracing() override;
// Returns the recorded traces, one vector per batch element.
std::vector<std::vector<ComponentTrace>> GetTraceProtos() const override;
// Adds the given translated link features to the trace for 'channel_id'.
void AddTranslatedLinkFeaturesToTrace(
const std::vector<LinkFeatures> &features, int channel_id) override;
private:
friend class SyntaxNetComponentTest;
friend class SyntaxNetTransitionStateTest;
// Permission function for this component.
bool IsAllowed(SyntaxNetTransitionState *state, int action) const;
// Returns true if this state is final.
bool IsFinal(SyntaxNetTransitionState *state) const;
// Oracle function for this component.
std::vector<int> GetOracleVector(SyntaxNetTransitionState *state) const;
// State advance function for this component.
void Advance(SyntaxNetTransitionState *state, int action,
Beam<SyntaxNetTransitionState> *beam);
// Creates a new state for the given example.
std::unique_ptr<SyntaxNetTransitionState> CreateState(
SyntaxNetSentence *example);
// Creates a newly initialized Beam.
std::unique_ptr<Beam<SyntaxNetTransitionState>> CreateBeam(int max_size);
// Transition system.
std::unique_ptr<ParserTransitionSystem> transition_system_;
// Label map for transition system.
const TermFrequencyMap *label_map_;
// Extractor for fixed features.
ParserEmbeddingFeatureExtractor feature_extractor_;
// Extractor for linked features.
SyntaxNetLinkFeatureExtractor link_feature_extractor_;
// Internal workspace registry for use in feature extraction.
WorkspaceRegistry workspace_registry_;
// Switch for simulating legacy parser behaviour.
bool rewrite_root_labels_;
// The ComponentSpec used to initialize this component.
ComponentSpec spec_;
// State search beams.
std::vector<std::unique_ptr<Beam<SyntaxNetTransitionState>>> batch_;
// Current max beam size.
int max_beam_size_;
// Underlying input data (not owned).
InputBatchCache *input_data_;
// Whether or not to trace for each batch and beam element.
bool do_tracing_ = false;
};
} // namespace dragnn
} // namespace syntaxnet
#endif // DRAGNN_COMPONENTS_SYNTAXNET_SYNTAXNET_COMPONENT_H_
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
#include "dragnn/components/syntaxnet/syntaxnet_link_feature_extractor.h"
#include "tensorflow/core/platform/logging.h"
namespace syntaxnet {
namespace dragnn {
void SyntaxNetLinkFeatureExtractor::Setup(TaskContext *context) {
  ParserEmbeddingFeatureExtractor::Setup(context);
  // Each linked channel is described by three parallel, ';'-separated task
  // parameters ("<prefix>_source_components", "_source_layers",
  // "_source_translators"). Read them only when channels exist.
  if (NumEmbeddings() > 0) {
    const auto split_param = [&](const char *suffix) {
      return utils::Split(
          context->Get(tensorflow::strings::StrCat(ArgPrefix(), "_", suffix),
                       ""),
          ';');
    };
    channel_sources_ = split_param("source_components");
    channel_layers_ = split_param("source_layers");
    channel_translators_ = split_param("source_translators");
  }
  // Every channel must name a source component, layer and translator.
  CHECK_EQ(channel_sources_.size(), NumEmbeddings());
  CHECK_EQ(channel_layers_.size(), NumEmbeddings());
  CHECK_EQ(channel_translators_.size(), NumEmbeddings());
}
void SyntaxNetLinkFeatureExtractor::AddLinkedFeatureChannelProtos(
    ComponentSpec *spec) const {
  // Emit one LinkedFeatureChannel per configured embedding channel, merging
  // the extractor's own settings with the source_* values read in Setup().
  const int num_channels = NumEmbeddings();
  for (int channel = 0; channel < num_channels; ++channel) {
    LinkedFeatureChannel *proto = spec->add_linked_feature();
    proto->set_name(embedding_name(channel));
    proto->set_fml(embedding_fml()[channel]);
    proto->set_embedding_dim(EmbeddingDims(channel));
    proto->set_size(FeatureSize(channel));
    proto->set_source_layer(channel_layers_[channel]);
    proto->set_source_component(channel_sources_[channel]);
    proto->set_source_translator(channel_translators_[channel]);
  }
}
} // namespace dragnn
} // namespace syntaxnet
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
#ifndef DRAGNN_COMPONENTS_SYNTAXNET_SYNTAXNET_LINK_FEATURE_EXTRACTOR_H_
#define DRAGNN_COMPONENTS_SYNTAXNET_SYNTAXNET_LINK_FEATURE_EXTRACTOR_H_
#include <string>
#include <vector>
#include "dragnn/protos/spec.pb.h"
#include "syntaxnet/embedding_feature_extractor.h"
#include "syntaxnet/parser_state.h"
#include "syntaxnet/parser_transitions.h"
#include "syntaxnet/task_context.h"
namespace syntaxnet {
namespace dragnn {
// Provides feature extraction for linked features in the
// WrapperParserComponent. This re-uses the EmbeddingFeatureExtractor
// architecture to get another set of feature extractors.
//
// Because it uses a different prefix, it can be executed in the same wf.stage
// as the regular fixed extractor.
class SyntaxNetLinkFeatureExtractor : public ParserEmbeddingFeatureExtractor {
public:
// Constructs the extractor with the "link" argument prefix, so all of its
// task parameters are read from "link_*" keys.
SyntaxNetLinkFeatureExtractor() : ParserEmbeddingFeatureExtractor("link") {}
~SyntaxNetLinkFeatureExtractor() override {}
// Prefix used for the task parameters consumed by this extractor.
const string ArgPrefix() const override { return "link"; }
// Parses the TaskContext to get additional information like target layers,
// etc.
void Setup(TaskContext *context) override;
// Called during InitComponentProtoTask to add the specification from the
// wrapped feature extractor as LinkedFeatureChannel protos.
void AddLinkedFeatureChannelProtos(ComponentSpec *spec) const;
private:
// Source component names for each channel.
std::vector<string> channel_sources_;
// Source layer names for each channel.
std::vector<string> channel_layers_;
// Source translator name for each channel.
std::vector<string> channel_translators_;
};
} // namespace dragnn
} // namespace syntaxnet
#endif // DRAGNN_COMPONENTS_SYNTAXNET_SYNTAXNET_LINK_FEATURE_EXTRACTOR_H_
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
#include "dragnn/components/syntaxnet/syntaxnet_link_feature_extractor.h"
#include <string>
#include "dragnn/core/test/generic.h"
#include "dragnn/protos/spec.pb.h"
#include "syntaxnet/task_context.h"
#include "tensorflow/core/platform/test.h"
using syntaxnet::test::EqualsProto;
namespace syntaxnet {
namespace dragnn {
// Empty fixture; exists so the test below can use the TEST_F machinery.
class ExportSpecTest : public ::testing::Test {
public:
};
TEST_F(ExportSpecTest, WritesChannelSpec) {
// Configure two linked channels via the "link_*" task parameters.
TaskContext context;
context.SetParameter("link_features", "input.focus;stack.focus");
context.SetParameter("link_embedding_names", "tagger;parser");
context.SetParameter("link_predicate_maps", "none;none");
context.SetParameter("link_embedding_dims", "16;16");
context.SetParameter("link_source_components", "tagger;parser");
context.SetParameter("link_source_layers", "hidden0;lstm");
context.SetParameter("link_source_translators", "token;last_action");
SyntaxNetLinkFeatureExtractor link_features;
link_features.Setup(&context);
link_features.Init(&context);
// Exporting should produce one LinkedFeatureChannel proto per channel.
ComponentSpec spec;
link_features.AddLinkedFeatureChannelProtos(&spec);
const string expected_spec_str = R"(
linked_feature {
name: "tagger"
fml: "input.focus"
embedding_dim: 16
size: 1
source_component: "tagger"
source_translator: "token"
source_layer: "hidden0"
}
linked_feature {
name: "parser"
fml: "stack.focus"
embedding_dim: 16
size: 1
source_component: "parser"
source_translator: "last_action"
source_layer: "lstm"
}
)";
ComponentSpec expected_spec;
TextFormat::ParseFromString(expected_spec_str, &expected_spec);
EXPECT_THAT(spec, EqualsProto(expected_spec));
}
} // namespace dragnn
} // namespace syntaxnet
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
#include "dragnn/components/syntaxnet/syntaxnet_transition_state.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
namespace syntaxnet {
namespace dragnn {
// Wraps the given ParserState, taking ownership of it; the sentence pointer
// is borrowed. Scalars start at their flag values (-1 beam index, score 0).
SyntaxNetTransitionState::SyntaxNetTransitionState(
    std::unique_ptr<ParserState> parser_state, SyntaxNetSentence *sentence)
    : parser_state_(std::move(parser_state)),
      sentence_(sentence),
      score_(0),
      current_beam_index_(-1),
      parent_beam_index_(0),
      is_gold_(false) {
  // One slot per token in the backing sentence, initialized to the "unset"
  // flag value of -1.
  const int num_tokens = sentence->sentence()->token_size();
  step_for_token_.assign(num_tokens, -1);
  parent_for_token_.assign(num_tokens, -1);
  parent_step_for_token_.assign(num_tokens, -1);
}
// Inherits the parent's score and records where the parent sat in its beam.
void SyntaxNetTransitionState::Init(const TransitionState &parent) {
  parent_beam_index_ = parent.GetBeamIndex();
  score_ = parent.GetScore();
}
std::unique_ptr<SyntaxNetTransitionState> SyntaxNetTransitionState::Clone()
    const {
  // Deep-copy the wrapped ParserState; the sentence is shared, not copied.
  std::unique_ptr<ParserState> parser_copy(parser_state_->Clone());
  std::unique_ptr<SyntaxNetTransitionState> clone(
      new SyntaxNetTransitionState(std::move(parser_copy), sentence_));
  // Carry over all per-state bookkeeping from this state.
  clone->score_ = score_;
  clone->current_beam_index_ = current_beam_index_;
  clone->parent_beam_index_ = parent_beam_index_;
  clone->step_for_token_ = step_for_token_;
  clone->parent_step_for_token_ = parent_step_for_token_;
  clone->parent_for_token_ = parent_for_token_;
  // The trace, when present, is deep-copied so the clone can diverge.
  if (trace_) {
    clone->trace_.reset(new ComponentTrace(*trace_));
  }
  return clone;
}
// Beam index of the parent state captured in Init().
int SyntaxNetTransitionState::ParentBeamIndex() const {
return parent_beam_index_;
}
// Current beam index; -1 until SetBeamIndex() is called.
int SyntaxNetTransitionState::GetBeamIndex() const {
return current_beam_index_;
}
// Whether this state is on (or consistent with) the oracle path.
bool SyntaxNetTransitionState::IsGold() const { return is_gold_; }
void SyntaxNetTransitionState::SetGold(bool is_gold) { is_gold_ = is_gold; }
void SyntaxNetTransitionState::SetBeamIndex(int index) {
current_beam_index_ = index;
}
// Search score for this state.
float SyntaxNetTransitionState::GetScore() const { return score_; }
void SyntaxNetTransitionState::SetScore(float score) { score_ = score; }
// Renders a crude one-line view of the state: the stack contents followed by
// the next input token. Negative token indices are skipped.
string SyntaxNetTransitionState::HTMLRepresentation() const {
  string repr = "Stack: ";
  for (int slot = parser_state_->StackSize() - 1; slot >= 0; --slot) {
    const int token_index = parser_state_->Stack(slot);
    if (token_index >= 0) {
      tensorflow::strings::StrAppend(
          &repr, parser_state_->GetToken(token_index).word(), " ");
    }
  }
  tensorflow::strings::StrAppend(&repr, "| Input: ");
  const int input_index = parser_state_->Input(0);
  if (input_index >= 0) {
    tensorflow::strings::StrAppend(
        &repr, parser_state_->GetToken(input_index).word(), " ");
  }
  return repr;
}
} // namespace dragnn
} // namespace syntaxnet
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
#ifndef DRAGNN_COMPONENTS_SYNTAXNET_SYNTAXNET_TRANSITION_STATE_H_
#define DRAGNN_COMPONENTS_SYNTAXNET_SYNTAXNET_TRANSITION_STATE_H_
#include <vector>
#include "dragnn/core/interfaces/cloneable_transition_state.h"
#include "dragnn/core/interfaces/transition_state.h"
#include "dragnn/io/syntaxnet_sentence.h"
#include "dragnn/protos/trace.pb.h"
#include "syntaxnet/base.h"
#include "syntaxnet/parser_state.h"
namespace syntaxnet {
namespace dragnn {
// Wraps a SyntaxNet ParserState as a DRAGNN transition state, adding beam
// bookkeeping, per-token step/parent maps, and optional tracing.
class SyntaxNetTransitionState
    : public CloneableTransitionState<SyntaxNetTransitionState> {
 public:
  // Creates a SyntaxNetTransitionState to wrap this ParserState. Takes
  // ownership of 'parser_state'; 'sentence' is borrowed, not owned.
  SyntaxNetTransitionState(std::unique_ptr<ParserState> parser_state,
                           SyntaxNetSentence *sentence);
  // Initializes this TransitionState from a previous TransitionState. The
  // ParentBeamIndex is the location of that previous TransitionState in the
  // provided beam.
  void Init(const TransitionState &parent) override;
  // Produces a new state with the same backing data as this state.
  std::unique_ptr<SyntaxNetTransitionState> Clone() const override;
  // Returns the beam index of the state passed into the initializer of this
  // TransitionState.
  int ParentBeamIndex() const override;
  // Gets the current beam index for this state.
  int GetBeamIndex() const override;
  // Sets the current beam index for this state.
  void SetBeamIndex(int index) override;
  // Gets the score associated with this transition state.
  float GetScore() const override;
  // Sets the score associated with this transition state.
  void SetScore(float score) override;
  // Gets the state's gold-ness (if it is on or consistent with the oracle
  // path).
  bool IsGold() const override;
  // Sets the gold-ness of this state.
  void SetGold(bool is_gold) override;
  // Depicts this state as an HTML-language string.
  string HTMLRepresentation() const override;
  // **** END INHERITED INTERFACE ****
  // Returns the step recorded for 'token', or -1 if 'token' is out of range
  // or no step has been recorded for it.
  int step_for_token(int token) {
    if (token < 0 || token >= static_cast<int>(step_for_token_.size())) {
      return -1;
    }
    return step_for_token_[token];
  }
  // Records the step that computed the representation for 'token'.
  // Out-of-range tokens are ignored.
  // BUG FIX: this previously used vector::insert, which inserted a new
  // element (shifting every later token's entry and growing the vector past
  // the per-token sizing done in the constructor) instead of overwriting
  // the token's slot in place.
  void set_step_for_token(int token, int step) {
    if (token >= 0 && token < static_cast<int>(step_for_token_.size())) {
      step_for_token_[token] = step;
    }
  }
  // Returns the step that assigned a parent to 'token', or -1 if 'token' is
  // out of range or no step has been recorded.
  // BUG FIX: the bounds check previously consulted step_for_token_.size()
  // rather than this accessor's own vector.
  int parent_step_for_token(int token) {
    if (token < 0 ||
        token >= static_cast<int>(parent_step_for_token_.size())) {
      return -1;
    }
    return parent_step_for_token_[token];
  }
  // Records the step that assigned a parent to 'token'. Out-of-range tokens
  // are ignored. (Same insert-vs-assign fix as set_step_for_token.)
  void set_parent_step_for_token(int token, int parent_step) {
    if (token >= 0 &&
        token < static_cast<int>(parent_step_for_token_.size())) {
      parent_step_for_token_[token] = parent_step;
    }
  }
  // Returns the parent index recorded for 'token', or -1 if 'token' is out
  // of range or no parent has been recorded.
  // BUG FIX: the bounds check previously consulted step_for_token_.size()
  // rather than this accessor's own vector.
  int parent_for_token(int token) {
    if (token < 0 || token >= static_cast<int>(parent_for_token_.size())) {
      return -1;
    }
    return parent_for_token_[token];
  }
  // Records the parent index for 'token'. Out-of-range tokens are ignored.
  // (Same insert-vs-assign fix as set_step_for_token.)
  void set_parent_for_token(int token, int parent) {
    if (token >= 0 && token < static_cast<int>(parent_for_token_.size())) {
      parent_for_token_[token] = parent;
    }
  }
  // Accessor for the underlying ParserState.
  ParserState *parser_state() { return parser_state_.get(); }
  // Accessor for the underlying sentence object.
  SyntaxNetSentence *sentence() { return sentence_; }
  // Returns the trace; it is a fatal error to call this before set_trace().
  ComponentTrace *mutable_trace() {
    CHECK(trace_) << "Trace is not initialized";
    return trace_.get();
  }
  // Installs (or replaces) the trace for this state.
  void set_trace(std::unique_ptr<ComponentTrace> trace) {
    trace_ = std::move(trace);
  }

 private:
  // Underlying ParserState object that is being wrapped.
  std::unique_ptr<ParserState> parser_state_;
  // Sentence object that is being examined with this state (not owned).
  SyntaxNetSentence *sentence_;
  // The current score of this state.
  float score_;
  // The current beam index of this state.
  int current_beam_index_;
  // The parent beam index for this state.
  int parent_beam_index_;
  // Maintains a list of which steps in the history correspond to
  // representations for each of the tokens on the stack.
  std::vector<int> step_for_token_;
  // Maintains a list of which steps in the history correspond to the actions
  // that assigned a parent for tokens when reduced.
  std::vector<int> parent_step_for_token_;
  // Maintain the parent index of a token in the system.
  std::vector<int> parent_for_token_;
  // Trace of the history to produce this state.
  std::unique_ptr<ComponentTrace> trace_;
  // True if this state is gold.
  bool is_gold_;
};
} // namespace dragnn
} // namespace syntaxnet
#endif // DRAGNN_COMPONENTS_SYNTAXNET_SYNTAXNET_TRANSITION_STATE_H_
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
#include "dragnn/components/syntaxnet/syntaxnet_transition_state.h"
#include "dragnn/components/syntaxnet/syntaxnet_component.h"
#include "dragnn/core/input_batch_cache.h"
#include "dragnn/core/test/generic.h"
#include "dragnn/core/test/mock_transition_state.h"
#include "dragnn/io/sentence_input_batch.h"
#include "dragnn/protos/spec.pb.h"
#include "syntaxnet/sentence.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
// This test suite is intended to validate the contracts that the DRAGNN
// system expects from all transition state subclasses. Developers creating
// new TransitionStates should copy this test and modify it as necessary,
// using it to ensure their state conforms to DRAGNN expectations.
namespace syntaxnet {
namespace dragnn {
namespace {
// Three-token test sentence ("Sentence 0 .") in Sentence textproto form;
// used to back the transition states created by the fixture below.
const char kSentence0[] = R"(
token {
word: "Sentence" start: 0 end: 7 tag: "NN" category: "NOUN" label: "ROOT"
break_level: NO_BREAK
}
token {
word: "0" start: 9 end: 9 head: 0 tag: "CD" category: "NUM" label: "num"
break_level: SPACE_BREAK
}
token {
word: "." start: 10 end: 10 head: 0 tag: "." category: "." label: "punct"
break_level: NO_BREAK
}
)";
} // namespace
using testing::Return;
// Fixture that builds a real SyntaxNetTransitionState from the test master
// spec and kSentence0, via a throwaway SyntaxNetComponent.
class SyntaxNetTransitionStateTest : public ::testing::Test {
public:
// Builds a fresh transition state over kSentence0. Also resets data_, which
// must outlive the returned state.
std::unique_ptr<SyntaxNetTransitionState> CreateState() {
// Get the master spec proto from the test data directory.
MasterSpec master_spec;
string file_name = tensorflow::io::JoinPath(
test::GetTestDataPrefix(), "dragnn/components/syntaxnet/testdata",
"master_spec.textproto");
TF_CHECK_OK(tensorflow::ReadTextProto(tensorflow::Env::Default(), file_name,
&master_spec));
// Get all the resource protos from the test data directory.
for (Resource &resource :
*(master_spec.mutable_component(0)->mutable_resource())) {
resource.mutable_part(0)->set_file_pattern(tensorflow::io::JoinPath(
test::GetTestDataPrefix(), "dragnn/components/syntaxnet/testdata",
resource.part(0).file_pattern()));
}
// Create an empty input batch and beam vector to initialize the parser.
Sentence sentence_0;
TextFormat::ParseFromString(kSentence0, &sentence_0);
string sentence_0_str;
sentence_0.SerializeToString(&sentence_0_str);
data_.reset(new InputBatchCache(sentence_0_str));
SentenceInputBatch *sentences = data_->GetAs<SentenceInputBatch>();
// Create a parser component that will generate a parser state for this
// test.
SyntaxNetComponent component;
component.InitializeComponent(*(master_spec.mutable_component(0)));
std::vector<std::vector<const TransitionState *>> states;
constexpr int kBeamSize = 1;
component.InitializeData(states, kBeamSize, data_.get());
// Get a transition state from the component.
std::unique_ptr<SyntaxNetTransitionState> test_state =
component.CreateState(&(sentences->data()->at(0)));
return test_state;
}
// Backing input data; must remain alive while states created above are used.
std::unique_ptr<InputBatchCache> data_;
};
// Validates the consistency of the beam index setter and getter.
TEST_F(SyntaxNetTransitionStateTest, CanSetAndGetBeamIndex) {
// Create and initialize a test state.
MockTransitionState mock_state;
auto test_state = CreateState();
test_state->Init(mock_state);
constexpr int kOldBeamIndex = 12;
test_state->SetBeamIndex(kOldBeamIndex);
EXPECT_EQ(test_state->GetBeamIndex(), kOldBeamIndex);
// A second set must overwrite the first.
constexpr int kNewBeamIndex = 7;
test_state->SetBeamIndex(kNewBeamIndex);
EXPECT_EQ(test_state->GetBeamIndex(), kNewBeamIndex);
}
// Validates the consistency of the score setter and getter.
TEST_F(SyntaxNetTransitionStateTest, CanSetAndGetScore) {
// Create and initialize a test state.
MockTransitionState mock_state;
auto test_state = CreateState();
test_state->Init(mock_state);
constexpr float kOldScore = 12.1;
test_state->SetScore(kOldScore);
EXPECT_EQ(test_state->GetScore(), kOldScore);
// A second set must overwrite the first.
constexpr float kNewScore = 7.2;
test_state->SetScore(kNewScore);
EXPECT_EQ(test_state->GetScore(), kNewScore);
}
// Validates the consistency of the goldness setter and getter.
TEST_F(SyntaxNetTransitionStateTest, CanSetAndGetGold) {
// Create and initialize a test state.
MockTransitionState mock_state;
auto test_state = CreateState();
test_state->Init(mock_state);
constexpr bool kOldGold = true;
test_state->SetGold(kOldGold);
EXPECT_EQ(test_state->IsGold(), kOldGold);
// A second set must overwrite the first.
constexpr bool kNewGold = false;
test_state->SetGold(kNewGold);
EXPECT_EQ(test_state->IsGold(), kNewGold);
}
// This test ensures that the initializing state's current index is saved
// as the parent beam index of the state being initialized.
TEST_F(SyntaxNetTransitionStateTest, ReportsParentBeamIndex) {
// Create a mock transition state that will report a specific current index.
// This index should become the parent state index for the test state.
MockTransitionState mock_state;
constexpr int kParentBeamIndex = 1138;
EXPECT_CALL(mock_state, GetBeamIndex())
.WillRepeatedly(Return(kParentBeamIndex));
auto test_state = CreateState();
test_state->Init(mock_state);
EXPECT_EQ(test_state->ParentBeamIndex(), kParentBeamIndex);
}
// This test ensures that the initializing state's current score is saved
// as the current score of the state being initialized.
TEST_F(SyntaxNetTransitionStateTest, InitializationCopiesParentScore) {
// Create a mock transition state that will report a specific current score.
// This score should be copied into the test state by Init().
MockTransitionState mock_state;
constexpr float kParentScore = 24.12;
EXPECT_CALL(mock_state, GetScore()).WillRepeatedly(Return(kParentScore));
auto test_state = CreateState();
test_state->Init(mock_state);
EXPECT_EQ(test_state->GetScore(), kParentScore);
}
// This test ensures that calling Clone maintains the state data (parent beam
// index, beam index, score, etc.) of the state that was cloned.
TEST_F(SyntaxNetTransitionStateTest, CloningMaintainsState) {
// Create and initialize the state.
MockTransitionState mock_state;
constexpr int kParentBeamIndex = 1138;
EXPECT_CALL(mock_state, GetBeamIndex())
.WillRepeatedly(Return(kParentBeamIndex));
auto test_state = CreateState();
test_state->Init(mock_state);
// Validate the internal state of the test state.
constexpr float kOldScore = 20.0;
test_state->SetScore(kOldScore);
EXPECT_EQ(test_state->GetScore(), kOldScore);
constexpr int kOldBeamIndex = 12;
test_state->SetBeamIndex(kOldBeamIndex);
EXPECT_EQ(test_state->GetBeamIndex(), kOldBeamIndex);
auto clone = test_state->Clone();
// The clone should have identical state to the old state.
EXPECT_EQ(clone->ParentBeamIndex(), kParentBeamIndex);
EXPECT_EQ(clone->GetScore(), kOldScore);
EXPECT_EQ(clone->GetBeamIndex(), kOldBeamIndex);
}
// Validates the consistency of the step_for_token setter and getter.
TEST_F(SyntaxNetTransitionStateTest, CanSetAndGetStepForToken) {
// Create and initialize a test state.
MockTransitionState mock_state;
auto test_state = CreateState();
test_state->Init(mock_state);
constexpr int kStepForTokenZero = 12;
constexpr int kStepForTokenTwo = 34;
test_state->set_step_for_token(0, kStepForTokenZero);
test_state->set_step_for_token(2, kStepForTokenTwo);
// Expect that the set tokens return values and the unset steps return the
// default.
constexpr int kDefaultValue = -1;
EXPECT_EQ(kStepForTokenZero, test_state->step_for_token(0));
EXPECT_EQ(kDefaultValue, test_state->step_for_token(1));
EXPECT_EQ(kStepForTokenTwo, test_state->step_for_token(2));
// Expect that out of bound accesses will return the default. (There are only
// 3 tokens in the backing sentence, so token 3 and greater are out of bound.)
EXPECT_EQ(kDefaultValue, test_state->step_for_token(-1));
EXPECT_EQ(kDefaultValue, test_state->step_for_token(3));
}
// Validates the consistency of the parent_step_for_token setter and getter.
TEST_F(SyntaxNetTransitionStateTest, CanSetAndGetParentStepForToken) {
  // Create and initialize a test state.
  MockTransitionState mock_state;
  auto test_state = CreateState();
  test_state->Init(mock_state);

  // Record parent steps for tokens 0 and 2, leaving token 1 untouched.
  constexpr int kStepForTokenZero = 12;
  constexpr int kStepForTokenTwo = 34;
  test_state->set_parent_step_for_token(0, kStepForTokenZero);
  test_state->set_parent_step_for_token(2, kStepForTokenTwo);

  // Set tokens report their recorded step; unset tokens report the default.
  constexpr int kDefaultValue = -1;
  EXPECT_EQ(test_state->parent_step_for_token(0), kStepForTokenZero);
  EXPECT_EQ(test_state->parent_step_for_token(1), kDefaultValue);
  EXPECT_EQ(test_state->parent_step_for_token(2), kStepForTokenTwo);

  // Out-of-bounds indices also report the default. (The backing sentence has
  // only 3 tokens, so token 3 and greater are out of bounds.)
  EXPECT_EQ(test_state->parent_step_for_token(-1), kDefaultValue);
  EXPECT_EQ(test_state->parent_step_for_token(3), kDefaultValue);
}
// Validates the consistency of the parent_for_token setter and getter.
TEST_F(SyntaxNetTransitionStateTest, CanSetAndGetParentForToken) {
  // Create and initialize a test state.
  MockTransitionState mock_state;
  auto test_state = CreateState();
  test_state->Init(mock_state);

  // Record parents for tokens 0 and 2, leaving token 1 untouched.
  constexpr int kParentForTokenZero = 12;
  constexpr int kParentForTokenTwo = 34;
  test_state->set_parent_for_token(0, kParentForTokenZero);
  test_state->set_parent_for_token(2, kParentForTokenTwo);

  // Set tokens report their recorded parent; unset tokens report the default.
  constexpr int kDefaultValue = -1;
  EXPECT_EQ(test_state->parent_for_token(0), kParentForTokenZero);
  EXPECT_EQ(test_state->parent_for_token(1), kDefaultValue);
  EXPECT_EQ(test_state->parent_for_token(2), kParentForTokenTwo);

  // Out-of-bounds indices also report the default. (The backing sentence has
  // only 3 tokens, so token 3 and greater are out of bounds.)
  EXPECT_EQ(test_state->parent_for_token(-1), kDefaultValue);
  EXPECT_EQ(test_state->parent_for_token(3), kDefaultValue);
}
// Validates the consistency of trace proto setter/getter.
TEST_F(SyntaxNetTransitionStateTest, CanSetAndGetTrace) {
  // Create and initialize a test state.
  MockTransitionState mock_state;
  auto test_state = CreateState();
  test_state->Init(mock_state);

  // Attach a named trace; ownership transfers to the state.
  const string kTestComponentName = "test";
  std::unique_ptr<ComponentTrace> trace(new ComponentTrace());
  trace->set_name(kTestComponentName);
  test_state->set_trace(std::move(trace));
  EXPECT_EQ(nullptr, trace.get());
  EXPECT_EQ(kTestComponentName, test_state->mutable_trace()->name());

  // The trace should be preserved when cloning, on both clone and original.
  auto cloned_state = test_state->Clone();
  EXPECT_EQ(kTestComponentName, cloned_state->mutable_trace()->name());
  EXPECT_EQ(kTestComponentName, test_state->mutable_trace()->name());
}
} // namespace dragnn
} // namespace syntaxnet
component {
name: "parser"
transition_system {
registered_name: "arc-standard"
}
resource {
name: 'label-map'
part {
file_pattern: 'syntaxnet-tagger.label-map'
file_format: 'text'
}
}
resource {
name: 'tag-map'
part {
file_pattern: 'syntaxnet-tagger.tag-map'
file_format: 'text'
}
}
fixed_feature {
name: "tags"
fml: "input.tag input(1).tag"
embedding_dim: 32
vocabulary_size: 46
size: 2
predicate_map: "hashed"
}
fixed_feature {
name: "tags"
fml: "input(-1).tag input.tag input(1).tag"
embedding_dim: 32
vocabulary_size: 46
size: 3
predicate_map: "hashed"
}
linked_feature {
name: "recurrent_stack"
fml: "stack.focus stack(1).focus"
embedding_dim: 32
size: 2
source_component: "parser"
source_translator: "identity"
source_layer: "hidden_0"
}
backend {
registered_name: "SyntaxNetComponent"
}
}
46
punct 243160
prep 194627
pobj 186958
det 170592
nsubj 144821
nn 144800
amod 117242
ROOT 90592
dobj 88551
aux 76523
advmod 72893
conj 59384
cc 57532
num 36350
poss 35117
dep 34986
ccomp 29470
cop 25991
mark 25141
xcomp 25111
rcmod 16234
auxpass 15740
advcl 14996
possessive 14866
nsubjpass 14133
pcomp 12488
appos 11112
partmod 11106
neg 11090
number 10658
prt 7123
quantmod 6653
tmod 5418
infmod 5134
npadvmod 3213
parataxis 3012
mwe 2793
expl 2712
iobj 1642
acomp 1632
discourse 1381
csubj 1225
predet 1160
preconj 749
goeswith 146
csubjpass 41
component {
name: "tagger"
num_actions : 49
transition_system {
registered_name: "tagger"
parameters {
key: "join_category_to_pos"
value: "true"
}
}
resource {
name: "tag-map"
part {
file_pattern: "TESTDATA/syntaxnet-tagger.tag-map"
file_format: "text"
}
}
resource {
name: "word-map"
part {
file_pattern: "TESTDATA/syntaxnet-tagger.word-map"
file_format: "text"
}
}
resource {
name: "label-map"
part {
file_pattern: "TESTDATA/syntaxnet-tagger.label-map"
file_format: "text"
}
}
fixed_feature {
name: "words"
fml: "input(-1).word input(-2).word input(-3).word input.word input(1).word input(2).word input(3).word"
embedding_dim: 64
vocabulary_size: 39397
size: 7
}
fixed_feature {
name: "words"
fml: "input(-3).word input.word input(1).word input(2).word input(3).word"
embedding_dim: 64
vocabulary_size: 39397
size: 5
}
linked_feature {
name: "rnn"
fml: "stack.focus"
embedding_dim: 32
size: 1
source_component: "tagger"
source_translator: "shift-reduce-step"
source_layer: "layer_0"
}
backend {
registered_name: "SyntaxNetComponent"
}
network_unit {
registered_name: 'feed-forward'
parameters {
key: 'hidden_layer_sizes'
value: '64'
}
}
}
49
NN 285194
IN 228165
DT 179147
NNP 175147
JJ 125667
NNS 115732
, 97481
. 85938
RB 78513
VB 63952
CC 57554
VBD 56635
CD 55674
PRP 55244
VBZ 48126
VBN 44458
VBG 34524
VBP 33669
TO 28772
MD 22364
PRP$ 20706
HYPH 18526
POS 14905
`` 12193
'' 12154
WDT 10267
: 8713
$ 7993
WP 7336
RP 7335
WRB 6634
JJR 6295
NNPS 5917
-RRB- 3904
-LRB- 3840
JJS 3596
RBR 3186
EX 2733
UH 1521
RBS 1467
PDT 1271
FW 928
NFP 844
SYM 652
ADD 476
LS 392
WP$ 332
GW 184
AFX 42
# Targets in this package are visible to the entire workspace; the
# layering_check feature is disabled for these builds.
package(
    default_visibility = ["//visibility:public"],
    features = ["-layering_check"],
)

# Header-only library wrapping the allocator callbacks used by the bulk
# feature-extraction op.
cc_library(
    name = "bulk_feature_extractor",
    hdrs = ["bulk_feature_extractor.h"],
    deps = [
        "//syntaxnet:base",
    ],
)
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
#ifndef DRAGNN_COMPONENTS_UTIL_BULK_FEATURE_EXTRACTOR_H_
#define DRAGNN_COMPONENTS_UTIL_BULK_FEATURE_EXTRACTOR_H_
#include <functional>
#include <utility>
#include "tensorflow/core/platform/types.h"
namespace syntaxnet {
namespace dragnn {
// Provides a wrapper for allocator functions and padding data for the Bulk
// ExtractFixedFeatures operation.
class BulkFeatureExtractor {
 public:
  // Creates an extractor with explicit padding control. Each allocator takes
  // a channel and an element count and returns a contiguous block of memory
  // associated with that channel (the caller decides what that association
  // means). When use_padding is true, pad_to_step and pad_to_element are used
  // instead of the actual counts when computing feature indices.
  BulkFeatureExtractor(
      std::function<tensorflow::int32 *(int channel, int num_elements)>
          allocate_indices_by_channel,
      std::function<tensorflow::int64 *(int channel, int num_elements)>
          allocate_ids_by_channel,
      std::function<float *(int channel, int num_elements)>
          allocate_weights_by_channel,
      bool use_padding, int pad_to_step, int pad_to_element)
      : use_padding_(use_padding),
        pad_to_step_(pad_to_step),
        pad_to_element_(pad_to_element),
        allocate_indices_by_channel_(std::move(allocate_indices_by_channel)),
        allocate_ids_by_channel_(std::move(allocate_ids_by_channel)),
        allocate_weights_by_channel_(std::move(allocate_weights_by_channel)) {}

  // Convenience constructor for callers that never pad: delegates to the
  // full constructor with padding disabled.
  BulkFeatureExtractor(
      std::function<tensorflow::int32 *(int channel, int num_elements)>
          allocate_indices_by_channel,
      std::function<tensorflow::int64 *(int channel, int num_elements)>
          allocate_ids_by_channel,
      std::function<float *(int channel, int num_elements)>
          allocate_weights_by_channel)
      : BulkFeatureExtractor(std::move(allocate_indices_by_channel),
                             std::move(allocate_ids_by_channel),
                             std::move(allocate_weights_by_channel),
                             /*use_padding=*/false, /*pad_to_step=*/-1,
                             /*pad_to_element=*/-1) {}

  // Invokes the index memory allocator.
  tensorflow::int32 *AllocateIndexMemory(int channel, int num_elements) const {
    return allocate_indices_by_channel_(channel, num_elements);
  }

  // Invokes the ID memory allocator.
  tensorflow::int64 *AllocateIdMemory(int channel, int num_elements) const {
    return allocate_ids_by_channel_(channel, num_elements);
  }

  // Invokes the weight memory allocator.
  float *AllocateWeightMemory(int channel, int num_elements) const {
    return allocate_weights_by_channel_(channel, num_elements);
  }

  // Computes the flat index (not ID) of a feature from the total step count
  // and element count. Depending on how this extractor was constructed, the
  // padded counts may be used in place of the actual ones. Layout is
  // feature-major, then element, then step.
  int GetIndex(int total_steps, int num_elements, int feature_idx,
               int element_idx, int step_idx) const {
    const int step_count = use_padding_ ? pad_to_step_ : total_steps;
    const int element_count = use_padding_ ? pad_to_element_ : num_elements;
    return step_idx +
           step_count * (element_idx + element_count * feature_idx);
  }

 private:
  const bool use_padding_;
  const int pad_to_step_;
  const int pad_to_element_;
  const std::function<tensorflow::int32 *(int, int)>
      allocate_indices_by_channel_;
  const std::function<tensorflow::int64 *(int, int)> allocate_ids_by_channel_;
  const std::function<float *(int, int)> allocate_weights_by_channel_;
};
} // namespace dragnn
} // namespace syntaxnet
#endif // DRAGNN_COMPONENTS_UTIL_BULK_FEATURE_EXTRACTOR_H_
# Command-line tool that emits the CONLL2017 parser MasterSpec textproto.
py_binary(
    name = "make_parser_spec",
    srcs = ["make_parser_spec.py"],
    deps = [
        "//dragnn/protos:spec_pb2_py",
        "//dragnn/python:spec_builder",
        "@absl_py//absl/flags",
        "@org_tensorflow//tensorflow:tensorflow_py",
    ],
)
#!/bin/sh
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# A script to train the CONLL2017 baseline.
set -e

language=English
output_dir=./trained-"$language"

# Both corpora are required positional arguments; fail fast with a usage
# message instead of starting an expensive build with missing inputs.
if [ "$#" -lt 2 ]; then
  echo "usage: $0 <training_corpus> <dev_corpus>" >&2
  exit 1
fi
training_corpus="$1"
dev_corpus="$2"

# Build the trainer and the spec generator.
bazel build -c opt //dragnn/tools:trainer //dragnn/conll2017:make_parser_spec

# Quote the path so the script still works if it ever contains spaces.
mkdir -p "$output_dir"

# Generate the parser spec, then train against it.
bazel-bin/dragnn/conll2017/make_parser_spec \
  --spec_file="$output_dir/parser_spec.textproto"

bazel-bin/dragnn/tools/trainer \
  --logtostderr \
  --compute_lexicon \
  --dragnn_spec="$output_dir/parser_spec.textproto" \
  --resource_path="$output_dir/resources" \
  --training_corpus_path="$training_corpus" \
  --tune_corpus_path="$dev_corpus" \
  --tensorboard_dir="$output_dir/tensorboard" \
  --checkpoint_filename="$output_dir/checkpoint.model"
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Construct the spec for the CONLL2017 Parser baseline."""
from absl import flags
import tensorflow as tf
from tensorflow.python.platform import gfile
from dragnn.protos import spec_pb2
from dragnn.python import spec_builder
FLAGS = flags.FLAGS

# Output path for the generated MasterSpec textproto.
flags.DEFINE_string('spec_file', 'parser_spec.textproto',
                    'Filename to save the spec to.')
def main(unused_argv):
  """Builds the CONLL2017 parser baseline MasterSpec and writes it to disk.

  The pipeline has four chained components: a character LSTM producing word
  representations, a right-to-left lookahead LSTM, a POS tagger, and an
  arc-standard dependency parser. The resulting spec is saved as a textproto
  at FLAGS.spec_file.

  Args:
    unused_argv: Unused command-line arguments (consumed by tf.app.run).
  """
  # Left-to-right, character-based LSTM.
  char2word = spec_builder.ComponentSpecBuilder('char_lstm')
  char2word.set_network_unit(
      name='wrapped_units.LayerNormBasicLSTMNetwork',
      hidden_layer_sizes='256')
  char2word.set_transition_system(name='char-shift-only', left_to_right='true')
  char2word.add_fixed_feature(name='chars', fml='char-input.text-char',
                              embedding_dim=16)

  # Lookahead LSTM reads right-to-left to represent the rightmost context of
  # the words. It gets word embeddings from the char model.
  lookahead = spec_builder.ComponentSpecBuilder('lookahead')
  lookahead.set_network_unit(
      name='wrapped_units.LayerNormBasicLSTMNetwork',
      hidden_layer_sizes='256')
  lookahead.set_transition_system(name='shift-only', left_to_right='false')
  lookahead.add_link(source=char2word, fml='input.last-char-focus',
                     embedding_dim=64)

  # Construct the tagger. This is a simple left-to-right LSTM sequence tagger.
  tagger = spec_builder.ComponentSpecBuilder('tagger')
  tagger.set_network_unit(
      name='wrapped_units.LayerNormBasicLSTMNetwork',
      hidden_layer_sizes='256')
  tagger.set_transition_system(name='tagger')
  tagger.add_token_link(source=lookahead, fml='input.focus', embedding_dim=64)

  # Construct the parser.
  parser = spec_builder.ComponentSpecBuilder('parser')
  parser.set_network_unit(name='FeedForwardNetwork', hidden_layer_sizes='256',
                          layer_norm_hidden='true')
  parser.set_transition_system(name='arc-standard')
  parser.add_token_link(source=lookahead, fml='input.focus', embedding_dim=64)
  parser.add_token_link(
      source=tagger, fml='input.focus stack.focus stack(1).focus',
      embedding_dim=64)

  # Add discrete features of the predicted parse tree so far, like in Parsey
  # McParseface.
  parser.add_fixed_feature(name='labels', embedding_dim=16,
                           fml=' '.join([
                               'stack.child(1).label',
                               'stack.child(1).sibling(-1).label',
                               'stack.child(-1).label',
                               'stack.child(-1).sibling(1).label',
                               'stack(1).child(1).label',
                               'stack(1).child(1).sibling(-1).label',
                               'stack(1).child(-1).label',
                               'stack(1).child(-1).sibling(1).label',
                               'stack.child(2).label',
                               'stack.child(-2).label',
                               'stack(1).child(2).label',
                               'stack(1).child(-2).label']))

  # Recurrent connection for the arc-standard parser. For both tokens on the
  # stack, we connect to the last time step to either SHIFT or REDUCE that
  # token. This allows the parser to build up compositional representations of
  # phrases.
  parser.add_link(
      source=parser,  # recurrent connection
      name='rnn-stack',  # unique identifier
      fml='stack.focus stack(1).focus',  # look for both stack tokens
      source_translator='shift-reduce-step',  # maps token indices -> step
      embedding_dim=64)  # project down to 64 dims

  master_spec = spec_pb2.MasterSpec()
  master_spec.component.extend(
      [char2word.spec, lookahead.spec, tagger.spec, parser.spec])

  with gfile.FastGFile(FLAGS.spec_file, 'w') as f:
    # The file is opened in text ('w') mode, so write the textproto as a
    # string. Encoding to bytes here would raise a TypeError under Python 3;
    # the textproto is ASCII, so the output is unchanged under Python 2.
    f.write(str(master_spec))
# Parse command-line flags and dispatch to main() via the TF app runner.
if __name__ == '__main__':
  tf.app.run()
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment