diff --git a/tensorflow/contrib/distributions/python/ops/bernoulli.py b/tensorflow/contrib/distributions/python/ops/bernoulli.py
index 44962a5f1b93ee..fa2f9f0caa2a93 100644
--- a/tensorflow/contrib/distributions/python/ops/bernoulli.py
+++ b/tensorflow/contrib/distributions/python/ops/bernoulli.py
@@ -182,7 +182,6 @@ def __init__(self,
     self._parameters = parameters
 
 
-@kullback_leibler.RegisterKL(Bernoulli, Bernoulli)
 def _kl_bernoulli_bernoulli(a, b, name=None):
   """Calculate the batched KL divergence KL(a || b) with a and b Bernoulli.
 
@@ -200,3 +199,10 @@ def _kl_bernoulli_bernoulli(a, b, name=None):
                             nn.softplus(-b.logits)) +
             math_ops.sigmoid(-a.logits) * (-nn.softplus(a.logits) +
                                            nn.softplus(b.logits)))
+
+
+kl_classes = [
+    Bernoulli,
+    BernoulliWithSigmoidP,
+]
+kullback_leibler.register_pairwise_kls(kl_classes, _kl_bernoulli_bernoulli)
diff --git a/tensorflow/contrib/distributions/python/ops/beta.py b/tensorflow/contrib/distributions/python/ops/beta.py
index 2ccd3519c4e07b..9ed50c2b58e1e6 100644
--- a/tensorflow/contrib/distributions/python/ops/beta.py
+++ b/tensorflow/contrib/distributions/python/ops/beta.py
@@ -332,7 +332,4 @@ def _kl_beta_beta(d1, d2, name=None):
     Beta,
     BetaWithSoftplusAB,
 ]
-
-for beta_aa in kl_classes:
-  for beta_bb in kl_classes:
-    kullback_leibler.RegisterKL(beta_aa, beta_bb)(_kl_beta_beta)
+kullback_leibler.register_pairwise_kls(kl_classes, _kl_beta_beta)
diff --git a/tensorflow/contrib/distributions/python/ops/kullback_leibler.py b/tensorflow/contrib/distributions/python/ops/kullback_leibler.py
index 90f2fdf95ebed7..bc0c186032790c 100644
--- a/tensorflow/contrib/distributions/python/ops/kullback_leibler.py
+++ b/tensorflow/contrib/distributions/python/ops/kullback_leibler.py
@@ -109,3 +109,20 @@ def __call__(self, kl_fn):
                        _DIVERGENCES[self._key]))
     _DIVERGENCES[self._key] = kl_fn
     return kl_fn
+
+
+def register_pairwise_kls(kl_classes, kl_fn):
+  """Registers `kl_fn` for each pair of classes in `kl_classes`.
+
+  Args:
+    kl_classes: classes for which to register KL implementation
+    kl_fn: The function to use for the KL divergence.
+
+  Returns:
+    None
+  """
+  for cls_a in kl_classes:
+    RegisterKL(cls_a, cls_a)(kl_fn)
+    for cls_b in kl_classes:
+      if cls_a != cls_b:
+        RegisterKL(cls_a, cls_b)(kl_fn)
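Editorial note, not part of the patch: `register_pairwise_kls` registers the same KL function under every ordered pair drawn from `kl_classes`, so a distribution and its softplus/sigmoid-parameterized subclass share one implementation. A minimal standalone sketch of the pattern follows, with a toy registry standing in for the real `_DIVERGENCES` table and local stand-in classes; nothing in it is the actual TensorFlow API.

```
# Toy re-implementation of the registration pattern above.
_DIVERGENCES = {}


class RegisterKL(object):
  """Decorator mapping a (type_a, type_b) pair to a KL implementation."""

  def __init__(self, dist_cls_a, dist_cls_b):
    self._key = (dist_cls_a, dist_cls_b)

  def __call__(self, kl_fn):
    if self._key in _DIVERGENCES:
      raise ValueError("KL already registered for %s." % (self._key,))
    _DIVERGENCES[self._key] = kl_fn
    return kl_fn


def register_pairwise_kls(kl_classes, kl_fn):
  for cls_a in kl_classes:
    RegisterKL(cls_a, cls_a)(kl_fn)   # same-class pair, registered once
    for cls_b in kl_classes:
      if cls_a != cls_b:              # mixed pairs, in both orders
        RegisterKL(cls_a, cls_b)(kl_fn)


class Normal(object):
  pass


class NormalWithSoftplusSigma(Normal):
  pass


def _kl_normal_normal(n_a, n_b, name=None):
  return "KL(%s || %s)" % (type(n_a).__name__, type(n_b).__name__)


register_pairwise_kls([Normal, NormalWithSoftplusSigma], _kl_normal_normal)
assert len(_DIVERGENCES) == 4  # n^2 ordered pairs for n = 2 classes
```

The split loop mirrors the comment in the old mvn.py code being deleted below: registering the same-class pair inside the inner loop as well would trip the "already registered" check.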
diff --git a/tensorflow/contrib/distributions/python/ops/mvn.py b/tensorflow/contrib/distributions/python/ops/mvn.py
index fd72740b9f64dc..c08212c659ec39 100644
--- a/tensorflow/contrib/distributions/python/ops/mvn.py
+++ b/tensorflow/contrib/distributions/python/ops/mvn.py
@@ -780,12 +780,4 @@ def _kl_mvn_mvn_brute_force(mvn_a, mvn_b, name=None):
     MultivariateNormalDiag,
     MultivariateNormalDiagPlusVDVT,
 ]
-
-
-for mvn_aa in kl_classes:
-  # Register when they are the same here, and do not register when they are the
-  # same below because that would result in a repeated registration.
-  kullback_leibler.RegisterKL(mvn_aa, mvn_aa)(_kl_mvn_mvn_brute_force)
-  for mvn_bb in kl_classes:
-    if mvn_bb != mvn_aa:
-      kullback_leibler.RegisterKL(mvn_aa, mvn_bb)(_kl_mvn_mvn_brute_force)
+kullback_leibler.register_pairwise_kls(kl_classes, _kl_mvn_mvn_brute_force)
diff --git a/tensorflow/contrib/distributions/python/ops/normal.py b/tensorflow/contrib/distributions/python/ops/normal.py
index a83df4bc995fb7..58daf17548dca1 100644
--- a/tensorflow/contrib/distributions/python/ops/normal.py
+++ b/tensorflow/contrib/distributions/python/ops/normal.py
@@ -225,7 +225,6 @@ def __init__(self,
     self._parameters = parameters
 
 
-@kullback_leibler.RegisterKL(Normal, Normal)
 def _kl_normal_normal(n_a, n_b, name=None):
   """Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal.
 
@@ -247,3 +246,10 @@ def _kl_normal_normal(n_a, n_b, name=None):
   ratio = s_a_squared / s_b_squared
   return (math_ops.square(n_a.mu - n_b.mu) / (two * s_b_squared)
           + half * (ratio - one - math_ops.log(ratio)))
+
+
+kl_classes = [
+    Normal,
+    NormalWithSoftplusSigma,
+]
+kullback_leibler.register_pairwise_kls(kl_classes, _kl_normal_normal)
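Editorial aside, not part of the patch: the expression `_kl_normal_normal` computes is the standard closed form for the KL divergence between two univariate Gaussians, grouped around `ratio = s_a_squared / s_b_squared`:

$$
\mathrm{KL}\big(\mathcal{N}(\mu_a,\sigma_a^2)\,\big\|\,\mathcal{N}(\mu_b,\sigma_b^2)\big)
  = \frac{(\mu_a-\mu_b)^2}{2\sigma_b^2}
  + \frac{1}{2}\left(\frac{\sigma_a^2}{\sigma_b^2} - 1
  - \log\frac{\sigma_a^2}{\sigma_b^2}\right)
$$

The first term penalizes the mean shift measured in `n_b`'s scale; the second is zero exactly when the variances agree, since `x - 1 - log(x) >= 0` with equality at `x = 1`.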
diff --git a/tensorflow/core/common_runtime/function.cc b/tensorflow/core/common_runtime/function.cc
index c868083efda5ee..0f8629b28399a3 100644
--- a/tensorflow/core/common_runtime/function.cc
+++ b/tensorflow/core/common_runtime/function.cc
@@ -39,12 +39,17 @@ limitations under the License.
 namespace tensorflow {
 
 // A few string constants used throughout this module.
-static const char* const kArgOp = "_Arg";
-static const char* const kRetOp = "_Retval";
-static const char* const kGradientOp = "SymbolicGradient";
-static const char* const kNodeLabel = "Func";
-static const char* const kFuncAttr = "f";
-static const char* const kNoInlineAttr = "_noinline";
+//
+// TODO(zhifengc): Dedup some of these constants into
+// framework/function.h
+static constexpr const char* const kArgOp = "_Arg";
+static constexpr const char* const kRetOp = "_Retval";
+static constexpr const char* const kGradientOp =
+    FunctionLibraryDefinition::kGradientOp;
+static constexpr const char* const kNodeLabel = "Func";
+static constexpr const char* const kFuncAttr =
+    FunctionLibraryDefinition::kFuncAttr;
+static constexpr const char* const kNoInlineAttr = "_noinline";
 
 // Represents the index-th output of a node.
 struct Endpoint {
@@ -926,46 +931,13 @@ static void InlineFunctionBody(Graph* g, Node* caller,
   g->RemoveNode(caller);  // 'caller' is replaced with inlined nodes.
 }
 
-// Given a node's NodeDef, returns false iff the node explicitly
-// specified _noinline. This gives ExpandInlineFunctions a heuristic
-// to decide whether to inline the function.
-bool ShouldInline(const NodeDef& ndef) {
-  bool noinline = false;
-  if (GetNodeAttr(ndef, kNoInlineAttr, &noinline).ok()) {
-    // If the node specifies attribute '_noinline', returns accordingly.
-    return !noinline;
-  }
-  if (ndef.op() != kGradientOp) {
-    // If the op is not SymbolicGradient, we should be free to decide
-    // whether to inline or not.
-    return true;
-  }
-  // If the node is a SymbolicGradient, we use the forward
-  // function's attribute '_noinline' instead.
-  const NameAttrList* forward_func_attrs;
-  Status s =
-      GetNodeAttr(AttrSlice(&ndef.attr()), kFuncAttr, &forward_func_attrs);
-  if (!s.ok()) {
-    // The node def is malformed (missing attribute 'f'), we'll just
-    // continue and the runtime will error out.
-    return false;
-  }
-  s = GetNodeAttr(AttrSlice(&forward_func_attrs->attr()), kNoInlineAttr,
-                  &noinline);
-  if (!s.ok()) {
-    // The forward function doesn't specify '_noinline' attr, we should
-    // be free to decide.
-    return true;
-  }
-  // Otherwise, make inline decision according to the attr.
-  return !noinline;
-}
-
 bool ExpandInlineFunctions(FunctionLibraryRuntime* lib, Graph* graph) {
   std::vector<std::pair<Node*, const FunctionBody*>> candidates;
+  const FunctionLibraryDefinition* fld = lib->GetFunctionLibraryDefinition();
   for (Node* node : graph->nodes()) {
     VLOG(3) << "Expanding " << node->DebugString();
-    if (!ShouldInline(node->def())) {
+    bool noinline;
+    if (fld->GetAttr(node->def(), kNoInlineAttr, &noinline).ok() && noinline) {
       VLOG(3) << "noinline: " << node->DebugString();
       continue;
     }
diff --git a/tensorflow/core/framework/function.cc b/tensorflow/core/framework/function.cc
index bedc85ab4e7f7b..5d260c62ec9f97 100644
--- a/tensorflow/core/framework/function.cc
+++ b/tensorflow/core/framework/function.cc
@@ -1012,6 +1012,32 @@ Status FunctionLibraryDefinition::LookUp(
   return default_registry_->LookUp(op, op_reg_data);
 }
 
+const FunctionDef* FunctionLibraryDefinition::GetAttrImpl(
+    const NodeDef& ndef) const {
+  if (ndef.op() != kGradientOp) {
+    // If 'ndef' calls a function and the function's def has the attr,
+    // returns it.
+    return Find(ndef.op());
+  }
+
+  // If ndef is SymbolicGradient[f=Foo], we use Foo's gradient or
+  // Foo's attributes.
+  const NameAttrList* forward_func_attrs;
+  if (!GetNodeAttr(AttrSlice(&ndef.attr()), kFuncAttr, &forward_func_attrs)
+           .ok()) {
+    return nullptr;
+  }
+  const string& func_name = forward_func_attrs->name();
+  const string& grad_name = FindGradient(func_name);
+  // If 'func' has a user-defined gradient function, uses the grad
+  // function's attrs to see if noinline is specified. Otherwise,
+  // uses func's attrs.
+  if (!grad_name.empty()) {
+    return Find(grad_name);
+  }
+  return Find(func_name);
+}
+
 FunctionDefLibrary FunctionLibraryDefinition::ToProto() const {
   FunctionDefLibrary lib;
   for (const auto& f : function_defs_) {
diff --git a/tensorflow/core/framework/function.h b/tensorflow/core/framework/function.h
index 67c71be46c37cb..5cb4e28faf1d18 100644
--- a/tensorflow/core/framework/function.h
+++ b/tensorflow/core/framework/function.h
@@ -16,8 +16,6 @@ limitations under the License.
 #ifndef TENSORFLOW_FRAMEWORK_FUNCTION_H_
 #define TENSORFLOW_FRAMEWORK_FUNCTION_H_
 
-#include <unordered_map>
-
 #include <vector>
 #include "tensorflow/core/framework/attr_value_util.h"
 #include "tensorflow/core/framework/function.pb.h"
@@ -26,6 +24,8 @@ limitations under the License.
 #include "tensorflow/core/framework/op.h"
 #include "tensorflow/core/framework/selective_registration.h"
 #include "tensorflow/core/framework/types.h"
+#include "tensorflow/core/lib/gtl/flatmap.h"
+#include "tensorflow/core/lib/hash/hash.h"
 #include "tensorflow/core/platform/env.h"
 #include "tensorflow/core/platform/macros.h"
 #include "tensorflow/core/platform/protobuf.h"
@@ -308,6 +308,15 @@ class FunctionLibraryDefinition : public OpRegistryInterface {
   Status LookUp(const string& op_type_name,
                 const OpRegistrationData** op_reg_data) const override;
 
+  static constexpr const char* const kGradientOp = "SymbolicGradient";
+  static constexpr const char* const kFuncAttr = "f";
+
+  // Given a node def 'ndef', inspects attributes of the callee
+  // function to derive the attribute 'value' for 'attr'. Returns OK
+  // iff the attribute is given by the function's definition.
+  template <typename T>
+  Status GetAttr(const NodeDef& ndef, const string& attr, T* value) const;
+
   // Returns a proto representation of the state of this function library.
   FunctionDefLibrary ToProto() const;
 
@@ -322,9 +331,13 @@ class FunctionLibraryDefinition : public OpRegistryInterface {
   };
 
   const OpRegistryInterface* const default_registry_;
-  std::unordered_map<string, std::unique_ptr<FunctionDefAndOpRegistration>>
+  gtl::FlatMap<string, std::unique_ptr<FunctionDefAndOpRegistration>, HashStr>
       function_defs_;
-  std::unordered_map<string, string> func_grad_;
+  gtl::FlatMap<string, string, HashStr> func_grad_;
+
+  // Helper function for GetAttr. Returns the FunctionDef* to get the
+  // attr from.
+  const FunctionDef* GetAttrImpl(const NodeDef& ndef) const;
 };
 
 // Forward declare. Defined in common_runtime/function.h
@@ -473,6 +486,18 @@ bool RegisterOp(const string& op, Creator func);
 Status GetOpGradientCreator(const string& op, Creator* creator);
 };
 
+// Implementation details.
+
+template <typename T>
+Status FunctionLibraryDefinition::GetAttr(const NodeDef& ndef,
+                                          const string& attr, T* value) const {
+  const FunctionDef* fdef = GetAttrImpl(ndef);
+  if (fdef && GetNodeAttr(AttrSlice(&fdef->attr()), attr, value).ok()) {
+    return Status::OK();
+  }
+  return errors::InvalidArgument("Attr ", attr, " is not defined.");
+}
+
 }  // end namespace tensorflow
 
 #endif  // TENSORFLOW_FRAMEWORK_FUNCTION_H_
diff --git a/tensorflow/core/framework/function.proto b/tensorflow/core/framework/function.proto
index 4a1ad7a12596bf..5a394d64809286 100644
--- a/tensorflow/core/framework/function.proto
+++ b/tensorflow/core/framework/function.proto
@@ -27,6 +27,9 @@ message FunctionDef {
   // attrs etc.
   OpDef signature = 1;
 
+  // Attributes specific to this function definition.
+  map<string, AttrValue> attr = 5;
+
   // TO BE REPLACED
 
   // The body of the function.
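Editorial aside, not part of the patch: the lookup rule that `GetAttr` and `GetAttrImpl` implement is that an ordinary function-calling node reads the attr off the callee's `FunctionDef`, while a `SymbolicGradient[f=Foo]` node reads it off Foo's registered gradient function if one exists, else off Foo itself. A plain-Python sketch of that rule, with dicts standing in for the protos (all names here are illustrative stand-ins, not TensorFlow API):

```
GRADIENT_OP = "SymbolicGradient"
FUNC_ATTR = "f"


def get_attr(library, ndef, attr):
  """Mimics FunctionLibraryDefinition::GetAttr on dict stand-ins.

  library: function name -> {"attr": {...}, "gradient": name or None}.
  ndef: {"op": op name, "attr": {...}} for the calling node.
  """
  if ndef["op"] != GRADIENT_OP:
    # An ordinary call: the attr comes from the callee's definition,
    # never from the node itself.
    fdef = library.get(ndef["op"])
  else:
    # SymbolicGradient[f=Foo]: prefer the attrs of Foo's gradient
    # function, falling back to Foo's own attrs.
    forward = ndef["attr"].get(FUNC_ATTR)
    if forward is None:
      return None  # malformed node: missing the 'f' attr
    grad = library.get(forward, {}).get("gradient")
    fdef = library.get(grad) if grad else library.get(forward)
  return fdef["attr"].get(attr) if fdef else None


library = {
    "XTimesTwo": {"attr": {"_noinline": True}, "gradient": "WXPlusB"},
    "WXPlusB": {"attr": {}, "gradient": None},
}
grad_node = {"op": GRADIENT_OP, "attr": {"f": "XTimesTwo"}}
print(get_attr(library, {"op": "XTimesTwo", "attr": {}}, "_noinline"))  # True
print(get_attr(library, grad_node, "_noinline"))  # None: WXPlusB sets nothing
```

This is also why `GetAttr_FuncNoAttr` below checks that an attr set on the `NodeDef` itself is ignored.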
+ ndef.set_op("XTimesTwo"); + TF_EXPECT_OK(lib.GetAttr(ndef, "annotation", &annotation)); + EXPECT_EQ(annotation, true); + + string str; + TF_EXPECT_OK(lib.GetAttr(ndef, "options", &str)); + EXPECT_EQ(str, "some string data"); +} + +TEST(FunctionLibraryDefinitionTest, GetAttr_Gradient) { + FunctionDefLibrary proto; + auto fdef = proto.add_function(); + *fdef = test::function::XTimesTwo(); + SetAttrValue(fdef, "annotation", true); + *fdef = test::function::WXPlusB(); + SetAttrValue(fdef, "annotation", false); + auto func_grad = proto.add_gradient(); + func_grad->set_function_name("XTimesTwo"); + func_grad->set_gradient_func("WXPlusB"); + FunctionLibraryDefinition lib(OpRegistry::Global(), proto); + + NodeDef ndef; + ndef.set_op(FunctionLibraryDefinition::kGradientOp); + + bool annotation; + EXPECT_FALSE(lib.GetAttr(ndef, "annotation", &annotation).ok()); + + NameAttrList nal; + nal.set_name("XTimesTwo"); + AddNodeAttr(FunctionLibraryDefinition::kFuncAttr, nal, &ndef); + TF_EXPECT_OK(lib.GetAttr(ndef, "annotation", &annotation)); + EXPECT_EQ(annotation, false); // XTimesTwo's gradient is WXPlusB. + + nal.set_name("WXPlusB"); + ndef.clear_attr(); + AddNodeAttr(FunctionLibraryDefinition::kFuncAttr, nal, &ndef); + TF_EXPECT_OK(lib.GetAttr(ndef, "annotation", &annotation)); + EXPECT_EQ(annotation, false); // WXPlusB has no custom gradient. +} + } // end namespace tensorflow diff --git a/tensorflow/g3doc/api_docs/python/contrib.distributions.md b/tensorflow/g3doc/api_docs/python/contrib.distributions.md index a86285a0196046..45c979275590e7 100644 --- a/tensorflow/g3doc/api_docs/python/contrib.distributions.md +++ b/tensorflow/g3doc/api_docs/python/contrib.distributions.md @@ -23136,1175 +23136,18 @@ Initialize the KL registrar. ## Other Functions and Classes - - - -### `class tf.contrib.distributions.beta_aa` {#beta_aa} - -Beta with softplus transform on `a` and `b`. -- - - - -#### `tf.contrib.distributions.beta_aa.__init__(a, b, validate_args=False, allow_nan_stats=True, name='BetaWithSoftplusAB')` {#beta_aa.__init__} - - - - -- - - - -#### `tf.contrib.distributions.beta_aa.a` {#beta_aa.a} - -Shape parameter. - - -- - - - -#### `tf.contrib.distributions.beta_aa.a_b_sum` {#beta_aa.a_b_sum} - -Sum of parameters. - - -- - - - -#### `tf.contrib.distributions.beta_aa.allow_nan_stats` {#beta_aa.allow_nan_stats} - -Python boolean describing behavior when a stat is undefined. - -Stats return +/- infinity when it makes sense. E.g., the variance -of a Cauchy distribution is infinity. However, sometimes the -statistic is undefined, e.g., if a distribution's pdf does not achieve a -maximum within the support of the distribution, the mode is undefined. -If the mean is undefined, then by definition the variance is undefined. -E.g. the mean for Student's T for df = 1 is undefined (no clear way to say -it is either + or - infinity), so the variance = E[(X - mean)^2] is also -undefined. - -##### Returns: - - -* `allow_nan_stats`: Python boolean. - - -- - - - -#### `tf.contrib.distributions.beta_aa.b` {#beta_aa.b} - -Shape parameter. - - -- - - - -#### `tf.contrib.distributions.beta_aa.batch_shape(name='batch_shape')` {#beta_aa.batch_shape} - -Shape of a single sample from a single event index as a 1-D `Tensor`. - -The product of the dimensions of the `batch_shape` is the number of -independent distributions of this kind the instance represents. - -##### Args: - - -* `name`: name to give to the op - -##### Returns: - - -* `batch_shape`: `Tensor`. 
- - -- - - - -#### `tf.contrib.distributions.beta_aa.cdf(value, name='cdf', **condition_kwargs)` {#beta_aa.cdf} - -Cumulative distribution function. - -Given random variable `X`, the cumulative distribution function `cdf` is: - -``` -cdf(x) := P[X <= x] -``` - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `cdf`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.copy(**override_parameters_kwargs)` {#beta_aa.copy} - -Creates a deep copy of the distribution. - -Note: the copy distribution may continue to depend on the original -intialization arguments. - -##### Args: - - -* `**override_parameters_kwargs`: String/value dictionary of initialization - arguments to override with new values. - -##### Returns: - - -* `distribution`: A new instance of `type(self)` intitialized from the union - of self.parameters and override_parameters_kwargs, i.e., - `dict(self.parameters, **override_parameters_kwargs)`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.dtype` {#beta_aa.dtype} - -The `DType` of `Tensor`s handled by this `Distribution`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.entropy(name='entropy')` {#beta_aa.entropy} - -Shannon entropy in nats. - - -- - - - -#### `tf.contrib.distributions.beta_aa.event_shape(name='event_shape')` {#beta_aa.event_shape} - -Shape of a single sample from a single batch as a 1-D int32 `Tensor`. - -##### Args: - - -* `name`: name to give to the op - -##### Returns: - - -* `event_shape`: `Tensor`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.get_batch_shape()` {#beta_aa.get_batch_shape} - -Shape of a single sample from a single event index as a `TensorShape`. - -Same meaning as `batch_shape`. May be only partially defined. - -##### Returns: - - -* `batch_shape`: `TensorShape`, possibly unknown. - - -- - - - -#### `tf.contrib.distributions.beta_aa.get_event_shape()` {#beta_aa.get_event_shape} - -Shape of a single sample from a single batch as a `TensorShape`. - -Same meaning as `event_shape`. May be only partially defined. - -##### Returns: - - -* `event_shape`: `TensorShape`, possibly unknown. - - -- - - - -#### `tf.contrib.distributions.beta_aa.is_continuous` {#beta_aa.is_continuous} - - - - -- - - - -#### `tf.contrib.distributions.beta_aa.is_reparameterized` {#beta_aa.is_reparameterized} - - - - -- - - - -#### `tf.contrib.distributions.beta_aa.log_cdf(value, name='log_cdf', **condition_kwargs)` {#beta_aa.log_cdf} - -Log cumulative distribution function. - -Given random variable `X`, the cumulative distribution function `cdf` is: - -``` -log_cdf(x) := Log[ P[X <= x] ] -``` - -Often, a numerical approximation can be used for `log_cdf(x)` that yields -a more accurate answer than simply taking the logarithm of the `cdf` when -`x << -1`. - - -Additional documentation from `Beta`: - -Note that the argument `x` must be a non-negative floating point tensor -whose shape can be broadcast with `self.a` and `self.b`. For fixed leading -dimensions, the last dimension represents counts for the corresponding Beta -distribution in `self.a` and `self.b`. `x` is only legal if `0 < x < 1`. - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. 
- -##### Returns: - - -* `logcdf`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.log_pdf(value, name='log_pdf', **condition_kwargs)` {#beta_aa.log_pdf} - -Log probability density function. - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `log_prob`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - -##### Raises: - - -* `TypeError`: if not `is_continuous`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.log_pmf(value, name='log_pmf', **condition_kwargs)` {#beta_aa.log_pmf} - -Log probability mass function. - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `log_pmf`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - -##### Raises: - - -* `TypeError`: if `is_continuous`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.log_prob(value, name='log_prob', **condition_kwargs)` {#beta_aa.log_prob} - -Log probability density/mass function (depending on `is_continuous`). - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `log_prob`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.log_survival_function(value, name='log_survival_function', **condition_kwargs)` {#beta_aa.log_survival_function} - -Log survival function. - -Given random variable `X`, the survival function is defined: - -``` -log_survival_function(x) = Log[ P[X > x] ] - = Log[ 1 - P[X <= x] ] - = Log[ 1 - cdf(x) ] -``` - -Typically, different numerical approximations can be used for the log -survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`. - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type - `self.dtype`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.mean(name='mean')` {#beta_aa.mean} - -Mean. - - -- - - - -#### `tf.contrib.distributions.beta_aa.mode(name='mode')` {#beta_aa.mode} - -Mode. - -Additional documentation from `Beta`: - -Note that the mode for the Beta distribution is only defined -when `a > 1`, `b > 1`. This returns the mode when `a > 1` and `b > 1`, -and `NaN` otherwise. If `self.allow_nan_stats` is `False`, an exception -will be raised rather than returning `NaN`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.name` {#beta_aa.name} - -Name prepended to all ops created by this `Distribution`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.param_shapes(cls, sample_shape, name='DistributionParamShapes')` {#beta_aa.param_shapes} - -Shapes of parameters given the desired shape of a call to `sample()`. - -Subclasses should override static method `_param_shapes`. - -##### Args: - - -* `sample_shape`: `Tensor` or python list/tuple. Desired shape of a call to - `sample()`. -* `name`: name to prepend ops with. 
- -##### Returns: - - `dict` of parameter name to `Tensor` shapes. - - -- - - - -#### `tf.contrib.distributions.beta_aa.param_static_shapes(cls, sample_shape)` {#beta_aa.param_static_shapes} - -param_shapes with static (i.e. TensorShape) shapes. - -##### Args: - - -* `sample_shape`: `TensorShape` or python list/tuple. Desired shape of a call - to `sample()`. - -##### Returns: - - `dict` of parameter name to `TensorShape`. - -##### Raises: - - -* `ValueError`: if `sample_shape` is a `TensorShape` and is not fully defined. - - -- - - - -#### `tf.contrib.distributions.beta_aa.parameters` {#beta_aa.parameters} - -Dictionary of parameters used to instantiate this `Distribution`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.pdf(value, name='pdf', **condition_kwargs)` {#beta_aa.pdf} - -Probability density function. - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `prob`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - -##### Raises: - - -* `TypeError`: if not `is_continuous`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.pmf(value, name='pmf', **condition_kwargs)` {#beta_aa.pmf} - -Probability mass function. - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `pmf`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - -##### Raises: - - -* `TypeError`: if `is_continuous`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.prob(value, name='prob', **condition_kwargs)` {#beta_aa.prob} - -Probability density/mass function (depending on `is_continuous`). - - -Additional documentation from `Beta`: - -Note that the argument `x` must be a non-negative floating point tensor -whose shape can be broadcast with `self.a` and `self.b`. For fixed leading -dimensions, the last dimension represents counts for the corresponding Beta -distribution in `self.a` and `self.b`. `x` is only legal if `0 < x < 1`. - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `prob`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.sample(sample_shape=(), seed=None, name='sample', **condition_kwargs)` {#beta_aa.sample} - -Generate samples of the specified shape. - -Note that a call to `sample()` without arguments will generate a single -sample. - -##### Args: - - -* `sample_shape`: 0D or 1D `int32` `Tensor`. Shape of the generated samples. -* `seed`: Python integer seed for RNG -* `name`: name to give to the op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `samples`: a `Tensor` with prepended dimensions `sample_shape`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.sample_n(n, seed=None, name='sample_n', **condition_kwargs)` {#beta_aa.sample_n} - -Generate `n` samples. - -##### Args: - - -* `n`: `Scalar` `Tensor` of type `int32` or `int64`, the number of - observations to sample. -* `seed`: Python integer seed for RNG -* `name`: name to give to the op. 
-* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `samples`: a `Tensor` with a prepended dimension (n,). - -##### Raises: - - -* `TypeError`: if `n` is not an integer type. - - -- - - - -#### `tf.contrib.distributions.beta_aa.std(name='std')` {#beta_aa.std} - -Standard deviation. - - -- - - - -#### `tf.contrib.distributions.beta_aa.survival_function(value, name='survival_function', **condition_kwargs)` {#beta_aa.survival_function} - -Survival function. - -Given random variable `X`, the survival function is defined: - -``` -survival_function(x) = P[X > x] - = 1 - P[X <= x] - = 1 - cdf(x). -``` - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type - `self.dtype`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.validate_args` {#beta_aa.validate_args} - -Python boolean indicated possibly expensive checks are enabled. - - -- - - - -#### `tf.contrib.distributions.beta_aa.variance(name='variance')` {#beta_aa.variance} - -Variance. - - - -- - - - -### `class tf.contrib.distributions.beta_bb` {#beta_bb} - -Beta with softplus transform on `a` and `b`. -- - - - -#### `tf.contrib.distributions.beta_bb.__init__(a, b, validate_args=False, allow_nan_stats=True, name='BetaWithSoftplusAB')` {#beta_bb.__init__} - - - - -- - - - -#### `tf.contrib.distributions.beta_bb.a` {#beta_bb.a} - -Shape parameter. - - -- - - - -#### `tf.contrib.distributions.beta_bb.a_b_sum` {#beta_bb.a_b_sum} - -Sum of parameters. - - -- - - - -#### `tf.contrib.distributions.beta_bb.allow_nan_stats` {#beta_bb.allow_nan_stats} - -Python boolean describing behavior when a stat is undefined. - -Stats return +/- infinity when it makes sense. E.g., the variance -of a Cauchy distribution is infinity. However, sometimes the -statistic is undefined, e.g., if a distribution's pdf does not achieve a -maximum within the support of the distribution, the mode is undefined. -If the mean is undefined, then by definition the variance is undefined. -E.g. the mean for Student's T for df = 1 is undefined (no clear way to say -it is either + or - infinity), so the variance = E[(X - mean)^2] is also -undefined. - -##### Returns: - - -* `allow_nan_stats`: Python boolean. - - -- - - - -#### `tf.contrib.distributions.beta_bb.b` {#beta_bb.b} - -Shape parameter. - - -- - - - -#### `tf.contrib.distributions.beta_bb.batch_shape(name='batch_shape')` {#beta_bb.batch_shape} - -Shape of a single sample from a single event index as a 1-D `Tensor`. - -The product of the dimensions of the `batch_shape` is the number of -independent distributions of this kind the instance represents. - -##### Args: - - -* `name`: name to give to the op - -##### Returns: - - -* `batch_shape`: `Tensor`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.cdf(value, name='cdf', **condition_kwargs)` {#beta_bb.cdf} - -Cumulative distribution function. - -Given random variable `X`, the cumulative distribution function `cdf` is: - -``` -cdf(x) := P[X <= x] -``` - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `cdf`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. 
- - -- - - - -#### `tf.contrib.distributions.beta_bb.copy(**override_parameters_kwargs)` {#beta_bb.copy} - -Creates a deep copy of the distribution. - -Note: the copy distribution may continue to depend on the original -intialization arguments. - -##### Args: - - -* `**override_parameters_kwargs`: String/value dictionary of initialization - arguments to override with new values. - -##### Returns: - - -* `distribution`: A new instance of `type(self)` intitialized from the union - of self.parameters and override_parameters_kwargs, i.e., - `dict(self.parameters, **override_parameters_kwargs)`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.dtype` {#beta_bb.dtype} - -The `DType` of `Tensor`s handled by this `Distribution`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.entropy(name='entropy')` {#beta_bb.entropy} - -Shannon entropy in nats. - - -- - - - -#### `tf.contrib.distributions.beta_bb.event_shape(name='event_shape')` {#beta_bb.event_shape} - -Shape of a single sample from a single batch as a 1-D int32 `Tensor`. - -##### Args: - - -* `name`: name to give to the op - -##### Returns: - - -* `event_shape`: `Tensor`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.get_batch_shape()` {#beta_bb.get_batch_shape} - -Shape of a single sample from a single event index as a `TensorShape`. - -Same meaning as `batch_shape`. May be only partially defined. - -##### Returns: - - -* `batch_shape`: `TensorShape`, possibly unknown. - - -- - - - -#### `tf.contrib.distributions.beta_bb.get_event_shape()` {#beta_bb.get_event_shape} - -Shape of a single sample from a single batch as a `TensorShape`. - -Same meaning as `event_shape`. May be only partially defined. - -##### Returns: - - -* `event_shape`: `TensorShape`, possibly unknown. - - -- - - - -#### `tf.contrib.distributions.beta_bb.is_continuous` {#beta_bb.is_continuous} - - - - -- - - - -#### `tf.contrib.distributions.beta_bb.is_reparameterized` {#beta_bb.is_reparameterized} - - - - -- - - - -#### `tf.contrib.distributions.beta_bb.log_cdf(value, name='log_cdf', **condition_kwargs)` {#beta_bb.log_cdf} - -Log cumulative distribution function. - -Given random variable `X`, the cumulative distribution function `cdf` is: - -``` -log_cdf(x) := Log[ P[X <= x] ] -``` - -Often, a numerical approximation can be used for `log_cdf(x)` that yields -a more accurate answer than simply taking the logarithm of the `cdf` when -`x << -1`. - - -Additional documentation from `Beta`: - -Note that the argument `x` must be a non-negative floating point tensor -whose shape can be broadcast with `self.a` and `self.b`. For fixed leading -dimensions, the last dimension represents counts for the corresponding Beta -distribution in `self.a` and `self.b`. `x` is only legal if `0 < x < 1`. - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `logcdf`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.log_pdf(value, name='log_pdf', **condition_kwargs)` {#beta_bb.log_pdf} - -Log probability density function. - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `log_prob`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. 
- -##### Raises: - - -* `TypeError`: if not `is_continuous`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.log_pmf(value, name='log_pmf', **condition_kwargs)` {#beta_bb.log_pmf} - -Log probability mass function. - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `log_pmf`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - -##### Raises: - - -* `TypeError`: if `is_continuous`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.log_prob(value, name='log_prob', **condition_kwargs)` {#beta_bb.log_prob} - -Log probability density/mass function (depending on `is_continuous`). - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `log_prob`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.log_survival_function(value, name='log_survival_function', **condition_kwargs)` {#beta_bb.log_survival_function} - -Log survival function. - -Given random variable `X`, the survival function is defined: - -``` -log_survival_function(x) = Log[ P[X > x] ] - = Log[ 1 - P[X <= x] ] - = Log[ 1 - cdf(x) ] -``` - -Typically, different numerical approximations can be used for the log -survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`. - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type - `self.dtype`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.mean(name='mean')` {#beta_bb.mean} - -Mean. - - -- - - - -#### `tf.contrib.distributions.beta_bb.mode(name='mode')` {#beta_bb.mode} - -Mode. - -Additional documentation from `Beta`: - -Note that the mode for the Beta distribution is only defined -when `a > 1`, `b > 1`. This returns the mode when `a > 1` and `b > 1`, -and `NaN` otherwise. If `self.allow_nan_stats` is `False`, an exception -will be raised rather than returning `NaN`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.name` {#beta_bb.name} - -Name prepended to all ops created by this `Distribution`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.param_shapes(cls, sample_shape, name='DistributionParamShapes')` {#beta_bb.param_shapes} - -Shapes of parameters given the desired shape of a call to `sample()`. - -Subclasses should override static method `_param_shapes`. - -##### Args: - - -* `sample_shape`: `Tensor` or python list/tuple. Desired shape of a call to - `sample()`. -* `name`: name to prepend ops with. - -##### Returns: - - `dict` of parameter name to `Tensor` shapes. - - -- - - - -#### `tf.contrib.distributions.beta_bb.param_static_shapes(cls, sample_shape)` {#beta_bb.param_static_shapes} - -param_shapes with static (i.e. TensorShape) shapes. - -##### Args: - - -* `sample_shape`: `TensorShape` or python list/tuple. Desired shape of a call - to `sample()`. - -##### Returns: - - `dict` of parameter name to `TensorShape`. - -##### Raises: - - -* `ValueError`: if `sample_shape` is a `TensorShape` and is not fully defined. 
- - -- - - - -#### `tf.contrib.distributions.beta_bb.parameters` {#beta_bb.parameters} - -Dictionary of parameters used to instantiate this `Distribution`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.pdf(value, name='pdf', **condition_kwargs)` {#beta_bb.pdf} - -Probability density function. - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `prob`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - -##### Raises: - - -* `TypeError`: if not `is_continuous`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.pmf(value, name='pmf', **condition_kwargs)` {#beta_bb.pmf} - -Probability mass function. - -##### Args: +### `tf.contrib.distributions.register_pairwise_kls(kl_classes, kl_fn)` {#register_pairwise_kls} - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `pmf`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - -##### Raises: - - -* `TypeError`: if `is_continuous`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.prob(value, name='prob', **condition_kwargs)` {#beta_bb.prob} - -Probability density/mass function (depending on `is_continuous`). - - -Additional documentation from `Beta`: - -Note that the argument `x` must be a non-negative floating point tensor -whose shape can be broadcast with `self.a` and `self.b`. For fixed leading -dimensions, the last dimension represents counts for the corresponding Beta -distribution in `self.a` and `self.b`. `x` is only legal if `0 < x < 1`. - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `prob`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.sample(sample_shape=(), seed=None, name='sample', **condition_kwargs)` {#beta_bb.sample} - -Generate samples of the specified shape. - -Note that a call to `sample()` without arguments will generate a single -sample. - -##### Args: - - -* `sample_shape`: 0D or 1D `int32` `Tensor`. Shape of the generated samples. -* `seed`: Python integer seed for RNG -* `name`: name to give to the op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `samples`: a `Tensor` with prepended dimensions `sample_shape`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.sample_n(n, seed=None, name='sample_n', **condition_kwargs)` {#beta_bb.sample_n} - -Generate `n` samples. - -##### Args: - - -* `n`: `Scalar` `Tensor` of type `int32` or `int64`, the number of - observations to sample. -* `seed`: Python integer seed for RNG -* `name`: name to give to the op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `samples`: a `Tensor` with a prepended dimension (n,). - -##### Raises: - - -* `TypeError`: if `n` is not an integer type. - - -- - - - -#### `tf.contrib.distributions.beta_bb.std(name='std')` {#beta_bb.std} - -Standard deviation. 
- - -- - - - -#### `tf.contrib.distributions.beta_bb.survival_function(value, name='survival_function', **condition_kwargs)` {#beta_bb.survival_function} - -Survival function. - -Given random variable `X`, the survival function is defined: - -``` -survival_function(x) = P[X > x] - = 1 - P[X <= x] - = 1 - cdf(x). -``` +Registers `kl_fn` for each pair of classes in `kl_classes`. ##### Args: -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. +* `kl_classes`: classes for which to register KL implementation +* `kl_fn`: The function to use for the KL divergence. ##### Returns: - Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type - `self.dtype`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.validate_args` {#beta_bb.validate_args} - -Python boolean indicated possibly expensive checks are enabled. - - -- - - - -#### `tf.contrib.distributions.beta_bb.variance(name='variance')` {#beta_bb.variance} - -Variance. - + None diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.contrib.distributions.beta_bb.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.contrib.distributions.beta_bb.md deleted file mode 100644 index d7fe415774c27a..00000000000000 --- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard5/tf.contrib.distributions.beta_bb.md +++ /dev/null @@ -1,582 +0,0 @@ -Beta with softplus transform on `a` and `b`. -- - - - -#### `tf.contrib.distributions.beta_bb.__init__(a, b, validate_args=False, allow_nan_stats=True, name='BetaWithSoftplusAB')` {#beta_bb.__init__} - - - - -- - - - -#### `tf.contrib.distributions.beta_bb.a` {#beta_bb.a} - -Shape parameter. - - -- - - - -#### `tf.contrib.distributions.beta_bb.a_b_sum` {#beta_bb.a_b_sum} - -Sum of parameters. - - -- - - - -#### `tf.contrib.distributions.beta_bb.allow_nan_stats` {#beta_bb.allow_nan_stats} - -Python boolean describing behavior when a stat is undefined. - -Stats return +/- infinity when it makes sense. E.g., the variance -of a Cauchy distribution is infinity. However, sometimes the -statistic is undefined, e.g., if a distribution's pdf does not achieve a -maximum within the support of the distribution, the mode is undefined. -If the mean is undefined, then by definition the variance is undefined. -E.g. the mean for Student's T for df = 1 is undefined (no clear way to say -it is either + or - infinity), so the variance = E[(X - mean)^2] is also -undefined. - -##### Returns: - - -* `allow_nan_stats`: Python boolean. - - -- - - - -#### `tf.contrib.distributions.beta_bb.b` {#beta_bb.b} - -Shape parameter. - - -- - - - -#### `tf.contrib.distributions.beta_bb.batch_shape(name='batch_shape')` {#beta_bb.batch_shape} - -Shape of a single sample from a single event index as a 1-D `Tensor`. - -The product of the dimensions of the `batch_shape` is the number of -independent distributions of this kind the instance represents. - -##### Args: - - -* `name`: name to give to the op - -##### Returns: - - -* `batch_shape`: `Tensor`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.cdf(value, name='cdf', **condition_kwargs)` {#beta_bb.cdf} - -Cumulative distribution function. - -Given random variable `X`, the cumulative distribution function `cdf` is: - -``` -cdf(x) := P[X <= x] -``` - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. 
- -##### Returns: - - -* `cdf`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.copy(**override_parameters_kwargs)` {#beta_bb.copy} - -Creates a deep copy of the distribution. - -Note: the copy distribution may continue to depend on the original -intialization arguments. - -##### Args: - - -* `**override_parameters_kwargs`: String/value dictionary of initialization - arguments to override with new values. - -##### Returns: - - -* `distribution`: A new instance of `type(self)` intitialized from the union - of self.parameters and override_parameters_kwargs, i.e., - `dict(self.parameters, **override_parameters_kwargs)`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.dtype` {#beta_bb.dtype} - -The `DType` of `Tensor`s handled by this `Distribution`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.entropy(name='entropy')` {#beta_bb.entropy} - -Shannon entropy in nats. - - -- - - - -#### `tf.contrib.distributions.beta_bb.event_shape(name='event_shape')` {#beta_bb.event_shape} - -Shape of a single sample from a single batch as a 1-D int32 `Tensor`. - -##### Args: - - -* `name`: name to give to the op - -##### Returns: - - -* `event_shape`: `Tensor`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.get_batch_shape()` {#beta_bb.get_batch_shape} - -Shape of a single sample from a single event index as a `TensorShape`. - -Same meaning as `batch_shape`. May be only partially defined. - -##### Returns: - - -* `batch_shape`: `TensorShape`, possibly unknown. - - -- - - - -#### `tf.contrib.distributions.beta_bb.get_event_shape()` {#beta_bb.get_event_shape} - -Shape of a single sample from a single batch as a `TensorShape`. - -Same meaning as `event_shape`. May be only partially defined. - -##### Returns: - - -* `event_shape`: `TensorShape`, possibly unknown. - - -- - - - -#### `tf.contrib.distributions.beta_bb.is_continuous` {#beta_bb.is_continuous} - - - - -- - - - -#### `tf.contrib.distributions.beta_bb.is_reparameterized` {#beta_bb.is_reparameterized} - - - - -- - - - -#### `tf.contrib.distributions.beta_bb.log_cdf(value, name='log_cdf', **condition_kwargs)` {#beta_bb.log_cdf} - -Log cumulative distribution function. - -Given random variable `X`, the cumulative distribution function `cdf` is: - -``` -log_cdf(x) := Log[ P[X <= x] ] -``` - -Often, a numerical approximation can be used for `log_cdf(x)` that yields -a more accurate answer than simply taking the logarithm of the `cdf` when -`x << -1`. - - -Additional documentation from `Beta`: - -Note that the argument `x` must be a non-negative floating point tensor -whose shape can be broadcast with `self.a` and `self.b`. For fixed leading -dimensions, the last dimension represents counts for the corresponding Beta -distribution in `self.a` and `self.b`. `x` is only legal if `0 < x < 1`. - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `logcdf`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.log_pdf(value, name='log_pdf', **condition_kwargs)` {#beta_bb.log_pdf} - -Log probability density function. - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. 
- -##### Returns: - - -* `log_prob`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - -##### Raises: - - -* `TypeError`: if not `is_continuous`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.log_pmf(value, name='log_pmf', **condition_kwargs)` {#beta_bb.log_pmf} - -Log probability mass function. - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `log_pmf`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - -##### Raises: - - -* `TypeError`: if `is_continuous`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.log_prob(value, name='log_prob', **condition_kwargs)` {#beta_bb.log_prob} - -Log probability density/mass function (depending on `is_continuous`). - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `log_prob`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.log_survival_function(value, name='log_survival_function', **condition_kwargs)` {#beta_bb.log_survival_function} - -Log survival function. - -Given random variable `X`, the survival function is defined: - -``` -log_survival_function(x) = Log[ P[X > x] ] - = Log[ 1 - P[X <= x] ] - = Log[ 1 - cdf(x) ] -``` - -Typically, different numerical approximations can be used for the log -survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`. - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type - `self.dtype`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.mean(name='mean')` {#beta_bb.mean} - -Mean. - - -- - - - -#### `tf.contrib.distributions.beta_bb.mode(name='mode')` {#beta_bb.mode} - -Mode. - -Additional documentation from `Beta`: - -Note that the mode for the Beta distribution is only defined -when `a > 1`, `b > 1`. This returns the mode when `a > 1` and `b > 1`, -and `NaN` otherwise. If `self.allow_nan_stats` is `False`, an exception -will be raised rather than returning `NaN`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.name` {#beta_bb.name} - -Name prepended to all ops created by this `Distribution`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.param_shapes(cls, sample_shape, name='DistributionParamShapes')` {#beta_bb.param_shapes} - -Shapes of parameters given the desired shape of a call to `sample()`. - -Subclasses should override static method `_param_shapes`. - -##### Args: - - -* `sample_shape`: `Tensor` or python list/tuple. Desired shape of a call to - `sample()`. -* `name`: name to prepend ops with. - -##### Returns: - - `dict` of parameter name to `Tensor` shapes. - - -- - - - -#### `tf.contrib.distributions.beta_bb.param_static_shapes(cls, sample_shape)` {#beta_bb.param_static_shapes} - -param_shapes with static (i.e. TensorShape) shapes. - -##### Args: - - -* `sample_shape`: `TensorShape` or python list/tuple. Desired shape of a call - to `sample()`. - -##### Returns: - - `dict` of parameter name to `TensorShape`. 
- -##### Raises: - - -* `ValueError`: if `sample_shape` is a `TensorShape` and is not fully defined. - - -- - - - -#### `tf.contrib.distributions.beta_bb.parameters` {#beta_bb.parameters} - -Dictionary of parameters used to instantiate this `Distribution`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.pdf(value, name='pdf', **condition_kwargs)` {#beta_bb.pdf} - -Probability density function. - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `prob`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - -##### Raises: - - -* `TypeError`: if not `is_continuous`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.pmf(value, name='pmf', **condition_kwargs)` {#beta_bb.pmf} - -Probability mass function. - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `pmf`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - -##### Raises: - - -* `TypeError`: if `is_continuous`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.prob(value, name='prob', **condition_kwargs)` {#beta_bb.prob} - -Probability density/mass function (depending on `is_continuous`). - - -Additional documentation from `Beta`: - -Note that the argument `x` must be a non-negative floating point tensor -whose shape can be broadcast with `self.a` and `self.b`. For fixed leading -dimensions, the last dimension represents counts for the corresponding Beta -distribution in `self.a` and `self.b`. `x` is only legal if `0 < x < 1`. - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `prob`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.sample(sample_shape=(), seed=None, name='sample', **condition_kwargs)` {#beta_bb.sample} - -Generate samples of the specified shape. - -Note that a call to `sample()` without arguments will generate a single -sample. - -##### Args: - - -* `sample_shape`: 0D or 1D `int32` `Tensor`. Shape of the generated samples. -* `seed`: Python integer seed for RNG -* `name`: name to give to the op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `samples`: a `Tensor` with prepended dimensions `sample_shape`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.sample_n(n, seed=None, name='sample_n', **condition_kwargs)` {#beta_bb.sample_n} - -Generate `n` samples. - -##### Args: - - -* `n`: `Scalar` `Tensor` of type `int32` or `int64`, the number of - observations to sample. -* `seed`: Python integer seed for RNG -* `name`: name to give to the op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `samples`: a `Tensor` with a prepended dimension (n,). - -##### Raises: - - -* `TypeError`: if `n` is not an integer type. - - -- - - - -#### `tf.contrib.distributions.beta_bb.std(name='std')` {#beta_bb.std} - -Standard deviation. 
- - -- - - - -#### `tf.contrib.distributions.beta_bb.survival_function(value, name='survival_function', **condition_kwargs)` {#beta_bb.survival_function} - -Survival function. - -Given random variable `X`, the survival function is defined: - -``` -survival_function(x) = P[X > x] - = 1 - P[X <= x] - = 1 - cdf(x). -``` - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type - `self.dtype`. - - -- - - - -#### `tf.contrib.distributions.beta_bb.validate_args` {#beta_bb.validate_args} - -Python boolean indicated possibly expensive checks are enabled. - - -- - - - -#### `tf.contrib.distributions.beta_bb.variance(name='variance')` {#beta_bb.variance} - -Variance. - - diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.contrib.distributions.beta_aa.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.contrib.distributions.beta_aa.md deleted file mode 100644 index 08032b9ac520b0..00000000000000 --- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard7/tf.contrib.distributions.beta_aa.md +++ /dev/null @@ -1,582 +0,0 @@ -Beta with softplus transform on `a` and `b`. -- - - - -#### `tf.contrib.distributions.beta_aa.__init__(a, b, validate_args=False, allow_nan_stats=True, name='BetaWithSoftplusAB')` {#beta_aa.__init__} - - - - -- - - - -#### `tf.contrib.distributions.beta_aa.a` {#beta_aa.a} - -Shape parameter. - - -- - - - -#### `tf.contrib.distributions.beta_aa.a_b_sum` {#beta_aa.a_b_sum} - -Sum of parameters. - - -- - - - -#### `tf.contrib.distributions.beta_aa.allow_nan_stats` {#beta_aa.allow_nan_stats} - -Python boolean describing behavior when a stat is undefined. - -Stats return +/- infinity when it makes sense. E.g., the variance -of a Cauchy distribution is infinity. However, sometimes the -statistic is undefined, e.g., if a distribution's pdf does not achieve a -maximum within the support of the distribution, the mode is undefined. -If the mean is undefined, then by definition the variance is undefined. -E.g. the mean for Student's T for df = 1 is undefined (no clear way to say -it is either + or - infinity), so the variance = E[(X - mean)^2] is also -undefined. - -##### Returns: - - -* `allow_nan_stats`: Python boolean. - - -- - - - -#### `tf.contrib.distributions.beta_aa.b` {#beta_aa.b} - -Shape parameter. - - -- - - - -#### `tf.contrib.distributions.beta_aa.batch_shape(name='batch_shape')` {#beta_aa.batch_shape} - -Shape of a single sample from a single event index as a 1-D `Tensor`. - -The product of the dimensions of the `batch_shape` is the number of -independent distributions of this kind the instance represents. - -##### Args: - - -* `name`: name to give to the op - -##### Returns: - - -* `batch_shape`: `Tensor`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.cdf(value, name='cdf', **condition_kwargs)` {#beta_aa.cdf} - -Cumulative distribution function. - -Given random variable `X`, the cumulative distribution function `cdf` is: - -``` -cdf(x) := P[X <= x] -``` - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `cdf`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. 
- - -- - - - -#### `tf.contrib.distributions.beta_aa.copy(**override_parameters_kwargs)` {#beta_aa.copy} - -Creates a deep copy of the distribution. - -Note: the copy distribution may continue to depend on the original -intialization arguments. - -##### Args: - - -* `**override_parameters_kwargs`: String/value dictionary of initialization - arguments to override with new values. - -##### Returns: - - -* `distribution`: A new instance of `type(self)` intitialized from the union - of self.parameters and override_parameters_kwargs, i.e., - `dict(self.parameters, **override_parameters_kwargs)`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.dtype` {#beta_aa.dtype} - -The `DType` of `Tensor`s handled by this `Distribution`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.entropy(name='entropy')` {#beta_aa.entropy} - -Shannon entropy in nats. - - -- - - - -#### `tf.contrib.distributions.beta_aa.event_shape(name='event_shape')` {#beta_aa.event_shape} - -Shape of a single sample from a single batch as a 1-D int32 `Tensor`. - -##### Args: - - -* `name`: name to give to the op - -##### Returns: - - -* `event_shape`: `Tensor`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.get_batch_shape()` {#beta_aa.get_batch_shape} - -Shape of a single sample from a single event index as a `TensorShape`. - -Same meaning as `batch_shape`. May be only partially defined. - -##### Returns: - - -* `batch_shape`: `TensorShape`, possibly unknown. - - -- - - - -#### `tf.contrib.distributions.beta_aa.get_event_shape()` {#beta_aa.get_event_shape} - -Shape of a single sample from a single batch as a `TensorShape`. - -Same meaning as `event_shape`. May be only partially defined. - -##### Returns: - - -* `event_shape`: `TensorShape`, possibly unknown. - - -- - - - -#### `tf.contrib.distributions.beta_aa.is_continuous` {#beta_aa.is_continuous} - - - - -- - - - -#### `tf.contrib.distributions.beta_aa.is_reparameterized` {#beta_aa.is_reparameterized} - - - - -- - - - -#### `tf.contrib.distributions.beta_aa.log_cdf(value, name='log_cdf', **condition_kwargs)` {#beta_aa.log_cdf} - -Log cumulative distribution function. - -Given random variable `X`, the cumulative distribution function `cdf` is: - -``` -log_cdf(x) := Log[ P[X <= x] ] -``` - -Often, a numerical approximation can be used for `log_cdf(x)` that yields -a more accurate answer than simply taking the logarithm of the `cdf` when -`x << -1`. - - -Additional documentation from `Beta`: - -Note that the argument `x` must be a non-negative floating point tensor -whose shape can be broadcast with `self.a` and `self.b`. For fixed leading -dimensions, the last dimension represents counts for the corresponding Beta -distribution in `self.a` and `self.b`. `x` is only legal if `0 < x < 1`. - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `logcdf`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.log_pdf(value, name='log_pdf', **condition_kwargs)` {#beta_aa.log_pdf} - -Log probability density function. - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `log_prob`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. 
- -##### Raises: - - -* `TypeError`: if not `is_continuous`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.log_pmf(value, name='log_pmf', **condition_kwargs)` {#beta_aa.log_pmf} - -Log probability mass function. - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `log_pmf`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - -##### Raises: - - -* `TypeError`: if `is_continuous`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.log_prob(value, name='log_prob', **condition_kwargs)` {#beta_aa.log_prob} - -Log probability density/mass function (depending on `is_continuous`). - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `log_prob`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.log_survival_function(value, name='log_survival_function', **condition_kwargs)` {#beta_aa.log_survival_function} - -Log survival function. - -Given random variable `X`, the survival function is defined: - -``` -log_survival_function(x) = Log[ P[X > x] ] - = Log[ 1 - P[X <= x] ] - = Log[ 1 - cdf(x) ] -``` - -Typically, different numerical approximations can be used for the log -survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`. - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type - `self.dtype`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.mean(name='mean')` {#beta_aa.mean} - -Mean. - - -- - - - -#### `tf.contrib.distributions.beta_aa.mode(name='mode')` {#beta_aa.mode} - -Mode. - -Additional documentation from `Beta`: - -Note that the mode for the Beta distribution is only defined -when `a > 1`, `b > 1`. This returns the mode when `a > 1` and `b > 1`, -and `NaN` otherwise. If `self.allow_nan_stats` is `False`, an exception -will be raised rather than returning `NaN`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.name` {#beta_aa.name} - -Name prepended to all ops created by this `Distribution`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.param_shapes(cls, sample_shape, name='DistributionParamShapes')` {#beta_aa.param_shapes} - -Shapes of parameters given the desired shape of a call to `sample()`. - -Subclasses should override static method `_param_shapes`. - -##### Args: - - -* `sample_shape`: `Tensor` or python list/tuple. Desired shape of a call to - `sample()`. -* `name`: name to prepend ops with. - -##### Returns: - - `dict` of parameter name to `Tensor` shapes. - - -- - - - -#### `tf.contrib.distributions.beta_aa.param_static_shapes(cls, sample_shape)` {#beta_aa.param_static_shapes} - -param_shapes with static (i.e. TensorShape) shapes. - -##### Args: - - -* `sample_shape`: `TensorShape` or python list/tuple. Desired shape of a call - to `sample()`. - -##### Returns: - - `dict` of parameter name to `TensorShape`. - -##### Raises: - - -* `ValueError`: if `sample_shape` is a `TensorShape` and is not fully defined. 
- - -- - - - -#### `tf.contrib.distributions.beta_aa.parameters` {#beta_aa.parameters} - -Dictionary of parameters used to instantiate this `Distribution`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.pdf(value, name='pdf', **condition_kwargs)` {#beta_aa.pdf} - -Probability density function. - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `prob`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - -##### Raises: - - -* `TypeError`: if not `is_continuous`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.pmf(value, name='pmf', **condition_kwargs)` {#beta_aa.pmf} - -Probability mass function. - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `pmf`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - -##### Raises: - - -* `TypeError`: if `is_continuous`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.prob(value, name='prob', **condition_kwargs)` {#beta_aa.prob} - -Probability density/mass function (depending on `is_continuous`). - - -Additional documentation from `Beta`: - -Note that the argument `x` must be a non-negative floating point tensor -whose shape can be broadcast with `self.a` and `self.b`. For fixed leading -dimensions, the last dimension represents counts for the corresponding Beta -distribution in `self.a` and `self.b`. `x` is only legal if `0 < x < 1`. - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `prob`: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with - values of type `self.dtype`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.sample(sample_shape=(), seed=None, name='sample', **condition_kwargs)` {#beta_aa.sample} - -Generate samples of the specified shape. - -Note that a call to `sample()` without arguments will generate a single -sample. - -##### Args: - - -* `sample_shape`: 0D or 1D `int32` `Tensor`. Shape of the generated samples. -* `seed`: Python integer seed for RNG -* `name`: name to give to the op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `samples`: a `Tensor` with prepended dimensions `sample_shape`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.sample_n(n, seed=None, name='sample_n', **condition_kwargs)` {#beta_aa.sample_n} - -Generate `n` samples. - -##### Args: - - -* `n`: `Scalar` `Tensor` of type `int32` or `int64`, the number of - observations to sample. -* `seed`: Python integer seed for RNG -* `name`: name to give to the op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - -* `samples`: a `Tensor` with a prepended dimension (n,). - -##### Raises: - - -* `TypeError`: if `n` is not an integer type. - - -- - - - -#### `tf.contrib.distributions.beta_aa.std(name='std')` {#beta_aa.std} - -Standard deviation. - - -- - - - -#### `tf.contrib.distributions.beta_aa.survival_function(value, name='survival_function', **condition_kwargs)` {#beta_aa.survival_function} - -Survival function. 
- -Given random variable `X`, the survival function is defined: - -``` -survival_function(x) = P[X > x] - = 1 - P[X <= x] - = 1 - cdf(x). -``` - -##### Args: - - -* `value`: `float` or `double` `Tensor`. -* `name`: The name to give this op. -* `**condition_kwargs`: Named arguments forwarded to subclass implementation. - -##### Returns: - - Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type - `self.dtype`. - - -- - - - -#### `tf.contrib.distributions.beta_aa.validate_args` {#beta_aa.validate_args} - -Python boolean indicated possibly expensive checks are enabled. - - -- - - - -#### `tf.contrib.distributions.beta_aa.variance(name='variance')` {#beta_aa.variance} - -Variance. - - diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.distributions.register_pairwise_kls.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.distributions.register_pairwise_kls.md new file mode 100644 index 00000000000000..d3b9ff52119dfd --- /dev/null +++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.contrib.distributions.register_pairwise_kls.md @@ -0,0 +1,14 @@ +### `tf.contrib.distributions.register_pairwise_kls(kl_classes, kl_fn)` {#register_pairwise_kls} + +Registers `kl_fn` for each pair of classes in `kl_classes`. + +##### Args: + + +* `kl_classes`: classes for which to register KL implementation +* `kl_fn`: The function to use for the KL divergence. + +##### Returns: + + None + diff --git a/tensorflow/g3doc/api_docs/python/index.md b/tensorflow/g3doc/api_docs/python/index.md index 036067fa102104..aa908f64785353 100644 --- a/tensorflow/g3doc/api_docs/python/index.md +++ b/tensorflow/g3doc/api_docs/python/index.md @@ -729,8 +729,6 @@ * [`Bernoulli`](../../api_docs/python/contrib.distributions.md#Bernoulli) * [`BernoulliWithSigmoidP`](../../api_docs/python/contrib.distributions.md#BernoulliWithSigmoidP) * [`Beta`](../../api_docs/python/contrib.distributions.md#Beta) - * [`beta_aa`](../../api_docs/python/contrib.distributions.md#beta_aa) - * [`beta_bb`](../../api_docs/python/contrib.distributions.md#beta_bb) * [`BetaWithSoftplusAB`](../../api_docs/python/contrib.distributions.md#BetaWithSoftplusAB) * [`Binomial`](../../api_docs/python/contrib.distributions.md#Binomial) * [`Categorical`](../../api_docs/python/contrib.distributions.md#Categorical) @@ -762,6 +760,7 @@ * [`NormalWithSoftplusSigma`](../../api_docs/python/contrib.distributions.md#NormalWithSoftplusSigma) * [`Poisson`](../../api_docs/python/contrib.distributions.md#Poisson) * [`QuantizedDistribution`](../../api_docs/python/contrib.distributions.md#QuantizedDistribution) + * [`register_pairwise_kls`](../../api_docs/python/contrib.distributions.md#register_pairwise_kls) * [`RegisterKL`](../../api_docs/python/contrib.distributions.md#RegisterKL) * [`StudentT`](../../api_docs/python/contrib.distributions.md#StudentT) * [`StudentTWithAbsDfSoftplusSigma`](../../api_docs/python/contrib.distributions.md#StudentTWithAbsDfSoftplusSigma) diff --git a/tensorflow/g3doc/how_tos/embedding_viz/index.md b/tensorflow/g3doc/how_tos/embedding_viz/index.md new file mode 100644 index 00000000000000..59e893684968b2 --- /dev/null +++ b/tensorflow/g3doc/how_tos/embedding_viz/index.md @@ -0,0 +1,270 @@ +# TensorBoard: Embedding Visualization + +Embeddings are ubiquitous in machine learning, appearing in recommender systems, +NLP, and many other applications. 
Indeed, in the context of TensorFlow, it's
+natural to view tensors (or slices of tensors) as points in space, so almost any
+TensorFlow system will naturally give rise to various embeddings.
+
+To learn more about embeddings and how to train them, see the
+[Vector Representations of Words](../../tutorials/word2vec/index.md) tutorial.
+If you are interested in embeddings of images, check out
+[this article](http://colah.github.io/posts/2014-10-Visualizing-MNIST/) for
+interesting visualizations of MNIST images. On the other hand, if you are
+interested in word embeddings,
+[this article](http://colah.github.io/posts/2015-01-Visualizing-Representations/)
+gives a good introduction.
+
+TensorBoard has a built-in visualizer, called the Embedding Projector, for
+interactive visualization and analysis of high-dimensional data like embeddings.
+It is meant to be useful for developers and researchers alike. It reads from the
+checkpoint files where you save your TensorFlow variables. Although it's most
+useful for embeddings, it will load any 2D tensor, potentially including your
+training weights.
+
+
+By default, the Embedding Projector performs 3-dimensional
+[principal component analysis](https://en.wikipedia.org/wiki/Principal_component_analysis),
+meaning it takes your high-dimensional data and tries to find a
+structure-preserving projection onto three-dimensional space. Basically, it does
+this by rotating your data so that the first three dimensions reveal as much of
+the variance in the data as possible. There's a nice visual explanation
+[here](http://setosa.io/ev/principal-component-analysis/). Another extremely
+useful projection you can use is
+[t-SNE](https://en.wikipedia.org/wiki/T-distributed_stochastic_neighbor_embedding).
+We talk more about t-SNE later in the tutorial.
+
+If you are working with an embedding, you'll probably want to attach
+labels/images to the data points to tell the visualizer what label/image each
+data point corresponds to. You can do this by generating a metadata file and
+attaching it to the tensor using our Python API, or by uploading it to an
+already-running TensorBoard.
+
+
+## Setup
+
+For in-depth information on how to run TensorBoard and make sure you are
+logging all the necessary information,
+see [TensorBoard: Visualizing Learning](../../how_tos/summaries_and_tensorboard/index.md).
+
+To visualize your embeddings, there are three things you need to do:
+
+1) Set up a 2D tensor variable that holds your embedding(s).
+
+```python
+embedding_var = tf.Variable(....)
+```
+
+2) Periodically save your embeddings in a LOG_DIR.
+
+```python
+saver = tf.train.Saver()
+saver.save(session, os.path.join(LOG_DIR, "model.ckpt"), step)
+```
+
+The following step is not required; however, if you have any metadata
+(labels, images) associated with your embedding, you need to link it to the
+tensor so TensorBoard knows about it.
+
+3) Associate metadata with your embedding.
+
+```python
+from tensorflow.contrib.tensorboard.plugins import projector
+# Use the same LOG_DIR where you stored your checkpoint.
+summary_writer = tf.train.SummaryWriter(LOG_DIR)
+
+# Format: tensorflow/contrib/tensorboard/plugins/projector/projector_config.proto
+config = projector.ProjectorConfig()
+
+# You can add multiple embeddings. Here we add only one.
+embedding = config.embedding.add()
+embedding.tensor_name = embedding_var.name
+# Link this tensor to its metadata file (e.g. labels).
+embedding.metadata_path = os.path.join(LOG_DIR, 'metadata.tsv')
+
+# Saves a configuration file that TensorBoard will read during startup.
+projector.visualize_embeddings(summary_writer, config)
+```
+
+After running your model and training your embeddings, run TensorBoard and point
+it to the LOG_DIR of the job.
+
+```
+tensorboard --logdir=LOG_DIR
+```
+
+Then click on the *Embeddings* tab on the top pane
+and select the appropriate run (if there is more than one run).
+
+
+## Metadata (optional)
+Usually embeddings have metadata associated with them (e.g. labels, images). The
+metadata should be stored in a separate file outside of the model checkpoint
+since the metadata is not a trainable parameter of the model. The format should
+be a TSV file with the first line containing column headers and subsequent lines
+containing the metadata values. Here's an example:
+
+```
+Name\tType\n
+Caterpie\tBug\n
+Charmeleon\tFire\n
+…
+```
+
+There is no explicit key shared with the main data file; instead, the order in
+the metadata file is assumed to match the order in the embedding tensor. In
+other words, the first line is the header information and the (i+1)-th line in
+the metadata file corresponds to the i-th row of the embedding tensor stored in
+the checkpoint.
+
+Note: If the TSV metadata file has only a single column, then we don’t expect a
+header row, and assume each row is the label of the embedding. We include this
+exception because it matches the commonly-used "vocab file" format.
+
+### Images
+If you have images associated with your embeddings, you will need to
+produce a single image consisting of small thumbnails of each data point.
+This is known as the
+[sprite image](https://www.google.com/webhp#q=what+is+a+sprite+image).
+The sprite should have an equal number of rows and columns, with thumbnails
+stored in row-first order: the first data point placed in the top left and the
+last data point in the bottom right:
+<table style="width:100%;">
+  <tr>
+    <td>0</td><td>1</td><td>2</td>
+  </tr>
+  <tr>
+    <td>3</td><td>4</td><td>5</td>
+  </tr>
+  <tr>
+    <td>6</td><td>7</td><td></td>
+  </tr>
+</table>
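+
+The layout above can be produced mechanically. The following is a minimal
+sketch (our own, not part of the TensorBoard API), assuming your thumbnails
+are grayscale images stored in an `[n, h, w]` NumPy array:
+
+```python
+import numpy as np
+
+def images_to_sprite(thumbnails):
+  """Packs [n, h, w] thumbnails into one square sprite, row-first."""
+  n, h, w = thumbnails.shape
+  grid = int(np.ceil(np.sqrt(n)))  # Same number of rows and columns.
+  # Pad with blank thumbnails; trailing cells of the last row stay empty.
+  padded = np.zeros((grid * grid, h, w), dtype=thumbnails.dtype)
+  padded[:n] = thumbnails
+  # Reshape to [row, col, y, x], then reorder axes so that thumbnail
+  # (row, col) lands at pixel block (row * h, col * w).
+  sprite = padded.reshape(grid, grid, h, w).transpose(0, 2, 1, 3)
+  return sprite.reshape(grid * h, grid * w)
+```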
+
+Note that the last row of the sprite doesn't have to be filled. For a
+concrete example of a sprite, see
+[this sprite image](../../images/mnist_10k_sprite.png) of 10,000 MNIST digits
+(arranged in a 100x100 grid).
+
+Note: We currently support sprites up to 4096px x 4096px.
+
+
+After constructing the sprite, you need to tell the Embedding Projector where
+to find it:
+
+```python
+embedding.sprite.image_path = PATH_TO_SPRITE_IMAGE
+# Specify the width and height of a single thumbnail.
+embedding.single_image_dim.extend([w, h])
+```
+
+## Interaction
+
+The Embedding Projector has three panels:
+
+1. *Data panel* on the top left, where you can choose the run, the embedding
+   tensor, and the data columns to color and label points by.
+2. *Projections panel* on the bottom left, where you choose the type of
+   projection (e.g. PCA, t-SNE).
+3. *Inspector panel* on the right side, where you can search for particular
+   points and see a list of nearest neighbors.
+
+### Projections
+The Embedding Projector has three methods of reducing the dimensionality of a
+data set: two linear and one nonlinear. Each method can be used to create either
+a two- or three-dimensional view.
+
+**Principal Component Analysis** A straightforward technique for reducing
+dimensions is Principal Component Analysis (PCA). The Embedding Projector
+computes the top 10 principal components. The menu lets you project those
+components onto any combination of two or three. PCA is a linear projection,
+often effective at examining global geometry.
+
+**t-SNE** A popular non-linear dimensionality reduction technique is t-SNE.
+The Embedding Projector offers both two- and three-dimensional t-SNE views.
+Layout is performed client-side, animating every step of the algorithm. Because
+t-SNE often preserves some local structure, it is useful for exploring local
+neighborhoods and finding clusters. Although extremely useful for visualizing
+high-dimensional data, t-SNE plots can sometimes be mysterious or misleading.
+See this [great article](http://distill.pub/2016/misread-tsne/) for how to use
+t-SNE effectively.
+
+**Custom** You can also construct specialized linear projections based on text
+searches for finding meaningful directions in space. To define a projection
+axis, enter two search strings or regular expressions. The program computes the
+centroids of the sets of points whose labels match these searches, and uses the
+difference vector between the centroids as a projection axis.
+
+### Navigation
+
+To explore a data set, you can navigate the views in either a 2D or a 3D mode,
+zooming, rotating, and panning using natural click-and-drag gestures.
+Clicking on a point causes the right pane to show an explicit textual list of
+nearest neighbors, along with distances to the current point. The
+nearest-neighbor points themselves are highlighted on the projection.
+
+Zooming into a cluster gives some information, but it is sometimes more
+helpful to restrict the view to a subset of points and perform projections only
+on those points. To do so, you can select points in multiple ways:
+
+1. After clicking on a point, its nearest neighbors are also selected.
+2. After a search, the points matching the query are selected.
+3. Enabling selection, clicking on a point and dragging defines a selection
+   sphere.
+
+After selecting a set of points, you can isolate those points for
+further analysis on their own with the "Isolate Points" button in the Inspector
+pane on the right-hand side.
+
+
+![Selection of nearest neighbors](../../images/embedding-nearest-points.png "Selection of nearest neighbors")
+*Selection of the nearest neighbors of “important” in a word embedding dataset.*
+
+The combination of filtering with custom projection can be powerful. Below, we
+filtered the 100 nearest neighbors of “politics” and projected them onto the
+“best” - “worst” vector as the x-axis. The y-axis is random.
+
+You can see that on the right side we have “ideas”, “science”, “perspective”,
+“journalism”, while on the left we have “crisis”, “violence” and “conflict”.
+
+*Custom projection controls (left) and the resulting custom projection of the
+neighbors of “politics” onto the “best” - “worst” vector (right).*
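+
+The centroid-difference axis described above is easy to compute outside the
+Projector as well. This is a rough sketch of the idea (our own code, not the
+Projector's implementation), assuming `embeddings` is an `[n, d]` NumPy array
+and `labels` is a list of `n` strings:
+
+```python
+import numpy as np
+
+def custom_projection_axis(embeddings, labels, pos_query, neg_query):
+  """Unit vector from the neg_query centroid to the pos_query centroid."""
+  # Boolean masks select the points whose labels match each search string.
+  pos = embeddings[[pos_query in label for label in labels]].mean(axis=0)
+  neg = embeddings[[neg_query in label for label in labels]].mean(axis=0)
+  axis = pos - neg
+  return axis / np.linalg.norm(axis)
+
+# x coordinate of every point along the "best" - "worst" direction:
+# xs = embeddings.dot(custom_projection_axis(embeddings, labels, "best", "worst"))
+```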
+
+### Collaborative Features
+
+To share your findings, you can use the bookmark panel in the bottom-right
+corner and save the current state (including computed coordinates of any
+projection) as a small file. The Projector can then be pointed to a set of one
+or more of these files, producing the panel below. Other users can then walk
+through a sequence of bookmarks.
+
+*Bookmark panel*
diff --git a/tensorflow/g3doc/how_tos/index.md b/tensorflow/g3doc/how_tos/index.md
index 1b144b1e43bf22..ceac8237a476dd 100644
--- a/tensorflow/g3doc/how_tos/index.md
+++ b/tensorflow/g3doc/how_tos/index.md
@@ -36,6 +36,13 @@ you understand the dataflow graph and debug it.
 
 [View Tutorial](graph_viz/index.md)
 
+## TensorBoard: Embedding Visualization
+
+This tutorial describes how to use the embedding projector in TensorBoard to
+visualize your embeddings.
+
+[View Tutorial](embedding_viz/index.md)
+
 ## Reading Data
 
 This tutorial describes the three main methods of getting data into your
diff --git a/tensorflow/g3doc/how_tos/leftnav_files b/tensorflow/g3doc/how_tos/leftnav_files
index 3e350b2b26fa2f..39f4fa947cd632 100644
--- a/tensorflow/g3doc/how_tos/leftnav_files
+++ b/tensorflow/g3doc/how_tos/leftnav_files
@@ -2,6 +2,7 @@ variables/index.md
 ../tutorials/mnist/tf/index.md
 summaries_and_tensorboard/index.md
 graph_viz/index.md
+embedding_viz/index.md
 reading_data/index.md
 threading_and_queues/index.md
 distributed/index.md
diff --git a/tensorflow/python/debug/BUILD b/tensorflow/python/debug/BUILD
index b948e43501d89a..658fb628a6e992 100644
--- a/tensorflow/python/debug/BUILD
+++ b/tensorflow/python/debug/BUILD
@@ -123,6 +123,13 @@ py_binary(
     deps = ["//tensorflow:tensorflow_py"],
 )
 
+py_binary(
+    name = "debug_errors",
+    srcs = ["examples/debug_errors.py"],
+    srcs_version = "PY2AND3",
+    deps = ["//tensorflow:tensorflow_py"],
+)
+
 py_binary(
     name = "debug_mnist",
     srcs = ["examples/debug_mnist.py"],
diff --git a/tensorflow/python/debug/cli/analyzer_cli.py b/tensorflow/python/debug/cli/analyzer_cli.py
index 872624707a83b3..868aeaef2073d6 100644
--- a/tensorflow/python/debug/cli/analyzer_cli.py
+++ b/tensorflow/python/debug/cli/analyzer_cli.py
@@ -489,7 +489,8 @@ def print_tensor(self, args, screen_info=None):
       return self._error("\"%s\" is not a valid tensor name" %
                          parsed.tensor_name)
 
-    if not self._debug_dump.node_exists(node_name):
+    if (self._debug_dump.loaded_partition_graphs() and
+        not self._debug_dump.node_exists(node_name)):
       return self._error(
           "Node \"%s\" does not exist in partition graphs" % node_name)
 
diff --git a/tensorflow/python/debug/cli/tensor_format.py b/tensorflow/python/debug/cli/tensor_format.py
index 83f1bd70d87b8d..0f9c5dab1f3645 100644
--- a/tensorflow/python/debug/cli/tensor_format.py
+++ b/tensorflow/python/debug/cli/tensor_format.py
@@ -56,7 +56,12 @@ def format_tensor(
   if tensor_name is not None:
     lines.append("Tensor \"%s\":" % tensor_name)
 
-  if not isinstance(tensor, np.ndarray):
+  if tensor is None:
+    if lines:
+      lines.append("")
+    lines.append("Uninitialized tensor")
+    return debugger_cli_common.RichTextLines(lines)
+  elif not isinstance(tensor, np.ndarray):
     # If tensor is not a np.ndarray, return simple text-line representation of
     # the object without annotations.
if lines: diff --git a/tensorflow/python/debug/cli/tensor_format_test.py b/tensorflow/python/debug/cli/tensor_format_test.py index f2ea29622163e5..5d9b150e7c6219 100644 --- a/tensorflow/python/debug/cli/tensor_format_test.py +++ b/tensorflow/python/debug/cli/tensor_format_test.py @@ -260,8 +260,7 @@ def testFormatTensorWithEllipses(self): def testFormatNone(self): out = tensor_format.format_tensor(None, "a") - self.assertEqual( - ["Tensor \"a\":", "", "None"], out.lines) + self.assertEqual(["Tensor \"a\":", "", "Uninitialized tensor"], out.lines) def testLocateTensorElement1DNoEllipsis(self): a = np.zeros(20) @@ -450,8 +449,7 @@ def testLocateTensorElement3DWithEllipses(self): def testLocateTensorElementAnnotationsUnavailable(self): out = tensor_format.format_tensor(None, "a") - self.assertEqual( - ["Tensor \"a\":", "", "None"], out.lines) + self.assertEqual(["Tensor \"a\":", "", "Uninitialized tensor"], out.lines) with self.assertRaisesRegexp( AttributeError, "tensor_metadata is not available in annotations"): diff --git a/tensorflow/python/debug/debug_data.py b/tensorflow/python/debug/debug_data.py index 263d09fb185cb1..c5bcf0a904e400 100644 --- a/tensorflow/python/debug/debug_data.py +++ b/tensorflow/python/debug/debug_data.py @@ -17,6 +17,7 @@ from __future__ import division from __future__ import print_function +import collections import os import numpy as np @@ -329,6 +330,14 @@ def __init__(self, dump_root, partition_graphs=None, validate=True): self._dump_root = dump_root self._dump_tensor_data = [] + # A map from node name to debug watches. + # The key is the watched node name. + # The value is a dictionary. + # Of this dictionary, the key is the watched_output_slot. + # The value is a set of debug ops watching this output slot. + self._debug_watches = collections.defaultdict( + lambda: collections.defaultdict(set)) + for root, _, files in os.walk(self._dump_root): for f in files: if f.count("_") < 3: @@ -337,8 +346,16 @@ def __init__(self, dump_root, partition_graphs=None, validate=True): debug_dump_rel_path = os.path.join( os.path.relpath(root, self._dump_root), f) - self._dump_tensor_data.append( - DebugTensorDatum(self._dump_root, debug_dump_rel_path)) + datum = DebugTensorDatum(self._dump_root, debug_dump_rel_path) + self._dump_tensor_data.append(datum) + + # Attempt to load the debug watches from the tensor dump files first, + # before loading the full set of debug watches from the partition + # graphs as done further below. + # This is necessary because sometimes the partition graphs may not be + # available, e.g., when the run errors out. + self._debug_watches[datum.node_name][datum.output_slot].add( + datum.debug_op) # Sort the data by ascending timestamp. # This sorting order reflects the order in which the TensorFlow @@ -384,7 +401,6 @@ def __init__(self, dump_root, partition_graphs=None, validate=True): self._devices = None self._node_devices = None self._node_op_types = None - self._debug_watches = None # Check the dump data against partition executor graphs. if partition_graphs: @@ -449,13 +465,6 @@ def _load_partition_graphs(self, partition_graphs): # A map from node name to control recipients of the node. self._node_ctrl_recipients = {} - # A map from node name to debug watches. - # The key is the watched node name. - # The value is a dictionary. - # Of this dictionary, the key is the watched_output_slot. - # The value is a list of debug ops watching this output slot. 
-    self._debug_watches = {}
-
     # A map from node name to devices (as indices to self._devices)
     self._devices = []
     self._node_devices = {}
@@ -475,17 +484,8 @@ def _load_partition_graphs(self, partition_graphs):
           (watched_node_name, watched_output_slot, _,
            debug_op) = _parse_debug_node_name(node.name)
-          if watched_node_name not in self._debug_watches:
-            self._debug_watches[
-                watched_node_name] = {watched_output_slot: [debug_op]}
-          else:
-            if watched_output_slot not in self._debug_watches[
-                watched_node_name]:
-              self._debug_watches[watched_node_name][
-                  watched_output_slot] = [debug_op]
-            else:
-              self._debug_watches[watched_node_name][watched_node_name].append(
-                  debug_op)
+          self._debug_watches[watched_node_name][watched_output_slot].add(
+              debug_op)
 
           continue
 
@@ -637,6 +637,10 @@ def _validate_dump_with_graphs(self):
       else:
         del recipient_pending_inputs[recipient_pending_inputs.index(node)]
 
+  def loaded_partition_graphs(self):
+    """Test whether partition graphs have been loaded."""
+    return self._partition_graphs is not None
+
   def partition_graphs(self):
     """Get the partition graphs.
 
diff --git a/tensorflow/python/debug/examples/README.md b/tensorflow/python/debug/examples/README.md
index f0aaf6b0fc8b64..7840f039f7eace 100644
--- a/tensorflow/python/debug/examples/README.md
+++ b/tensorflow/python/debug/examples/README.md
@@ -303,3 +303,20 @@ Frequently-asked questions:
 **A**: In your BUILD rule, declare the dependency:
 `"//tensorflow:tensorflow_py"`. In your Python file, do:
 `from tensorflow.python import debug as tf_debug`
+
+* **Q**: Does tfdbg help debug runtime errors such as shape mismatches?
+**A**: Yes. tfdbg intercepts errors raised by ops at runtime and presents them,
+  along with debugging instructions, to the user in the CLI. See these
+  examples:
+
+  ```
+  # Debugging shape mismatch during matrix multiplication.
+  bazel build -c opt tensorflow/python/debug:debug_errors && \
+    bazel-bin/tensorflow/python/debug/debug_errors \
+    -error shape_mismatch --debug
+
+  # Debugging uninitialized variable.
+  bazel build -c opt tensorflow/python/debug:debug_errors && \
+    bazel-bin/tensorflow/python/debug/debug_errors \
+    -error uninitialized_variable --debug
+  ```
diff --git a/tensorflow/python/debug/examples/debug_errors.py b/tensorflow/python/debug/examples/debug_errors.py
new file mode 100644
index 00000000000000..83e2b5d7e72023
--- /dev/null
+++ b/tensorflow/python/debug/examples/debug_errors.py
@@ -0,0 +1,61 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Example of debugging TensorFlow runtime errors using tfdbg."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+import tensorflow as tf
+
+from tensorflow.python import debug as tf_debug
+
+flags = tf.app.flags
+FLAGS = flags.FLAGS
+flags.DEFINE_string("error", "shape_mismatch", "Type of the error to generate "
+                    "(shape_mismatch | uninitialized_variable | no_error).")
+flags.DEFINE_boolean("debug", False,
+                     "Use debugger to track down bad values during the run.")
+
+
+def main(_):
+  sess = tf.Session()
+
+  # Construct the TensorFlow network.
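+  # Depending on FLAGS.error, one of the runs below will fail: feeding
+  # ph_float a [3, 1] value makes x = transpose(ph_float) a [1, 3] matrix,
+  # so matmul(m, x) raises a shape error at runtime, while fetching z
+  # without initializing v raises an uninitialized-variable error.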
+ ph_float = tf.placeholder(tf.float32, name="ph_float") + x = tf.transpose(ph_float, name="x") + v = tf.Variable(np.array([[-2.0], [-3.0], [6.0]], dtype=np.float32), name="v") + m = tf.constant( + np.array([[0.0, 1.0, 2.0], [-4.0, -1.0, 0.0]]), + dtype=tf.float32, + name="m") + y = tf.matmul(m, x, name="y") + z = tf.matmul(m, v, name="z") + + if FLAGS.debug: + sess = tf_debug.LocalCLIDebugWrapperSession(sess) + + if FLAGS.error == "shape_mismatch": + print(sess.run(y, feed_dict={ph_float: np.array([[0.0], [1.0], [2.0]])})) + elif FLAGS.error == "uninitialized_variable": + print(sess.run(z)) + elif FLAGS.error == "no_error": + print(sess.run(y, feed_dict={ph_float: np.array([[0.0, 1.0, 2.0]])})) + else: + raise ValueError("Unrecognized error type: " + FLAGS.error) + + +if __name__ == "__main__": + tf.app.run() diff --git a/tensorflow/python/debug/session_debug_test.py b/tensorflow/python/debug/session_debug_test.py index 87f324390b0932..1667be373b11c7 100644 --- a/tensorflow/python/debug/session_debug_test.py +++ b/tensorflow/python/debug/session_debug_test.py @@ -31,6 +31,7 @@ from tensorflow.python.debug import debug_utils from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import control_flow_ops @@ -117,6 +118,7 @@ def testDumpToFileOverlappingParentDir(self): dump = debug_data.DebugDumpDir( self._dump_root, partition_graphs=run_metadata.partition_graphs) + self.assertTrue(dump.loaded_partition_graphs()) # Verify the dumped tensor values for u and v. self.assertEqual(2, dump.size) @@ -178,6 +180,7 @@ def testDifferentWatchesOnDifferentRuns(self): dump = debug_data.DebugDumpDir( run_dump_root, partition_graphs=run_metadata.partition_graphs) + self.assertTrue(dump.loaded_partition_graphs()) # Each run should have generated only one dumped tensor, not two. 
self.assertEqual(1, dump.size) @@ -610,6 +613,7 @@ def testDumpGraphStructureLookup(self): with self.assertRaisesRegexp(RuntimeError, "No partition graphs have been loaded"): dump.partition_graphs() + self.assertFalse(dump.loaded_partition_graphs()) with self.assertRaisesRegexp( RuntimeError, "Node inputs are not loaded from partition graphs yet"): @@ -864,6 +868,40 @@ def testWatchingUnconnectedOutputTensor(self): self.assertAllClose([0, 0, 1, 2, 2], unique_x_slot_1_dumps[0].get_tensor()) + def testRunWithError(self): + """Test the debug tensor dumping when error occurs in graph runtime.""" + + with session.Session() as sess: + ph = tf.placeholder(tf.float32, name="mismatch/ph") + x = tf.transpose(ph, name="mismatch/x") + m = constant_op.constant( + np.array( + [[1.0, 2.0]], dtype=np.float32), name="mismatch/m") + y = math_ops.matmul(m, x, name="mismatch/y") + + run_options = config_pb2.RunOptions(output_partition_graphs=True) + debug_utils.watch_graph( + run_options, + sess.graph, + debug_ops=["DebugIdentity"], + debug_urls="file://%s" % self._dump_root) + + with self.assertRaises(errors.OpError): + sess.run(y, + options=run_options, + feed_dict={ph: np.array([[-3.0], [0.0]])}) + + dump = debug_data.DebugDumpDir(self._dump_root) + self.assertFalse(dump.loaded_partition_graphs()) + + m_dumps = dump.watch_key_to_data("mismatch/m:0:DebugIdentity") + self.assertEqual(1, len(m_dumps)) + self.assertAllClose(np.array([[1.0, 2.0]]), m_dumps[0].get_tensor()) + + x_dumps = dump.watch_key_to_data("mismatch/x:0:DebugIdentity") + self.assertEqual(1, len(x_dumps)) + self.assertAllClose(np.array([[-3.0, 0.0]]), x_dumps[0].get_tensor()) + if __name__ == "__main__": googletest.main() diff --git a/tensorflow/python/debug/wrappers/framework.py b/tensorflow/python/debug/wrappers/framework.py index 8dab1ddf1a8dbe..97d3fe1f8e46fb 100644 --- a/tensorflow/python/debug/wrappers/framework.py +++ b/tensorflow/python/debug/wrappers/framework.py @@ -116,6 +116,7 @@ from tensorflow.core.protobuf import config_pb2 from tensorflow.python.client import session from tensorflow.python.debug import debug_utils +from tensorflow.python.framework import errors # Helper function. @@ -253,13 +254,22 @@ class OnRunEndRequest(object): The callback is invoked immediately before the wrapped run() call ends. """ - def __init__(self, performed_action, run_metadata=None): + def __init__(self, + performed_action, + run_metadata=None, + client_graph_def=None, + tf_error=None): """Constructor for OnRunEndRequest. Args: performed_action: (OnRunStartAction) Actually-performed action by the debug-wrapper session. run_metadata: run_metadata output from the run() call (if any). + client_graph_def: (GraphDef) GraphDef from the client side, i.e., from + the python front end of TensorFlow. Can be obtained with + session.graph.as_graph_def(). + tf_error: (errors.OpError subtypes) TensorFlow OpError that occurred + during the run (if any). """ _check_type(performed_action, str) @@ -268,6 +278,8 @@ def __init__(self, performed_action, run_metadata=None): if run_metadata is not None: _check_type(run_metadata, config_pb2.RunMetadata) self.run_metadata = run_metadata + self.client_graph_def = client_graph_def + self.tf_error = tf_error class OnRunEndResponse(object): @@ -367,16 +379,24 @@ def run(self, fetches, feed_dict=None, options=None, run_metadata=None): self._decorate_run_options(decorated_run_options, run_start_resp.debug_urls) - # Invoke the run() method of the wrapped Session. 
- retvals = self._sess.run( - fetches, - feed_dict=feed_dict, - options=decorated_run_options, - run_metadata=run_metadata) + # Invoke the run() method of the wrapped Session. Catch any TensorFlow + # runtime errors. + tf_error = None + try: + retvals = self._sess.run(fetches, + feed_dict=feed_dict, + options=decorated_run_options, + run_metadata=run_metadata) + except errors.OpError as op_error: + tf_error = op_error + retvals = op_error - # Prepare arg for the on-run-end callback. run_end_req = OnRunEndRequest( - run_start_resp.action, run_metadata=run_metadata) + run_start_resp.action, + run_metadata=run_metadata, + client_graph_def=self._sess.graph.as_graph_def(), + tf_error=tf_error) + elif run_start_resp.action == OnRunStartAction.NON_DEBUG_RUN: # Invoke run() method of the wrapped session. retvals = self._sess.run( diff --git a/tensorflow/python/debug/wrappers/framework_test.py b/tensorflow/python/debug/wrappers/framework_test.py index d92fec016fba31..613728ed4e1dbb 100644 --- a/tensorflow/python/debug/wrappers/framework_test.py +++ b/tensorflow/python/debug/wrappers/framework_test.py @@ -26,7 +26,11 @@ from tensorflow.python.debug import debug_data from tensorflow.python.debug.wrappers import framework from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import errors +from tensorflow.python.framework import ops from tensorflow.python.framework import test_util +from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import variables from tensorflow.python.platform import googletest @@ -70,6 +74,7 @@ def on_run_end(self, request): self._obs["on_run_end_count"] += 1 self._obs["performed_action"] = request.performed_action + self._obs["tf_error"] = request.tf_error return framework.OnRunEndResponse() @@ -138,6 +143,7 @@ def setUp(self): "run_feed_dict": None, "on_run_end_count": 0, "performed_action": None, + "tf_error": None, } self._dump_root = tempfile.mkdtemp() @@ -153,6 +159,8 @@ def setUp(self): self._b_init = constant_op.constant( self._b_init_val, shape=[2, 1], name="b_init") + self._ph = array_ops.placeholder(dtype=dtypes.float64, name="ph") + self._a = variables.Variable(self._a_init, name="a1") self._b = variables.Variable(self._b_init, name="b") self._c = constant_op.constant(self._c_val, shape=[2, 1], name="c") @@ -160,6 +168,9 @@ def setUp(self): # Matrix product of a and b. self._p = math_ops.matmul(self._a, self._b, name="p1") + # Matrix product of a and ph. + self._q = math_ops.matmul(self._a, self._ph, name="q") + # Sum of two vectors. self._s = math_ops.add(self._p, self._c, name="s") @@ -171,6 +182,8 @@ def tearDown(self): # Tear down temporary dump directory. shutil.rmtree(self._dump_root) + ops.reset_default_graph() + def testSessionInit(self): self.assertEqual(0, self._observer["sess_init_count"]) @@ -235,6 +248,9 @@ def testSessionRun(self): framework.OnRunStartAction.DEBUG_RUN, self._observer["performed_action"]) + # No TensorFlow runtime error should have happened. + self.assertIsNone(self._observer["tf_error"]) + def testSessionInitInvalidSessionType(self): """Attempt to wrap a non-Session-type object should cause an exception.""" @@ -265,6 +281,24 @@ def testRunStartBadURLs(self): with self.assertRaisesRegexp(TypeError, "Expected type .*; got type .*"): wrapper.run(self._s) + def testErrorDuringRun(self): + + wrapper = TestDebugWrapperSession(self._sess, self._dump_root, + self._observer) + + # No matrix size mismatch. 
+ self.assertAllClose( + np.array([[11.0], [-1.0]]), + wrapper.run(self._q, feed_dict={self._ph: np.array([[1.0], [2.0]])})) + self.assertEqual(1, self._observer["on_run_end_count"]) + self.assertIsNone(self._observer["tf_error"]) + + # Now there should be a matrix size mismatch error. + wrapper.run(self._q, feed_dict={self._ph: np.array([[1.0], [2.0], [3.0]])}) + self.assertEqual(2, self._observer["on_run_end_count"]) + self.assertTrue( + isinstance(self._observer["tf_error"], errors.InvalidArgumentError)) + if __name__ == "__main__": googletest.main() diff --git a/tensorflow/python/debug/wrappers/local_cli_wrapper.py b/tensorflow/python/debug/wrappers/local_cli_wrapper.py index 15ff75b6767583..a5974afde8971a 100644 --- a/tensorflow/python/debug/wrappers/local_cli_wrapper.py +++ b/tensorflow/python/debug/wrappers/local_cli_wrapper.py @@ -246,24 +246,60 @@ def on_run_end(self, request): """ if request.performed_action == framework.OnRunStartAction.DEBUG_RUN: + partition_graphs = None + if request.run_metadata and request.run_metadata.partition_graphs: + partition_graphs = request.run_metadata.partition_graphs + elif request.client_graph_def: + partition_graphs = [request.client_graph_def] + debug_dump = debug_data.DebugDumpDir( - self._dump_root, - partition_graphs=request.run_metadata.partition_graphs) - - init_command = "lt" - title_color = "green" - if self._run_till_filter_pass: - if not debug_dump.find( - self._tensor_filters[self._run_till_filter_pass], first_n=1): - # No dumped tensor passes the filter in this run. Clean up the dump - # directory and move on. - shutil.rmtree(self._dump_root) - return framework.OnRunEndResponse() - else: - # Some dumped tensor(s) from this run passed the filter. - init_command = "lt -f %s" % self._run_till_filter_pass - title_color = "red" - self._run_till_filter_pass = None + self._dump_root, partition_graphs=partition_graphs) + + if request.tf_error: + op_name = request.tf_error.op.name + + # Prepare help introduction for the TensorFlow error that occurred + # during the run. + help_intro = [ + "--------------------------------------", + "!!! An error occurred during the run !!!", + "", + " * Use command \"ni %s\" to see the information about the " + "failing op." % op_name, + " * Use command \"li -r %s\" to see the inputs to the " + "failing op." % op_name, + " * Use command \"lt\" to view the dumped tensors.", + "", + "Op name: " + op_name, + "Error type: " + str(type(request.tf_error)), + "", + "Details:", + str(request.tf_error), + "", + "WARNING: Using client GraphDef due to the error, instead of " + "executor GraphDefs.", + "--------------------------------------", + "", + ] + init_command = "help" + title_color = "red" + else: + help_intro = None + init_command = "lt" + + title_color = "green" + if self._run_till_filter_pass: + if not debug_dump.find( + self._tensor_filters[self._run_till_filter_pass], first_n=1): + # No dumped tensor passes the filter in this run. Clean up the dump + # directory and move on. + shutil.rmtree(self._dump_root) + return framework.OnRunEndResponse() + else: + # Some dumped tensor(s) from this run passed the filter. + init_command = "lt -f %s" % self._run_till_filter_pass + title_color = "red" + self._run_till_filter_pass = None analyzer = analyzer_cli.DebugAnalyzer(debug_dump) @@ -327,6 +363,7 @@ def on_run_end(self, request): # completion contexts and registered command handlers. 
title = "run-end: " + self._run_description + run_end_cli.set_help_intro(help_intro) run_end_cli.run_ui( init_command=init_command, title=title, title_color=title_color) diff --git a/tensorflow/python/framework/function.py b/tensorflow/python/framework/function.py index 35166f60722221..e1f635d7931e28 100644 --- a/tensorflow/python/framework/function.py +++ b/tensorflow/python/framework/function.py @@ -530,6 +530,11 @@ def _create_definition_if_needed(self): # Build the FunctionDef self._definition = _graph_to_function_def(temp_graph, inputs, outputs) + # Extra kwargs are treated as attrs on the function def. + kwargs_attr = _parse_kwargs_as_attrs(**self._extra_kwargs) + for k in kwargs_attr: + self._definition.attr[k].CopyFrom(kwargs_attr[k]) + # Hash the definition and its dependencies. hasher = hashlib.sha1() @@ -607,10 +612,6 @@ def add_to_graph(self, g): def __call__(self, *args, **kwargs): self.add_to_graph(ops.get_default_graph()) args = [ops.convert_to_tensor(_) for _ in args] + self._extra_inputs - if self._extra_kwargs: - for k in self._extra_kwargs: - if k not in kwargs: - kwargs[k] = self._extra_kwargs[k] return _call(self._definition.signature, *args, **kwargs) # NOTE: The list needs to be extended when more data types are added. diff --git a/tensorflow/python/framework/function_test.py b/tensorflow/python/framework/function_test.py index 9b0c08f3544bd2..edbfffceccc5d3 100644 --- a/tensorflow/python/framework/function_test.py +++ b/tensorflow/python/framework/function_test.py @@ -167,19 +167,20 @@ def testSymGradShape(self): self.assertEqual(y.get_shape(), dy.get_shape()) def testSymGradAttr(self): + @function.Defun(noinline=True) def Foo(x): return x * 2 + self.assertTrue( + Foo.instantiate([tf.float32]).definition.attr["_noinline"].b) + g = tf.Graph() with g.as_default(): x = tf.constant(3.0) y = Foo(x) dx, = tf.gradients(y, [x]) - self.assertTrue(y.op.node_def.attr["_noinline"].b) - self.assertTrue(dx.op.node_def.attr['f'].func.attr['_noinline'].b) - cfg = tf.ConfigProto(graph_options=tf.GraphOptions( optimizer_options=tf.OptimizerOptions( opt_level=tf.OptimizerOptions.L0, @@ -191,7 +192,6 @@ def Foo(x): self.assertAllClose(y.eval(), 6.) self.assertAllClose(dx.eval(), 2.) - def testZNoDepOnY(self): @function.Defun(tf.float32, tf.float32) @@ -764,7 +764,6 @@ def testFoo(self): do_constant_folding=True))) for noinline in [False, True]: - # pylint: disable=unexpected-keyword-arg @function.Defun(dtype, noinline=noinline) def Cell(v): # If v is a vector [n, 1], x is a big square matrix. 
@@ -778,6 +777,8 @@ def Forward(x): x = Cell(x) return tf.reduce_sum(x, [0, 1]) + self.assertEqual(noinline, Cell.definition.attr["_noinline"].b) + g = tf.Graph() with g.as_default(): x = tf.placeholder(dtype) @@ -786,12 +787,25 @@ def Forward(x): np.random.seed(321) inp = np.random.uniform(-1, 1, [16, 1]).astype(np.float32) + run_metadata = tf.RunMetadata() with tf.Session(graph=g, config=cfg) as sess: - ans = sess.run([y, dx], {x: inp}) + ans = sess.run( + [y, dx], {x: inp}, + run_metadata=run_metadata, + options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)) print(ans[0], np.sum(ans[1])) self.assertAllClose(ans[0], 255.971, rtol=1e-3) self.assertAllClose(np.sum(ans[1]), 13.0408, rtol=1e-3) + def MetadataHasCell(run_metadata): + for dev_stats in run_metadata.step_stats.dev_stats: + for node_stats in dev_stats.node_stats: + if "Cell" in node_stats.timeline_label: + return True + return False + + self.assertEqual(MetadataHasCell(run_metadata), noinline) + @function.Defun(*[tf.float32] * 3) def Linear(w, b, x): diff --git a/tensorflow/tensorboard/TAG b/tensorflow/tensorboard/TAG index 81b5c5d06cc0b8..e522732c77ec94 100644 --- a/tensorflow/tensorboard/TAG +++ b/tensorflow/tensorboard/TAG @@ -1 +1 @@ -37 +38