
Commit 00d9a09

Add doc-string examples (#66)
* Add doc-string examples
* Revert API files
* Improve loss doc-strings
1 parent 24d0015 commit 00d9a09

14 files changed, +463 -20 lines changed

keras_rs/src/layers/feature_interaction/dot_interaction.py

Lines changed: 47 additions & 0 deletions

@@ -30,6 +30,53 @@ class DotInteraction(keras.layers.Layer):
             but is much slower.
         **kwargs: Args to pass to the base class.

+    Example:
+
+    ```python
+    # 1. Simple forward pass
+    batch_size = 2
+    embedding_dim = 32
+    feature1 = np.random.randn(batch_size, embedding_dim)
+    feature2 = np.random.randn(batch_size, embedding_dim)
+    feature3 = np.random.randn(batch_size, embedding_dim)
+    feature_interactions = keras_rs.layers.DotInteraction()(
+        [feature1, feature2, feature3]
+    )
+
+    # 2. After embedding layers in a model
+    vocabulary_size = 32
+    embedding_dim = 6
+
+    # Create a simple model containing the layer.
+    feature_input_1 = keras.Input(shape=(), name='indices_1', dtype="int32")
+    feature_input_2 = keras.Input(shape=(), name='indices_2', dtype="int32")
+    feature_input_3 = keras.Input(shape=(), name='indices_3', dtype="int32")
+    x1 = keras.layers.Embedding(
+        input_dim=vocabulary_size,
+        output_dim=embedding_dim
+    )(feature_input_1)
+    x2 = keras.layers.Embedding(
+        input_dim=vocabulary_size,
+        output_dim=embedding_dim
+    )(feature_input_2)
+    x3 = keras.layers.Embedding(
+        input_dim=vocabulary_size,
+        output_dim=embedding_dim
+    )(feature_input_3)
+    feature_interactions = keras_rs.layers.DotInteraction()([x1, x2, x3])
+    output = keras.layers.Dense(units=10)(feature_interactions)
+    model = keras.Model(
+        [feature_input_1, feature_input_2, feature_input_3], output
+    )
+
+    # Call the model on the inputs.
+    batch_size = 2
+    f1 = np.random.randint(0, vocabulary_size, size=(batch_size,))
+    f2 = np.random.randint(0, vocabulary_size, size=(batch_size,))
+    f3 = np.random.randint(0, vocabulary_size, size=(batch_size,))
+    outputs = model([f1, f2, f3])
+    ```
+
     References:
         - [M. Naumov et al.](https://arxiv.org/abs/1906.00091)
     """

keras_rs/src/layers/feature_interaction/feature_cross.py

Lines changed: 25 additions & 6 deletions

@@ -57,13 +57,32 @@ class FeatureCross(keras.layers.Layer):
     Example:

     ```python
-    # after embedding layer in a functional model
-    input = keras.Input(shape=(), name='indices', dtype="int64")
-    x0 = keras.layers.Embedding(input_dim=32, output_dim=6)(x0)
-    x1 = FeatureCross()(x0, x0)
-    x2 = FeatureCross()(x0, x1)
+    # 1. Simple forward pass
+    batch_size = 2
+    embedding_dim = 32
+    feature1 = np.random.randn(batch_size, embedding_dim)
+    feature2 = np.random.randn(batch_size, embedding_dim)
+    crossed_features = keras_rs.layers.FeatureCross()(feature1, feature2)
+
+    # 2. After embedding layer in a model
+    vocabulary_size = 32
+    embedding_dim = 6
+
+    # Create a simple model containing the layer.
+    inputs = keras.Input(shape=(), name='indices', dtype="int32")
+    x0 = keras.layers.Embedding(
+        input_dim=vocabulary_size,
+        output_dim=embedding_dim
+    )(inputs)
+    x1 = keras_rs.layers.FeatureCross()(x0, x0)
+    x2 = keras_rs.layers.FeatureCross()(x0, x1)
     logits = keras.layers.Dense(units=10)(x2)
-    model = keras.Model(input, logits)
+    model = keras.Model(inputs, logits)
+
+    # Call the model on the inputs.
+    batch_size = 2
+    input_data = np.random.randint(0, vocabulary_size, size=(batch_size,))
+    outputs = model(input_data)
     ```

     References:
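For context on what each FeatureCross call computes: cross layers of this kind (DCN-style) combine the base input `x0` with the previous layer's output. The sketch below shows the commonly used form `x0 * (x @ W + b) + x`; the exact projection keras_rs uses (full-rank vs. low-rank, initialisation, bias handling) is assumed here, not taken from the source.

```python
import numpy as np

# Minimal sketch of a DCN-style feature cross (assumed form):
#     crossed = x0 * (x @ W + b) + x
# where x0 is the base input and x is the previous cross output.
batch_size, dim = 2, 6
rng = np.random.default_rng(0)

x0 = rng.normal(size=(batch_size, dim))  # output of the embedding layer
x = x0                                   # first cross call uses x0 for both inputs
W = rng.normal(size=(dim, dim))          # dense projection (assumed full-rank)
b = np.zeros(dim)

crossed = x0 * (x @ W + b) + x           # element-wise gate by x0, plus residual
print(crossed.shape)                     # (2, 6)
```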

keras_rs/src/losses/pairwise_hinge_loss.py

Lines changed: 60 additions & 0 deletions

@@ -19,9 +19,69 @@ def pairwise_loss(self, pairwise_logits: types.Tensor) -> types.Tensor:
         `y_i > y_j`.
         """
 extra_args = ""
+example = """
+    1. With `compile()` API:
+
+    ```python
+    model.compile(
+        loss=keras_rs.losses.PairwiseHingeLoss(),
+        ...
+    )
+    ```
+
+    2. As a standalone function:
+    2.1. Unbatched inputs
+    >>> y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
+    >>> y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])
+    >>> pairwise_hinge_loss = keras_rs.losses.PairwiseHingeLoss()
+    >>> pairwise_hinge_loss(y_true=y_true, y_pred=y_pred)
+    2.32000
+
+    2.2. Batched inputs
+    2.2.1. Using default 'auto'/'sum_over_batch_size' reduction.
+    >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> pairwise_hinge_loss = keras_rs.losses.PairwiseHingeLoss()
+    >>> pairwise_hinge_loss(y_true=y_true, y_pred=y_pred)
+    0.75
+
+    2.2.2. With masked inputs (useful for ragged inputs)
+    >>> y_true = {
+    ...     "labels": np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]]),
+    ...     "mask": np.array(
+    ...         [[True, True, True, True], [True, True, False, False]]
+    ...     ),
+    ... }
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> pairwise_hinge_loss(y_true=y_true, y_pred=y_pred)
+    0.64999
+
+    2.2.3. With `sample_weight`
+    >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> sample_weight = np.array(
+    ...     [[2.0, 3.0, 1.0, 1.0], [2.0, 1.0, 0.0, 0.0]]
+    ... )
+    >>> pairwise_hinge_loss = keras_rs.losses.PairwiseHingeLoss()
+    >>> pairwise_hinge_loss(
+    ...     y_true=y_true, y_pred=y_pred, sample_weight=sample_weight
+    ... )
+    1.02499
+
+    2.2.4. Using `'none'` reduction.
+    >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> pairwise_hinge_loss = keras_rs.losses.PairwiseHingeLoss(
+    ...     reduction="none"
+    ... )
+    >>> pairwise_hinge_loss(y_true=y_true, y_pred=y_pred)
+    [[3. , 0. , 2. , 0.], [0., 0.20000005, 0.79999995, 0.]]
+"""
+
 PairwiseHingeLoss.__doc__ = pairwise_loss_subclass_doc_string.format(
     loss_name="hinge loss",
     formula=formula,
     explanation=explanation,
     extra_args=extra_args,
+    example=example,
 )
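As a sanity check on the new doctest values, the unbatched figure 2.32000 can be reproduced with a few lines of numpy. The pairing rule (a hinge on the score difference for every pair whose first label is larger) and the per-list averaging below are inferred from the documented outputs, not copied from the keras_rs source:

```python
import numpy as np

y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])

# Assumed formulation: max(0, 1 - (s_i - s_j)) for every pair with y_i > y_j,
# then averaged over the list size ('sum_over_batch_size'-style reduction).
valid_pairs = y_true[:, None] > y_true[None, :]
pair_losses = np.maximum(0.0, 1.0 - (y_pred[:, None] - y_pred[None, :]))
loss = np.sum(pair_losses * valid_pairs) / y_true.shape[0]
print(loss)  # 2.32, matching the 2.32000 in the example above
```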

keras_rs/src/losses/pairwise_logistic_loss.py

Lines changed: 60 additions & 0 deletions

@@ -28,9 +28,69 @@ def pairwise_loss(self, pairwise_logits: types.Tensor) -> types.Tensor:
         ideal step function, making it suitable for gradient-based optimization.
         """
 extra_args = ""
+example = """
+    1. With `compile()` API:
+
+    ```python
+    model.compile(
+        loss=keras_rs.losses.PairwiseLogisticLoss(),
+        ...
+    )
+    ```
+
+    2. As a standalone function:
+    2.1. Unbatched inputs
+    >>> y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
+    >>> y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])
+    >>> pairwise_logistic_loss = keras_rs.losses.PairwiseLogisticLoss()
+    >>> pairwise_logistic_loss(y_true=y_true, y_pred=y_pred)
+    1.70708
+
+    2.2. Batched inputs
+    2.2.1. Using default 'auto'/'sum_over_batch_size' reduction.
+    >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> pairwise_logistic_loss = keras_rs.losses.PairwiseLogisticLoss()
+    >>> pairwise_logistic_loss(y_true=y_true, y_pred=y_pred)
+    0.73936
+
+    2.2.2. With masked inputs (useful for ragged inputs)
+    >>> y_true = {
+    ...     "labels": np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]]),
+    ...     "mask": np.array(
+    ...         [[True, True, True, True], [True, True, False, False]]
+    ...     ),
+    ... }
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> pairwise_logistic_loss(y_true=y_true, y_pred=y_pred)
+    0.53751
+
+    2.2.3. With `sample_weight`
+    >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> sample_weight = np.array(
+    ...     [[2.0, 3.0, 1.0, 1.0], [2.0, 1.0, 0.0, 0.0]]
+    ... )
+    >>> pairwise_logistic_loss = keras_rs.losses.PairwiseLogisticLoss()
+    >>> pairwise_logistic_loss(
+    ...     y_true=y_true, y_pred=y_pred, sample_weight=sample_weight
+    ... )
+    0.80337
+
+    2.2.4. Using `'none'` reduction.
+    >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> pairwise_logistic_loss = keras_rs.losses.PairwiseLogisticLoss(
+    ...     reduction="none"
+    ... )
+    >>> pairwise_logistic_loss(y_true=y_true, y_pred=y_pred)
+    [[2.126928, 0., 1.3132616, 0.48877698], [0., 0.20000005, 0.79999995, 0.]]
+"""
+
 PairwiseLogisticLoss.__doc__ = pairwise_loss_subclass_doc_string.format(
     loss_name="logistic loss",
     formula=formula,
     explanation=explanation,
     extra_args=extra_args,
+    example=example,
 )
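Likewise, the corrected unbatched output 1.70708 is consistent with a pairwise logistic loss `log(1 + exp(-(s_i - s_j)))` over pairs with `y_i > y_j`, averaged over the list size. The numpy sketch below is an independent check under that assumption:

```python
import numpy as np

y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])

# Assumed formulation: log(1 + exp(-(s_i - s_j))) for every pair with
# y_i > y_j, averaged over the list size. logaddexp keeps it numerically stable.
valid_pairs = y_true[:, None] > y_true[None, :]
pair_losses = np.logaddexp(0.0, -(y_pred[:, None] - y_pred[None, :]))
loss = np.sum(pair_losses * valid_pairs) / y_true.shape[0]
print(loss)  # ~1.70708
```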

keras_rs/src/losses/pairwise_loss.py

Lines changed: 3 additions & 1 deletion

@@ -154,5 +154,7 @@ def get_config(self) -> dict[str, Any]:
             `"float32"` unless set to different value
             (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
             provided, then the `compute_dtype` will be utilized.
-    """
+
+    Examples:
+    {example}"""
 )
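The `{example}` placeholder added here is what each loss module above fills in via `str.format`, which is why every file now passes `example=example`. A minimal sketch of the mechanism, with a heavily abbreviated stand-in template (the real `pairwise_loss_subclass_doc_string` is much longer):

```python
# Stand-in template to illustrate the formatting mechanism only; the real
# keras_rs template contains the full class documentation.
pairwise_loss_subclass_doc_string = """Implements pairwise {loss_name}.

    Formula: {formula}

    {explanation}
    {extra_args}
    Examples:
    {example}"""


class PairwiseHingeLoss:  # stand-in for the real keras_rs loss class
    pass


example = """
    >>> loss = PairwiseHingeLoss()  # doctest-style usage goes here
"""

PairwiseHingeLoss.__doc__ = pairwise_loss_subclass_doc_string.format(
    loss_name="hinge loss",
    formula="max(0, 1 - (s_i - s_j))",
    explanation="Penalizes mis-ordered pairs of items.",
    extra_args="",
    example=example,
)
print(PairwiseHingeLoss.__doc__)
```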

keras_rs/src/losses/pairwise_mean_squared_error.py

Lines changed: 60 additions & 0 deletions

@@ -64,9 +64,69 @@ def compute_unreduced_loss(
         predicted order of items relative to their true order.
         """
 extra_args = ""
+example = """
+    1. With `compile()` API:
+
+    ```python
+    model.compile(
+        loss=keras_rs.losses.PairwiseMeanSquaredError(),
+        ...
+    )
+    ```
+
+    2. As a standalone function:
+    2.1. Unbatched inputs
+    >>> y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
+    >>> y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])
+    >>> pairwise_mse = keras_rs.losses.PairwiseMeanSquaredError()
+    >>> pairwise_mse(y_true=y_true, y_pred=y_pred)
+    19.10400
+
+    2.2. Batched inputs
+    2.2.1. Using default 'auto'/'sum_over_batch_size' reduction.
+    >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> pairwise_mse = keras_rs.losses.PairwiseMeanSquaredError()
+    >>> pairwise_mse(y_true=y_true, y_pred=y_pred)
+    5.57999
+
+    2.2.2. With masked inputs (useful for ragged inputs)
+    >>> y_true = {
+    ...     "labels": np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]]),
+    ...     "mask": np.array(
+    ...         [[True, True, True, True], [True, True, False, False]]
+    ...     ),
+    ... }
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> pairwise_mse(y_true=y_true, y_pred=y_pred)
+    4.76000
+
+    2.2.3. With `sample_weight`
+    >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> sample_weight = np.array(
+    ...     [[2.0, 3.0, 1.0, 1.0], [2.0, 1.0, 0.0, 0.0]]
+    ... )
+    >>> pairwise_mse = keras_rs.losses.PairwiseMeanSquaredError()
+    >>> pairwise_mse(
+    ...     y_true=y_true, y_pred=y_pred, sample_weight=sample_weight
+    ... )
+    11.0500
+
+    2.2.4. Using `'none'` reduction.
+    >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> pairwise_mse = keras_rs.losses.PairwiseMeanSquaredError(
+    ...     reduction="none"
+    ... )
+    >>> pairwise_mse(y_true=y_true, y_pred=y_pred)
+    [[11., 17., 5., 5.], [2.04, 1.3199998, 1.6399999, 1.6399999]]
+"""
+
 PairwiseMeanSquaredError.__doc__ = pairwise_loss_subclass_doc_string.format(
     loss_name="mean squared error",
     formula=formula,
     explanation=explanation,
     extra_args=extra_args,
+    example=example,
 )
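The corrected unbatched output 19.10400 matches a pairwise squared error `((s_i - s_j) - (y_i - y_j))^2` summed over all ordered pairs and averaged over the list size; as before, this formulation is inferred from the documented numbers rather than taken from the source:

```python
import numpy as np

y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])

# Assumed formulation: ((s_i - s_j) - (y_i - y_j))^2 over every ordered pair,
# averaged over the list size. The i == j diagonal is zero, so it adds nothing.
pred_diff = y_pred[:, None] - y_pred[None, :]
true_diff = y_true[:, None] - y_true[None, :]
loss = np.sum((pred_diff - true_diff) ** 2) / y_true.shape[0]
print(loss)  # ~19.104
```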

keras_rs/src/losses/pairwise_soft_zero_one_loss.py

Lines changed: 63 additions & 0 deletions

@@ -24,9 +24,72 @@ def pairwise_loss(self, pairwise_logits: types.Tensor) -> types.Tensor:
         suitable for gradient-based optimization.
         """
 extra_args = ""
+example = """
+    1. With `compile()` API:
+
+    ```python
+    model.compile(
+        loss=keras_rs.losses.PairwiseSoftZeroOneLoss(),
+        ...
+    )
+    ```
+
+    2. As a standalone function:
+    2.1. Unbatched inputs
+    >>> y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
+    >>> y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])
+    >>> pairwise_soft_zero_one_loss = keras_rs.losses.PairwiseSoftZeroOneLoss()
+    >>> pairwise_soft_zero_one_loss(y_true=y_true, y_pred=y_pred)
+    0.86103
+
+    2.2. Batched inputs
+    2.2.1. Using default 'auto'/'sum_over_batch_size' reduction.
+    >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> pairwise_soft_zero_one_loss = keras_rs.losses.PairwiseSoftZeroOneLoss()
+    >>> pairwise_soft_zero_one_loss(y_true=y_true, y_pred=y_pred)
+    0.46202
+
+    2.2.2. With masked inputs (useful for ragged inputs)
+    >>> y_true = {
+    ...     "labels": np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]]),
+    ...     "mask": np.array(
+    ...         [[True, True, True, True], [True, True, False, False]]
+    ...     ),
+    ... }
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> pairwise_soft_zero_one_loss(y_true=y_true, y_pred=y_pred)
+    0.29468
+
+    2.2.3. With `sample_weight`
+    >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> sample_weight = np.array(
+    ...     [[2.0, 3.0, 1.0, 1.0], [2.0, 1.0, 0.0, 0.0]]
+    ... )
+    >>> pairwise_soft_zero_one_loss = keras_rs.losses.PairwiseSoftZeroOneLoss()
+    >>> pairwise_soft_zero_one_loss(
+    ...     y_true=y_true, y_pred=y_pred, sample_weight=sample_weight
+    ... )
+    0.40478
+
+    2.2.4. Using `'none'` reduction.
+    >>> y_true = np.array([[1.0, 0.0, 1.0, 3.0], [0.0, 1.0, 2.0, 3.0]])
+    >>> y_pred = np.array([[1.0, 3.0, 2.0, 4.0], [1.0, 1.8, 2.0, 3.0]])
+    >>> pairwise_soft_zero_one_loss = keras_rs.losses.PairwiseSoftZeroOneLoss(
+    ...     reduction="none"
+    ... )
+    >>> pairwise_soft_zero_one_loss(y_true=y_true, y_pred=y_pred)
+    [
+        [0.8807971 , 0., 0.73105854, 0.43557024],
+        [0., 0.31002545, 0.7191075 , 0.61961967]
+    ]
+"""
+
 PairwiseSoftZeroOneLoss.__doc__ = pairwise_loss_subclass_doc_string.format(
     loss_name="soft zero-one loss",
     formula=formula,
     explanation=explanation,
     extra_args=extra_args,
+    example=example,
 )
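Finally, the unbatched output 0.86103 is consistent with a soft zero-one loss `sigmoid(-(s_i - s_j))` over pairs with `y_i > y_j`, averaged over the list size; the sketch below reproduces it under that assumption:

```python
import numpy as np

y_true = np.array([1.0, 0.0, 1.0, 3.0, 2.0])
y_pred = np.array([1.0, 3.0, 2.0, 4.0, 0.8])

# Assumed formulation: sigmoid(-(s_i - s_j)) for every pair with y_i > y_j,
# averaged over the list size.
valid_pairs = y_true[:, None] > y_true[None, :]
pair_losses = 1.0 / (1.0 + np.exp(y_pred[:, None] - y_pred[None, :]))
loss = np.sum(pair_losses * valid_pairs) / y_true.shape[0]
print(loss)  # ~0.86103
```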
