Commit
Result of new doc style with fixes (huggingface#17015)
* Result of new doc style with fixes

* Add last two files

* Bump hf-doc-builder
sgugger authored and stevhliu committed May 3, 2022
1 parent 52d8c08 commit e64a6fa
Showing 28 changed files with 58 additions and 58 deletions.
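
Note on the pattern: in Python doctest syntax, ">>>" opens a statement and "..." marks its continuation lines, so the line that closes a multi-line call must carry the "..." prompt; written with ">>>", it parses as a new, invalid statement. Every hunk below makes that one fix, and hf-doc-builder is bumped to 0.3.0 to match the new style. A minimal, runnable sketch of the convention (illustrative, not taken from the commit):

import doctest

# Correct style: ">>>" opens the statement, "..." continues it,
# including the line that closes the call.
good = """
>>> total = sum(
...     [1, 2, 3]
... )
>>> total
6
"""

test = doctest.DocTestParser().get_doctest(good, {}, "good", None, 0)
print(doctest.DocTestRunner().run(test))  # TestResults(failed=0, attempted=2)
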
4 changes: 2 additions & 2 deletions docs/source/en/model_doc/bert-generation.mdx
@@ -49,7 +49,7 @@ Usage:
>>> input_ids = tokenizer(
... "This is a long article to summarize", add_special_tokens=False, return_tensors="pt"
- >>> ).input_ids
+ ... ).input_ids
>>> labels = tokenizer("This is a short summary", return_tensors="pt").input_ids
>>> # train...
@@ -67,7 +67,7 @@ Usage:
>>> input_ids = tokenizer(
... "This is the first sentence. This is the second sentence.", add_special_tokens=False, return_tensors="pt"
- >>> ).input_ids
+ ... ).input_ids
>>> outputs = sentence_fuser.generate(input_ids)
2 changes: 1 addition & 1 deletion docs/source/en/model_doc/luke.mdx
@@ -97,7 +97,7 @@ Example:
>>> entities = [
... "Beyoncé",
... "Los Angeles",
- >>> ] # Wikipedia entity titles corresponding to the entity mentions "Beyoncé" and "Los Angeles"
+ ... ] # Wikipedia entity titles corresponding to the entity mentions "Beyoncé" and "Los Angeles"
>>> entity_spans = [(0, 7), (17, 28)] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles"
>>> inputs = tokenizer(text, entities=entities, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt")
>>> outputs = model(**inputs)
2 changes: 1 addition & 1 deletion setup.py
@@ -111,7 +111,7 @@
"ftfy",
"fugashi>=1.0",
"GitPython<3.1.19",
"hf-doc-builder>=0.2.0",
"hf-doc-builder>=0.3.0",
"huggingface-hub>=0.1.0,<1.0",
"importlib_metadata",
"ipadic>=1.0.0,<2.0",
2 changes: 1 addition & 1 deletion src/transformers/dependency_versions_table.py
@@ -18,7 +18,7 @@
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.2.0",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.1.0,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
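
These two hunks move together by design: dependency_versions_table.py is auto-generated from the pinned dependency list in setup.py (via the repository's deps-table-update command), so a version bump has to land in both files to keep them in sync. A rough sketch of how such a table can be regenerated from the pins (a hypothetical standalone helper, not the repository's actual script):

import re

# Hypothetical regeneration helper: split each pin at its first version
# operator to recover the bare package name, then emit the table body.
_deps = [
    "GitPython<3.1.19",
    "hf-doc-builder>=0.3.0",
    "huggingface-hub>=0.1.0,<1.0",
]

deps = {re.split(r"[<>=!~;\[ ]", pin)[0]: pin for pin in _deps}

lines = ",\n".join(f'    "{name}": "{pin}"' for name, pin in deps.items())
print("deps = {\n" + lines + ",\n}")
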
@@ -457,7 +457,7 @@ def forward(
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
>>> model = EncoderDecoderModel.from_encoder_decoder_pretrained(
... "bert-base-uncased", "bert-base-uncased"
- >>> ) # initialize Bert2Bert from pre-trained checkpoints
+ ... ) # initialize Bert2Bert from pre-trained checkpoints
>>> # training
>>> model.config.decoder_start_token_id = tokenizer.cls_token_id
@@ -528,7 +528,7 @@ def call(
>>> # forward
>>> input_ids = tokenizer.encode(
... "Hello, my dog is cute", add_special_tokens=True, return_tensors="tf"
- >>> ) # Batch size 1
+ ... ) # Batch size 1
>>> outputs = model(input_ids=input_ids, decoder_input_ids=input_ids)
>>> # training
2 changes: 1 addition & 1 deletion src/transformers/models/gpt2/modeling_tf_gpt2.py
@@ -1061,7 +1061,7 @@ def call(
>>> embedding_layer = model.resize_token_embeddings(
... len(tokenizer)
- >>> ) # Update the model embeddings with the new vocabulary size
+ ... ) # Update the model embeddings with the new vocabulary size
>>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
>>> encoded_choices = [tokenizer.encode(s) for s in choices]
2 changes: 1 addition & 1 deletion src/transformers/models/imagegpt/modeling_imagegpt.py
@@ -1000,7 +1000,7 @@ def forward(
>>> samples = output[:, 1:].cpu().detach().numpy()
>>> samples_img = [
... np.reshape(np.rint(127.5 * (clusters[s] + 1.0)), [n_px, n_px, 3]).astype(np.uint8) for s in samples
- >>> ] # convert color cluster tokens back to pixels
+ ... ] # convert color cluster tokens back to pixels
>>> f, axes = plt.subplots(1, batch_size, dpi=300)
>>> for img, ax in zip(samples_img, axes):
8 changes: 4 additions & 4 deletions src/transformers/models/longformer/modeling_longformer.py
@@ -1634,18 +1634,18 @@ def forward(
>>> attention_mask = torch.ones(
... input_ids.shape, dtype=torch.long, device=input_ids.device
- >>> ) # initialize to local attention
+ ... ) # initialize to local attention
>>> global_attention_mask = torch.zeros(
... input_ids.shape, dtype=torch.long, device=input_ids.device
- >>> ) # initialize to global attention to be deactivated for all tokens
+ ... ) # initialize to global attention to be deactivated for all tokens
>>> global_attention_mask[
... :,
... [
... 1,
... 4,
... 21,
... ],
- >>> ] = 1 # Set global attention to random tokens for the sake of this example
+ ... ] = 1 # Set global attention to random tokens for the sake of this example
>>> # Usually, set global attention based on the task. For example,
>>> # classification: the <s> token
>>> # QA: question tokens
@@ -2025,7 +2025,7 @@ def forward(
>>> answer_tokens = all_tokens[torch.argmax(start_logits) : torch.argmax(end_logits) + 1]
>>> answer = tokenizer.decode(
... tokenizer.convert_tokens_to_ids(answer_tokens)
- >>> ) # remove space prepending space token
+ ... ) # remove space prepending space token
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict

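
Aside from the prompt fix, the Longformer hunk documents a pattern worth restating: global_attention_mask starts as all zeros (sliding-window local attention everywhere) and selected positions are set to 1 so they attend globally, chosen per task — the <s> token for classification, the question tokens for QA. A short runnable sketch of that pattern (checkpoint and input text are illustrative):

import torch
from transformers import LongformerModel, LongformerTokenizer

tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
model = LongformerModel.from_pretrained("allenai/longformer-base-4096")

inputs = tokenizer("Hello, how are you?", return_tensors="pt")

# 0 = local (sliding-window) attention, 1 = global attention.
global_attention_mask = torch.zeros_like(inputs["input_ids"])
global_attention_mask[:, 0] = 1  # classification-style: give <s> global attention

outputs = model(**inputs, global_attention_mask=global_attention_mask)
print(outputs.last_hidden_state.shape)  # torch.Size([1, sequence_length, 768])
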
6 changes: 3 additions & 3 deletions src/transformers/models/luke/modeling_luke.py
@@ -953,11 +953,11 @@ def forward(
>>> entities = [
... "Beyoncé",
... "Los Angeles",
- >>> ] # Wikipedia entity titles corresponding to the entity mentions "Beyoncé" and "Los Angeles"
+ ... ] # Wikipedia entity titles corresponding to the entity mentions "Beyoncé" and "Los Angeles"
>>> entity_spans = [
... (0, 7),
... (17, 28),
- >>> ] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles"
+ ... ] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles"
>>> encoding = tokenizer(
... text, entities=entities, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt"
@@ -1435,7 +1435,7 @@ def forward(
>>> entity_spans = [
... (0, 7),
... (17, 28),
- >>> ] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles"
+ ... ] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles"
>>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
2 changes: 1 addition & 1 deletion src/transformers/models/openai/modeling_openai.py
@@ -674,7 +674,7 @@ def forward(
>>> model = OpenAIGPTDoubleHeadsModel.from_pretrained("openai-gpt")
>>> tokenizer.add_special_tokens(
... {"cls_token": "[CLS]"}
- >>> ) # Add a [CLS] to the vocabulary (we should train it also!)
+ ... ) # Add a [CLS] to the vocabulary (we should train it also!)
>>> model.resize_token_embeddings(len(tokenizer))
>>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
4 changes: 2 additions & 2 deletions src/transformers/models/openai/modeling_tf_openai.py
@@ -693,9 +693,9 @@ def call(
>>> inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()}
>>> inputs["mc_token_ids"] = tf.constant(
... [inputs["input_ids"].shape[-1] - 1, inputs["input_ids"].shape[-1] - 1]
- >>> )[
+ ... )[
... None, :
- >>> ] # Batch size 1
+ ... ] # Batch size 1
>>> outputs = model(inputs)
>>> lm_prediction_scores, mc_prediction_scores = outputs[:2]
```"""
6 changes: 3 additions & 3 deletions src/transformers/models/prophetnet/modeling_prophetnet.py
@@ -1813,7 +1813,7 @@ def forward(
>>> input_ids = tokenizer(
... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
- >>> ).input_ids # Batch size 1
+ ... ).input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
@@ -1935,7 +1935,7 @@ def forward(
>>> input_ids = tokenizer(
... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
- >>> ).input_ids # Batch size 1
+ ... ).input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
@@ -2202,7 +2202,7 @@ def forward(
>>> input_ids = tokenizer_enc(ARTICLE, return_tensors="pt").input_ids
>>> labels = tokenizer_dec(
... "us rejects charges against its ambassador in bolivia", return_tensors="pt"
- >>> ).input_ids
+ ... ).input_ids
>>> outputs = model(input_ids=input_ids, decoder_input_ids=labels[:, :-1], labels=labels[:, 1:])
>>> loss = outputs.loss
4 changes: 2 additions & 2 deletions src/transformers/models/rag/modeling_rag.py
@@ -826,7 +826,7 @@ def forward(
>>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors="pt")
>>> doc_scores = torch.bmm(
... question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].float().transpose(1, 2)
- >>> ).squeeze(1)
+ ... ).squeeze(1)
>>> # 3. Forward to generator
>>> outputs = model(
... context_input_ids=docs_dict["context_input_ids"],
@@ -1293,7 +1293,7 @@ def forward(
>>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors="pt")
>>> doc_scores = torch.bmm(
... question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].float().transpose(1, 2)
- >>> ).squeeze(1)
+ ... ).squeeze(1)
>>> # 3. Forward to generator
>>> outputs = model(
... context_input_ids=docs_dict["context_input_ids"],
2 changes: 1 addition & 1 deletion src/transformers/models/rag/retrieval_rag.py
@@ -354,7 +354,7 @@ class RagRetriever:
>>> dataset = (
... ...
- >>> ) # dataset must be a datasets.Datasets object with columns "title", "text" and "embeddings", and it must have a faiss index
+ ... ) # dataset must be a datasets.Datasets object with columns "title", "text" and "embeddings", and it must have a faiss index
>>> retriever = RagRetriever.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", indexed_dataset=dataset)
>>> # To load your own indexed dataset built with the datasets library that was saved on disk. More info in examples/rag/use_own_knowledge_dataset.py
2 changes: 1 addition & 1 deletion src/transformers/models/realm/modeling_realm.py
@@ -1782,7 +1782,7 @@ def forward(
... add_special_tokens=False,
... return_token_type_ids=False,
... return_attention_mask=False,
- >>> ).input_ids
+ ... ).input_ids
>>> reader_output, predicted_answer_ids = model(**question_ids, answer_ids=answer_ids, return_dict=False)
>>> predicted_answer = tokenizer.decode(predicted_answer_ids)
@@ -1387,7 +1387,7 @@ def call(
>>> input_features = processor(
... ds["speech"][0], sampling_rate=16000, return_tensors="tf"
- >>> ).input_features # Batch size 1
+ ... ).input_features # Batch size 1
>>> generated_ids = model.generate(input_features)
>>> transcription = processor.batch_decode(generated_ids)
2 changes: 1 addition & 1 deletion src/transformers/models/t5/modeling_flax_t5.py
@@ -1344,7 +1344,7 @@ class FlaxT5Model(FlaxT5PreTrainedModel):
>>> input_ids = tokenizer(
... "Studies have been shown that owning a dog is good for you", return_tensors="np"
- >>> ).input_ids
+ ... ).input_ids
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="np").input_ids
>>> # forward pass
6 changes: 3 additions & 3 deletions src/transformers/models/t5/modeling_t5.py
@@ -1375,7 +1375,7 @@ def forward(
>>> input_ids = tokenizer(
... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
- >>> ).input_ids # Batch size 1
+ ... ).input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
>>> # forward pass
@@ -1583,7 +1583,7 @@ def forward(
>>> # inference
>>> input_ids = tokenizer(
... "summarize: studies have shown that owning a dog is good for you", return_tensors="pt"
- >>> ).input_ids # Batch size 1
+ ... ).input_ids # Batch size 1
>>> outputs = model.generate(input_ids)
>>> print(tokenizer.decode(outputs[0], skip_special_tokens=True))
>>> # studies have shown that owning a dog is good for you.
@@ -1831,7 +1831,7 @@ def forward(
>>> model = T5EncoderModel.from_pretrained("t5-small")
>>> input_ids = tokenizer(
... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
- >>> ).input_ids # Batch size 1
+ ... ).input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids)
>>> last_hidden_states = outputs.last_hidden_state
```"""
6 changes: 3 additions & 3 deletions src/transformers/models/t5/modeling_tf_t5.py
@@ -1165,7 +1165,7 @@ def call(
>>> input_ids = tokenizer(
... "Studies have been shown that owning a dog is good for you", return_tensors="tf"
- >>> ).input_ids # Batch size 1
+ ... ).input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="tf").input_ids # Batch size 1
>>> # forward pass
@@ -1353,7 +1353,7 @@ def call(
>>> # inference
>>> inputs = tokenizer(
... "summarize: studies have shown that owning a dog is good for you", return_tensors="tf"
- >>> ).input_ids # Batch size 1
+ ... ).input_ids # Batch size 1
>>> outputs = model.generate(inputs)
>>> print(tokenizer.decode(outputs[0], skip_special_tokens=True))
>>> # studies have shown that owning a dog is good for you
@@ -1642,7 +1642,7 @@ def call(
>>> input_ids = tokenizer(
... "Studies have been shown that owning a dog is good for you", return_tensors="tf"
- >>> ).input_ids # Batch size 1
+ ... ).input_ids # Batch size 1
>>> outputs = model(input_ids)
```"""

2 changes: 1 addition & 1 deletion src/transformers/models/tapas/modeling_tapas.py
@@ -1068,7 +1068,7 @@ def forward(
... )
>>> labels = tokenizer(
... table=table, queries="How many movies has George Clooney played in?", return_tensors="pt"
- >>> )["input_ids"]
+ ... )["input_ids"]
>>> outputs = model(**inputs, labels=labels)
>>> logits = outputs.logits
2 changes: 1 addition & 1 deletion src/transformers/models/tapas/modeling_tf_tapas.py
@@ -1095,7 +1095,7 @@ def call(
... )
>>> labels = tokenizer(
... table=table, queries="How many movies has George Clooney played in?", return_tensors="tf"
- >>> )["input_ids"]
+ ... )["input_ids"]
>>> outputs = model(**inputs, labels=labels)
>>> logits = outputs.logits
@@ -326,7 +326,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
>>> output_ids = model.generate(
... pixel_values, max_length=16, num_beams=4, return_dict_in_generate=True
- >>> ).sequences
+ ... ).sequences
>>> preds = decoder_tokenizer.batch_decode(output_ids, skip_special_tokens=True)
>>> preds = [pred.strip() for pred in preds]
4 changes: 2 additions & 2 deletions src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py
@@ -1081,7 +1081,7 @@ class FlaxWav2Vec2Model(FlaxWav2Vec2PreTrainedModel):
>>> input_values = processor(
... ds["speech"][0], sampling_rate=16_000, return_tensors="np"
- >>> ).input_values # Batch size 1
+ ... ).input_values # Batch size 1
>>> hidden_states = model(input_values).last_hidden_state
```
"""
@@ -1200,7 +1200,7 @@ class FlaxWav2Vec2ForCTC(FlaxWav2Vec2PreTrainedModel):
>>> input_values = processor(
... ds["speech"][0], sampling_rate=16_000, return_tensors="np"
- >>> ).input_values # Batch size 1
+ ... ).input_values # Batch size 1
>>> logits = model(input_values).logits
>>> predicted_ids = jnp.argmax(logits, axis=-1)
2 changes: 1 addition & 1 deletion src/transformers/models/xlm/modeling_xlm.py
@@ -1039,7 +1039,7 @@ def forward(
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(
... 0
- >>> ) # Batch size 1
+ ... ) # Batch size 1
>>> start_positions = torch.tensor([1])
>>> end_positions = torch.tensor([3])
@@ -98,7 +98,7 @@ class XLMProphetNetModel(ProphetNetModel):
>>> input_ids = tokenizer(
... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
- >>> ).input_ids # Batch size 1
+ ... ).input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
@@ -124,7 +124,7 @@ class XLMProphetNetForConditionalGeneration(ProphetNetForConditionalGeneration):
>>> input_ids = tokenizer(
... "Studies have been shown that owning a dog is good for you", return_tensors="pt"
- >>> ).input_ids # Batch size 1
+ ... ).input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
8 changes: 4 additions & 4 deletions src/transformers/models/xlnet/modeling_tf_xlnet.py
@@ -1281,17 +1281,17 @@ def call(
>>> # We show how to setup inputs to predict a next token using a bi-directional context.
>>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=True))[
... None, :
- >>> ] # We will predict the masked token
+ ... ] # We will predict the masked token
>>> perm_mask = np.zeros((1, input_ids.shape[1], input_ids.shape[1]))
>>> perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token
>>> target_mapping = np.zeros(
... (1, 1, input_ids.shape[1])
- >>> ) # Shape [1, 1, seq_length] => let's predict one token
+ ... ) # Shape [1, 1, seq_length] => let's predict one token
>>> target_mapping[
... 0, 0, -1
- >>> ] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token)
+ ... ] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token)
>>> outputs = model(
... input_ids,
@@ -1301,7 +1301,7 @@ def call(
>>> next_token_logits = outputs[
... 0
- >>> ] # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]
+ ... ] # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]
```"""
transformer_outputs = self.transformer(
input_ids=input_ids,