# Cognitive Services - Analyze Text
# SynapseML version 0.9.5

import os

# When running inside Azure Synapse ("Project Arcadia"), pull the Cognitive
# Services key out of the workspace's linked Key Vault and publish it through
# the environment variable the rest of the script reads.
if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia":
    from pyspark.sql import SparkSession
    spark = SparkSession.builder.getOrCreate()
    from notebookutils.mssparkutils.credentials import getSecret
    os.environ['TEXT_API_KEY'] = getSecret("mmlspark-keys", "mmlspark-cs-key")

# put your service keys here
key = os.environ['TEXT_API_KEY']  # raises KeyError if no key was provided
location = 'eastus'  # Azure region of the Cognitive Services resource

from synapse.ml.cognitive import *  # provides TextAnalyze

# Sample input: one (language, text) pair per row. The column names
# "language" and "text" are referenced by the TextAnalyze config below.
df = spark.createDataFrame(
    data=[
        ["en", "Hello Seattle"],
        ["en", "There once was a dog who lived in London and thought she was a human"],
    ],
    schema=["language", "text"],
)

# Configure the TextAnalyze transformer: credentials, the input/output
# column bindings, and the analysis tasks to run for each row.
text_analyze = (TextAnalyze()
    .setLocation(location)
    .setSubscriptionKey(key)
    .setTextCol("text")
    .setLanguageCol("language")
    .setOutputCol("textAnalysis")
    .setErrorCol("error")
    # set the tasks to perform
    .setEntityRecognitionTasks([{"parameters": {"model-version": "latest"}}])
    .setKeyPhraseExtractionTasks([{"parameters": {"model-version": "latest"}}])
    # Uncomment these lines to add more tasks
    # .setEntityRecognitionPiiTasks([{"parameters": {"model-version": "latest"}}])
    # .setEntityLinkingTasks([{"parameters": {"model-version": "latest"}}])
    # .setSentimentAnalysisTasks([{"parameters": {"model-version": "latest"}}])
)

# Run the configured analysis over the sample dataframe.
df_results = text_analyze.transform(df)

from pyspark.sql.functions import col

# reformat and display for easier viewing
display(
    df_results
    # we are not batching so only have a single result per row
    .select("language", "text", "error", col("textAnalysis").getItem(0))
    # explode the Text Analytics tasks into columns
    .select("language", "text", "error", "textAnalysis[0].*")
)