Add files via upload

master
ritvikmath 2020-09-29 07:39:41 -07:00 committed by GitHub
parent c7918ac389
commit 8751a07585
1 changed file with 179 additions and 0 deletions

@ -0,0 +1,179 @@
{
"cells": [
{
"cell_type": "raw",
"metadata": {},
"source": [
"Steps:\n",
" \n",
"1. Visit https://console.cloud.google.com/\n",
"\n",
"2. Create a new project if none exist (otherwise you can use an existing one)\n",
"\n",
"3. Click \"Go to APIs Overview\" -> Click \"Enable APIs and Services\" -> Enable the \"Cloud Vision API\"\n",
"\n",
"4. Click the \"Credentials\" tab on the left. Click \"+ CREATE CREDENTIALS\" at the top and choose \"Service Account\". Give the service account a name and click \"Create\"\n",
"\n",
"5. Click on the newly created service account, ensure it is enabled, and click \"ADD KEY\" -> \"Create new key\". Pick \"JSON\" and download the json file and store it in the current working directory.\n",
"\n",
"6. Run the cells in this notebook."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pip install google-cloud-vision"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"from google.cloud import vision\n",
"from google.cloud.vision import types"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#the JSON file you downloaded in step 5 above\n",
"os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = ''"
]
},
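{
"cell_type": "raw",
"metadata": {},
"source": [
"Optional sanity check (an added sketch, not part of the original notebook): confirm that the path above was filled in and actually points to the downloaded key file before creating the client. The name key_path below is just a local helper variable."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: fail early with a clear message if the key path is blank or wrong\n",
"key_path = os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', '')\n",
"assert key_path and os.path.isfile(key_path), 'Set GOOGLE_APPLICATION_CREDENTIALS to the JSON key from step 5'"
]
},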
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Instantiates a client\n",
"client = vision.ImageAnnotatorClient()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#set this thumbnail as the url\n",
"image = types.Image()\n",
"image.source.image_uri = 'https://i.ytimg.com/vi/UQQHSbeIaB0/maxresdefault.jpg'"
]
},
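{
"cell_type": "raw",
"metadata": {},
"source": [
"If you would rather analyze a local file than a hosted URL, the client also accepts raw image bytes. The helper below is an added sketch using the same v1 types module imported above; path is whatever image file you have on disk (the rest of the notebook keeps using the URL-based image)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: build a types.Image from a local file instead of a remote URL\n",
"def load_local_image(path):\n",
"    with open(path, 'rb') as f:\n",
"        return types.Image(content=f.read())"
]
},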
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#### LABEL DETECTION ######\n",
"\n",
"response_label = client.label_detection(image=image)\n",
"\n",
"for label in response_label.label_annotations:\n",
" print({'label': label.description, 'score': label.score})"
]
},
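{
"cell_type": "raw",
"metadata": {},
"source": [
"Added note (a hedged sketch, not part of the original notebook): if the API cannot process the image (for example an unreachable image URI), the response carries an error field instead of label annotations, so it is worth checking it explicitly."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: surface per-image errors returned by the API\n",
"if response_label.error.message:\n",
"    raise Exception(response_label.error.message)"
]
},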
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#### FACE DETECTION ######\n",
"\n",
"response_face = client.face_detection(image=image)\n",
"\n",
"face_data = []\n",
"\n",
"for face_detection in response_face.face_annotations:\n",
" d = {\n",
" 'confidence': face_detection.detection_confidence,\n",
" 'joy': face_detection.joy_likelihood,\n",
" 'sorrow': face_detection.sorrow_likelihood,\n",
" 'surprise': face_detection.surprise_likelihood,\n",
" 'anger': face_detection.anger_likelihood\n",
" }\n",
" print(d)\n",
" "
]
},
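{
"cell_type": "raw",
"metadata": {},
"source": [
"The joy/sorrow/surprise/anger fields above are likelihood enum values (0 through 5). The cell below is an added sketch that maps them to the names used in the Cloud Vision documentation so the output is easier to read."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: translate likelihood enum values (0-5) into readable names\n",
"likelihood_names = ['UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE', 'LIKELY', 'VERY_LIKELY']\n",
"\n",
"for face in response_face.face_annotations:\n",
"    print({'confidence': face.detection_confidence,\n",
"           'joy': likelihood_names[face.joy_likelihood],\n",
"           'sorrow': likelihood_names[face.sorrow_likelihood],\n",
"           'surprise': likelihood_names[face.surprise_likelihood],\n",
"           'anger': likelihood_names[face.anger_likelihood]})"
]
},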
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#### IMAGE PROPERTIES ######\n",
"\n",
"response_image = client.image_properties(image=image)\n",
"\n",
"image_data = []\n",
"\n",
"for c in response_image.image_properties_annotation.dominant_colors.colors[:3]:\n",
" d = {\n",
" 'color': c.color,\n",
" 'score': c.score,\n",
" 'pixel_fraction': c.pixel_fraction\n",
" }\n",
" print(d)"
]
},
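{
"cell_type": "raw",
"metadata": {},
"source": [
"The color field above is a nested protobuf message. The cell below is an added sketch that flattens it into plain rounded numbers so the top three dominant colors are easier to scan."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: flatten the nested Color message into plain rounded numbers\n",
"for c in response_image.image_properties_annotation.dominant_colors.colors[:3]:\n",
"    print({'rgb': (round(c.color.red), round(c.color.green), round(c.color.blue)),\n",
"           'score': round(c.score, 3),\n",
"           'pixel_fraction': round(c.pixel_fraction, 3)})"
]
},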
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#### TEXT DETECTION ######\n",
"\n",
"response_text = client.text_detection(image=image)\n",
"\n",
"for r in response_text.text_annotations:\n",
" d = {\n",
" 'text': r.description\n",
" }\n",
" print(d)"
]
},
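{
"cell_type": "raw",
"metadata": {},
"source": [
"Added note: the first entry in text_annotations contains the full block of detected text, and the remaining entries are the individual words/elements. The sketch below prints just that full block."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: the first annotation (if any) holds the full detected text\n",
"if response_text.text_annotations:\n",
"    print(response_text.text_annotations[0].description)"
]
},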
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.7"
}
},
"nbformat": 4,
"nbformat_minor": 4
}