How to import Google+ data into Joplin?


Install Joplin (https://joplin.cozic.net) and start its REST API by enabling the Web Clipper service in Joplin's options.
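To make sure the API is reachable before going further, you can ping it from Python (this assumes the default Web Clipper port, 41184):

import requests

# The Web Clipper service answers "JoplinClipperServer" on /ping
print(requests.get("http://127.0.0.1:41184/ping").text)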

Step 1: Download your Google+ data from https://takeout.google.com.

Step 2: Uncompress everything into a single folder.

Step 3: Put the script below in that folder.

Step 4: Edit the script and set your API token.
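The token is displayed in Joplin's Web Clipper options. A quick way to check that it is valid is to list your notebooks (a minimal sketch, assuming the default host and port):

import requests

token = "Put your token here"
resp = requests.get("http://127.0.0.1:41184/folders?token=" + token)
# A valid token returns HTTP 200 and your notebook list as JSON
print(resp.status_code, resp.json())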

The script:

#
# Google+ Takeout to Joplin import
# Version 1, for Python 3
#
#   ARIAS Frederic
#   Sorry... Python is difficult for me :)
#


import csv
import glob
import json
import locale
import os
import time
from datetime import datetime
from pathlib import Path

import requests

nb_metadata = 0
nb_metadata_import = 0
def month_string_to_number(string):
    """Map an abbreviated French (or English) month name to its number."""
    m = {
        'janv.': 1,
        'feb.': 2, 'févr.': 2,
        'mar.': 3, 'mars': 3,
        'apr.': 4, 'avr.': 4,
        'may.': 5, 'mai': 5,
        'juin': 6,
        'juil.': 7,
        'aug.': 8, 'août': 8,
        'sept.': 9,
        'oct.': 10,
        'nov.': 11,
        'déc.': 12,
    }
    s = string.strip()[:5].lower()
    try:
        return m[s]
    except KeyError:
        raise ValueError('Not a month: ' + string)

# The Takeout metadata formats dates with French month names
locale.setlocale(locale.LC_TIME, 'fr_FR.UTF-8')

# Seed datetime; each photo's real date is filled in via .replace() below
date = datetime.strptime('2017-05-04', "%Y-%m-%d")

# Joplin Web Clipper (REST API) connection settings
ip = "127.0.0.1"
port = "41184"
token = "Put your token here"

headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}

url_notes = "http://" + ip + ":" + port + "/notes?token=" + token
url_folders = "http://" + ip + ":" + port + "/folders?token=" + token
url_tags = "http://" + ip + ":" + port + "/tags?token=" + token
url_resources = "http://" + ip + ":" + port + "/resources?token=" + token

# Init: create the destination notebook, with a fixed id
GooglePlus_UID = "12345678901234567801234567890123"
UID = {}

payload = {
    "id": GooglePlus_UID,
    "title": "GooglePlus Import"
}

try:
    resp = requests.post(url_folders, json=payload, headers=headers)
    resp.raise_for_status()
    resp_dict = resp.json()
    print(resp_dict)
    print("Notebook ID:", resp_dict['id'])
    GooglePlus_UID_real = resp_dict['id']
    UID[GooglePlus_UID] = str(resp_dict['id'])
except requests.exceptions.HTTPError as e:
    raise SystemExit("Bad HTTP status code: %s" % e)
except requests.exceptions.RequestException as e:
    raise SystemExit("Network error: %s" % e)

for csvfilename in glob.iglob('Takeout*/**/*.metadata.csv', recursive=True):
  nb_metadata += 1
  print(nb_metadata, " ", csvfilename)
  # The photo sits next to its "<photo>.<ext>.metadata.csv" file
  mybasename = os.path.basename(csvfilename)
  mylist = mybasename.split(".")
  myfilename = mylist[0] + "." + mylist[1]
  filename = os.path.dirname(csvfilename) + "/" + myfilename
  my_file = Path(filename)
  with open(csvfilename) as csvfile:
    reader = csv.DictReader(csvfile)
    for row in reader:
        if len(row['description']) > 0:
            print(row['title'], row['description'], row['creation_time.formatted'],
                  row['geo_data.latitude'], row['geo_data.longitude'])
            # "creation_time.formatted" looks like "4 mai 2017 à 09:00:00 UTC":
            # day, month name, year, "à", hh:mm:ss, timezone
            mylist2 = row['creation_time.formatted'].split(" ")
            mylist3 = mylist2[4].split(":")
            date = date.replace(year=int(mylist2[2]),
                                month=month_string_to_number(mylist2[1]),
                                day=int(mylist2[0]),
                                hour=int(mylist3[0]),
                                minute=int(mylist3[1]),
                                second=int(mylist3[2]))
            timestamp = time.mktime(date.timetuple()) * 1000
            print(timestamp)
            nb_metadata_import += 1
            mybody = row['description']
            if len(row['geo_data.latitude']) > 2:
                payload_note = {
                    "parent_id": GooglePlus_UID_real,
                    "title": row['creation_time.formatted'],
                    "source": myfilename,
                    "source_url": row['url'],
                    "order": nb_metadata_import,
                    "body": mybody
                }
                payload_note_put = {
                    "latitude": float(row['geo_data.latitude']),
                    "longitude": float(row['geo_data.longitude']),
                    "source": myfilename,
                    "source_url": row['url'],
                    "order": nb_metadata_import,
                    "user_created_time": timestamp,
                    "user_updated_time": timestamp,
                    "author": "Google+"
                }
            else:
                payload_note = {
                    "parent_id": GooglePlus_UID_real,
                    "title": row['creation_time.formatted'],
                    "source": myfilename,
                    "source_url": row['url'],
                    "order": nb_metadata_import,
                    "user_created_time": timestamp,
                    "user_updated_time": timestamp,
                    "author": "Google+",
                    "body": mybody
                }
                payload_note_put = {
                    "source": myfilename,
                    "order": nb_metadata_import,
                    "source_url": row['url'],
                    "user_created_time": timestamp,
                    "user_updated_time": timestamp,
                    "author": "Google+"
                }

            # Create the note, then update it with the extra metadata
            try:
                resp = requests.post(url_notes, json=payload_note)
                resp.raise_for_status()
                resp_dict = resp.json()
                print(resp_dict)
                myuid = resp_dict['id']
                print(myuid)
            except requests.exceptions.HTTPError as e:
                print("Bad HTTP status code:", e)
                continue
            except requests.exceptions.RequestException as e:
                print("Network error:", e)
                continue

            url_notes_put = ("http://" + ip + ":" + port + "/notes/"
                             + myuid + "?token=" + token)

            try:
                resp = requests.put(url_notes_put, json=payload_note_put)
                resp.raise_for_status()
                print(resp.json())
            except requests.exceptions.HTTPError as e:
                print("Bad HTTP status code:", e)
            except requests.exceptions.RequestException as e:
                print("Network error:", e)
            
            if my_file.is_file():
                # Upload the photo as a Joplin resource (multipart form via curl)
                cmd = ("curl -F 'data=@" + filename + "'"
                       + " -F 'props={\"title\":\"" + myfilename + "\"}'"
                       + " 'http://" + ip + ":" + port + "/resources?token=" + token + "'")
                print("Command: " + cmd)
                resp = os.popen(cmd).read()
                try:
                    respj = json.loads(resp)
                    myuid_picture = respj['id']
                    print(myuid_picture)
                except (ValueError, KeyError):
                    print('bad json: ', resp)
                    continue

                # Rewrite the body so the note embeds the uploaded picture
                mybody = row['description'] + "\n  ![" + myfilename + "](:/" + myuid_picture + ")   \n"

                payload_note_put = {
                    "body": mybody
                }

                try:
                    resp = requests.put(url_notes_put, json=payload_note_put)
                    resp.raise_for_status()
                    print(resp.json())
                except requests.exceptions.HTTPError as e:
                    print("Bad HTTP status code:", e)
                except requests.exceptions.RequestException as e:
                    print("Network error:", e)

print("Metadata files found:", nb_metadata)
print("Notes imported:", nb_metadata_import)
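Run the script with Python 3 from the folder that holds the uncompressed Takeout* directories; it walks every *.metadata.csv file it finds.

As a side note, the picture upload does not have to shell out to curl: requests can send the same multipart form. Here is a sketch under the same assumptions as the script (the /resources endpoint and token above); upload_resource is a hypothetical helper, not part of the original:

import json
import requests

def upload_resource(path, title, ip="127.0.0.1", port="41184", token="Put your token here"):
    # POST /resources takes multipart form data: the file itself under
    # "data" and a JSON "props" field carrying at least a title
    url = "http://" + ip + ":" + port + "/resources?token=" + token
    with open(path, "rb") as f:
        resp = requests.post(
            url,
            files={"data": (title, f)},
            data={"props": json.dumps({"title": title})},
        )
    resp.raise_for_status()
    return resp.json()["id"]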

Facebook: free advertising!


I recommend this programme: https://www.franceculture.fr/numerique/facebook-15-ans-beaucoup-de-critiques-mais-toujours-plus-damis — "Facebook: 15 years, plenty of criticism but ever more friends"

"#Facebook makes its money by selling advertisers your available brain time. That time largely takes the form of your personal data. This has fantastic value for advertisers who want to speak to you, and not to your neighbour."

Also worth a look:

Facebook: the side you don't see…



Worth reading: https://www.01net.com/actualites/facebook-un-univers-impitoyable-pour-ses-employes-1607735.html — "Facebook, a ruthless world for its employees"

In surveys, Facebook employees consistently report very high satisfaction with their company. But that sentiment is reportedly anything but spontaneous.

What does getting your news from social networks actually look like?


It's enough to make you weep! A little series of tweets to listen to/read. I think the fake-news problem is not being taken seriously by our leaders. It's tragic…:

Taking stock on Twitter


Via Twitter Analytics (https://analytics.twitter.com/) for my account: https://twitter.com/CYBERNEURONES

Accounts to follow on Twitter:


[Twitter Analytics monthly table, December 2018 back to July 2017 — columns: Tweets, Tweet impressions, Profile visits, Mentions, New followers]